diff --git "a/1026.jsonl" "b/1026.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1026.jsonl"
@@ -0,0 +1,446 @@
+{"seq_id": "456329564", "text": "import cv2\nimport numpy as np\n\nPATH = '/home/felipe/Imagens/ball.png' \n\nframe = cv2.imread(PATH)\nimg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\ncircles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 260, param1=30, param2=65, minRadius=0, maxRadius=0)\n\nif circles is not None:\n    for x, y, r in circles[0]:\n        cv2.circle(frame,(x,y),r,(0,255,0),2)\n\ncv2.imshow('Xamaa',frame)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "sub_path": "Resgate/teste/balls_black.py", "file_name": "balls_black.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.HoughCircles", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.HOUGH_GRADIENT", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "327864145", "text": "\"\"\"empty message\n\nRevision ID: f49f08e77ed5\nRevises: f484298d9b7b\nCreate Date: 2019-03-26 22:27:17.726994\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f49f08e77ed5'\ndown_revision = 'f484298d9b7b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('task', sa.Column('finished', sa.Boolean(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('task', 'finished')\n    # ### end Alembic commands ###\n", "sub_path": "migrations/versions/f49f08e77ed5_.py", "file_name": "f49f08e77ed5_.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "41247363", "text": "import requests\nfrom bs4 import BeautifulSoup\n\ndef aliexpress(product,budget):\n    \n    pages=1\n    max_pages=2\n\n    url='http://www.aliexpress.com/wholesale?catId=0&initiative_id=SB_20170821004256&SearchText='+str(product)\n\n    while(pages<=max_pages):\n\n        print(\"Page no. = \" +str(pages))\n        print(\"Page link = \" + str(url))\n        pages+=1\n\n        source_code=requests.get(url)\n        text=source_code.text\n        soup=BeautifulSoup(text,'lxml')\n        \n        for page in soup.findAll('div',{'class' : 'ui-pagination-navi util-left'}):\n            \n            for link in page.findAll('a',{'class' : 'page-next ui-pagination-next'}):\n\n                print('Im inside the loop')\n                url=link.get('href')\n                max_pages=link.text\n                url='https:' + str(url)\n                print(url)\n            break\n        \n        for product in soup.findAll('li',{'class' : ['list-item list-item-first ','list-item ']}):\n            \n            for info in product.findAll('div',{'class' : 'info'}):\n                \n                for details in info.findAll('a',{'class' : 'history-item product '}):\n                    \n                    link=details.get('href')\n                    title=details.get('title')\n                    print('\\nName of the product : ' + str(title))\n                    print('Product link :'+str(link)+'\\n')\n\n", "sub_path": "aliexpress.py", "file_name": "aliexpress.py", "file_ext": "py", "file_size_in_byte": 1405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "53594229", "text": "import setuptools\nfrom distutils.core import Extension\n\nwith open(\"README.md\") as f:\n    long_description = f.read()\n\nsetuptools.setup(\n    name=\"codesnap\",\n    version=\"0.0.4\",\n    author=\"Tian Gao\",\n    author_email=\"gaogaotiantian@hotmail.com\",\n    description=\"A profiling tool that can visualize python code in flame graph\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/gaogaotiantian/codesnap\",\n    packages=setuptools.find_packages(\"src\"),\n    package_dir={\"\":\"src\"},\n    package_data={\n        \"codesnap\": [\n            \"html/*.js\",\n            \"html/*.css\",\n            \"html/*.html\"\n        ]\n    },\n    ext_modules=[\n        Extension(\n            \"codesnap.snaptrace\",\n            sources = [\n                \"src/codesnap/modules/snaptrace.c\",\n            ]\n        )\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: Apache Software License\",\n        \"Operating System :: OS Independent\",\n        \"Topic :: Software Development :: Quality Assurance\",\n    ],\n    python_requires=\">=3.5\",\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 16, "usage_type": "call"}, {"api_name": "distutils.core.Extension", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "517585627", "text": "from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\n\nfrom django.utils import timezone\n\nfrom .models import Genre, Item, Order\nfrom django.db.models import Sum\n\nimport re\n\nfrom django.db.models import Q\n\n# Create your views here.\n\ndef normalize_query(query_string,\n                    findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall,\n                    normspace=re.compile(r'\\s{2,}').sub):\n    ''' Splits the query string in invidual keywords, getting rid of unecessary spaces\n        and grouping quoted words together.\n        Example:\n\n        >>> normalize_query('  some random  words \"with   quotes  \" and   spaces')\n        ['some', 'random', 'words', 'with quotes', 'and', 'spaces']\n\n    '''\n    return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]\n\n\ndef get_query(query_string, search_fields):\n    ''' Returns a query, that is a combination of Q objects. That combination\n        aims to search keywords within a model by testing the given search fields.\n\n    '''\n    query = None # Query to search for every search term\n    terms = normalize_query(query_string)\n    for term in terms:\n        or_query = None # Query to search for a given term in each field\n        for field_name in search_fields:\n            q = Q(**{\"%s__icontains\" % field_name: term})\n            if or_query is None:\n                or_query = q\n            else:\n                or_query = or_query | q\n        if query is None:\n            query = or_query\n        else:\n            query = query & or_query\n    return query\n\ndef indexView(request):\n    genre_list = Genre.objects.order_by('pub_date')[:4]\n    context = {'genre_list': genre_list}\n\n    if (request.POST):\n        mail = request.POST['mail']\n        dir = request.POST['dir']\n        id = mail + \",\" + dir\n\n        item_list = Item.objects.filter(inCart = True)\n\n        totalPrice = 0\n        for item in item_list:\n            totalPrice += item.price\n\n        Order.objects.create(ordererMail = mail, ordererDir = dir, orderPrice = totalPrice, orderId = id)\n\n        for item in item_list:\n            item.inCart = False\n            item.save()\n\n    return render(request, 'shop/index.html', context)\n\ndef genreView(request, genre_id):\n    genre = get_object_or_404(Genre, id=genre_id)\n    genre_list = Genre.objects.order_by('pub_date')[:4]\n    item_list = Item.objects.filter(genre__id = genre_id).order_by('pub_date')[:4]\n    context = {'genre_list': genre_list, 'item_list': item_list, 'actgenre': genre}\n    return render(request, 'shop/genre.html', context)\n\ndef itemView(request, genre_id, item_id):\n    # Echar un ojo a esto ya que deberia buscar genero y dentro del genero el item\n    genre_list = Genre.objects.order_by('pub_date')[:4]\n    item = get_object_or_404(Item, id=item_id)\n    context = {'genre_list': genre_list, 'item': item}\n    return render(request, 'shop/detail.html', context)\n\ndef cartView(request, item_id=None):\n    if (item_id):\n        addedItem = Item.objects.get(id = item_id)\n        addedItem.inCart = not addedItem.inCart\n        addedItem.save()\n\n    genre_list = Genre.objects.order_by('pub_date')[:4]\n    item_list = Item.objects.filter(inCart = True).order_by('pub_date')\n    totalPrice = 0\n    for item in item_list:\n        totalPrice += item.price\n    context = {'genre_list': genre_list, 'item_list': item_list, 'total': totalPrice}\n\n    return render(request, 'shop/cart.html', context)\n\ndef search(request):\n    genre_list = Genre.objects.order_by('pub_date')[:4]\n\n    item_list = None\n\n    if ('searchbox' in request.GET) and request.GET['searchbox'].strip():\n        query_string = request.GET['searchbox']\n        search_fields = ['text', 'id', 'genre__text', 'genre__id']\n\n        entry_query = get_query(query_string, search_fields)\n\n        item_list = Item.objects.filter(entry_query).order_by('pub_date')\n\n    context = {'genre_list': genre_list, 'item_list': item_list}\n    return render(request, 'shop/search.html', context)", "sub_path": "Entrega/LTAW/PRACTICA4/shop/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4093, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Genre.objects.order_by", "line_number": 53, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 53, "usage_type": "name"}, {"api_name": "models.Item.objects.filter", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 61, "usage_type": "name"}, {"api_name": "models.Order.objects.create", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Genre", "line_number": 76, "usage_type": "argument"}, {"api_name": "models.Genre.objects.order_by", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 77, "usage_type": "name"}, {"api_name": "models.Item.objects.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 78, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Genre.objects.order_by", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 84, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 85, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 85, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Item.objects.get", "line_number": 91, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 91, "usage_type": "name"}, {"api_name": "models.Genre.objects.order_by", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 95, "usage_type": "name"}, {"api_name": "models.Item.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 96, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Genre.objects.order_by", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 105, "usage_type": "name"}, {"api_name": "models.Item.objects.filter", "line_number": 115, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 115, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "628842232", "text": "import os\nimport pickle\nimport numpy as np\nfrom datetime import timedelta\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.arima_model import ARIMA\n\ndef forecast(ser, start_date, end_date):\n    \"\"\" Function that uses the ARIMA model to return the forecasted\n    price of a user's stay and a visualization of the prices\n    \"\"\"\n\n    # Fit model to data before requested date\n    history = ser[ser.index.date < start_date]\n    arima_params = pickle.load(open(os.path.join(\"models\", \"ARIMA_params.pkl\"), \"rb+\"))\n    model = ARIMA(history, order=(9, 2, 6))\n    results = model.fit(arima_params)\n    \n    # Calculate how many values we need to forecast\n    duration = (end_date - start_date).days\n    predictions = results.forecast(duration)[0]\n\n    # Create plot of forecasted values with confidence interval\n    month = timedelta(days=31)\n    fig, ax = plt.subplots(figsize=(10, 5))\n    fig.suptitle(\"Airbnb Price Forecasts\")\n    plt.ylabel(\"Price($)\")\n    plot_start = start_date - 2 * month\n    plot_end = end_date + month\n    ax.plot(ser[(ser.index.date >= plot_start) & (ser.index.date <= plot_end)], c=\"r\")\n    results.plot_predict(plot_start, plot_end, ax=ax)\n    ax.lines.pop(2)\n\n    # Return computed price and the plot\n    return np.sum(predictions), fig", "sub_path": "forecast.py", "file_name": "forecast.py", "file_ext": "py", "file_size_in_byte": 1265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pickle.load", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "statsmodels.tsa.arima_model.ARIMA", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "619354040", "text": "import matplotlib.pyplot as plt\n\nvalues = list(range(1, 1000))\nsquares = list(v**2 for v in range(1, 1000))\nplt.scatter(values, squares, s=40, c=squares, cmap=plt.cm.Blues)\n\nplt.title('Squares of Numbers', fontsize=24)\nplt.xlabel('Numbers', fontsize=14)\nplt.ylabel('Squares', fontsize=14)\n\nplt.tick_params(axis='both', which='major', labelsize=14)\n\nplt.axis([0, 1100, 0, 1100000])\n\nplt.show()", "sub_path": "code/squares.py", "file_name": "squares.py", "file_ext": "py", "file_size_in_byte": 392, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.scatter", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "327716316", "text": "import mock\nimport pytest\n\nfrom squeaknode.config.config import SqueaknodeConfig\nfrom squeaknode.core.lightning_address import LightningAddressHostPort\nfrom squeaknode.core.squeak_controller import SqueakController\nfrom squeaknode.core.squeak_core import SqueakCore\nfrom squeaknode.core.squeak_peer import SqueakPeer\nfrom squeaknode.db.squeak_db import SqueakDb\nfrom squeaknode.node.squeak_rate_limiter import SqueakRateLimiter\nfrom squeaknode.node.squeak_whitelist import SqueakWhitelist\n\n\n@pytest.fixture\ndef config():\n    squeaknode_config = SqueaknodeConfig()\n    squeaknode_config.read()\n    return squeaknode_config\n\n\n@pytest.fixture\ndef regtest_config():\n    squeaknode_config = SqueaknodeConfig(\n        dict_config={'core': {'network': 'regtest'}}\n    )\n    squeaknode_config.read()\n    return squeaknode_config\n\n\n@pytest.fixture\ndef squeak_db():\n    # return SqueakDb(None, None, None)\n    return mock.Mock(spec=SqueakDb)\n\n\n@pytest.fixture\ndef squeak_core():\n    return mock.Mock(spec=SqueakCore)\n\n\n@pytest.fixture\ndef lightning_host_port():\n    return LightningAddressHostPort(host=\"my_lightning_host\", port=8765)\n\n\n@pytest.fixture\ndef price_msat():\n    return 777\n\n\n@pytest.fixture\ndef max_squeaks_per_address_per_hour():\n    return 5000\n\n\n@pytest.fixture\ndef squeak_whitelist():\n    return mock.Mock(spec=SqueakWhitelist)\n\n\n@pytest.fixture\ndef squeak_rate_limiter():\n    return mock.Mock(spec=SqueakRateLimiter)\n\n\n@pytest.fixture\ndef squeak_controller(\n    squeak_db,\n    squeak_core,\n    squeak_whitelist,\n    squeak_rate_limiter,\n    config,\n):\n    return SqueakController(\n        squeak_db,\n        squeak_core,\n        squeak_whitelist,\n        squeak_rate_limiter,\n        config,\n    )\n\n\n@pytest.fixture\ndef regtest_squeak_controller(\n    squeak_db,\n    squeak_core,\n    squeak_whitelist,\n    squeak_rate_limiter,\n    regtest_config,\n):\n    return SqueakController(\n        squeak_db,\n        squeak_core,\n        squeak_whitelist,\n        squeak_rate_limiter,\n        regtest_config,\n    )\n\n\ndef test_nothing():\n    assert True\n\n\ndef test_get_buy_offer(squeak_controller):\n    assert squeak_controller.get_buy_offer is not None\n\n\ndef test_get_network_default(squeak_controller):\n    assert squeak_controller.get_network() == \"testnet\"\n\n\ndef test_get_network_regtest(regtest_squeak_controller):\n    assert regtest_squeak_controller.get_network() == \"regtest\"\n\n\n# def test_get_network_regtest(config, squeak_controller):\n#     # with mock.patch.object(Config, 'squeaknode_network', new_callable=mock.PropertyMock) as mock_config:\n#     #     mock_config.return_value = 'regtest'\n#     config.squeaknode_network = \"regtest\"\n#     print(config.squeaknode_network)\n\n#     assert squeak_controller.get_network() == \"regtest\"\n\n\ndef test_create_peer(squeak_db, squeak_controller):\n    squeak_controller.create_peer(\n        \"fake_peer_name\",\n        \"fake_host\",\n        5678,\n    )\n\n    squeak_db.insert_peer.assert_called_with(\n        SqueakPeer(\n            peer_id=None,\n            peer_name=\"fake_peer_name\",\n            host=\"fake_host\",\n            port=5678,\n            uploading=False,\n            downloading=False,\n        )\n    )\n\n\ndef test_create_peer_default_port(config, squeak_db, squeak_controller):\n    squeak_controller.create_peer(\n        \"fake_peer_name\",\n        \"fake_host\",\n        0,\n    )\n\n    squeak_db.insert_peer.assert_called_with(\n        SqueakPeer(\n            peer_id=None,\n            peer_name=\"fake_peer_name\",\n            host=\"fake_host\",\n            port=config.core.default_peer_rpc_port,\n            uploading=False,\n            downloading=False,\n        )\n    )\n", "sub_path": "tests/core/test_squeak_controller.py", "file_name": "test_squeak_controller.py", "file_ext": "py", "file_size_in_byte": 3619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "squeaknode.config.config.SqueaknodeConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 14, "usage_type": "attribute"}, {"api_name": "squeaknode.config.config.SqueaknodeConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 33, "usage_type": "call"}, {"api_name": "squeaknode.db.squeak_db.SqueakDb", "line_number": 33, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 38, "usage_type": "call"}, {"api_name": "squeaknode.core.squeak_core.SqueakCore", "line_number": 38, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 36, "usage_type": "attribute"}, {"api_name": "squeaknode.core.lightning_address.LightningAddressHostPort", "line_number": 43, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 51, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 58, "usage_type": "call"}, {"api_name": "squeaknode.node.squeak_whitelist.SqueakWhitelist", "line_number": 58, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 63, "usage_type": "call"}, {"api_name": "squeaknode.node.squeak_rate_limiter.SqueakRateLimiter", "line_number": 63, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 61, "usage_type": "attribute"}, {"api_name": "squeaknode.core.squeak_controller.SqueakController", "line_number": 74, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 66, "usage_type": "attribute"}, {"api_name": "squeaknode.core.squeak_controller.SqueakController", "line_number": 91, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 83, "usage_type": "attribute"}, {"api_name": "squeaknode.core.squeak_peer.SqueakPeer", "line_number": 133, "usage_type": "call"}, {"api_name": "squeaknode.core.squeak_peer.SqueakPeer", "line_number": 152, "usage_type": "call"}]}
+{"seq_id": "1065438", "text": "from turtle import Screen\nfrom paddle import Paddle\n\nscreen = Screen()\nscreen.bgcolor(\"black\")\nscreen.setup(height=600, width=800)\nscreen.title(\"Pong\")\nscreen.tracer(0)\nr_paddle = Paddle((350, 0))\nl_paddle = Paddle((-350, 0))\n\nscreen.listen()\nscreen.onkey(r_paddle.go_up, \"w\")\nscreen.onkey(r_paddle.go_down, \"s\")\nscreen.onkey(l_paddle.go_up, \"Up\")\nscreen.onkey(l_paddle.go_down, \"Down\")\ngame_on = True\nwhile game_on:\n    screen.update()\n\n\n\n\n\n\nscreen.exitonclick()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "turtle.Screen", "line_number": 4, "usage_type": "call"}, {"api_name": "paddle.Paddle", "line_number": 9, "usage_type": "call"}, {"api_name": "paddle.Paddle", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "415624995", "text": "import scrapy\nfrom scrapy_splash import SplashRequest\nfrom bs4 import BeautifulSoup\n\n\n# 爬取地址http://45.76.194.124/news/1.html#\n\nclass NewsSpider(scrapy.Spider):\n    name = \"onetwo\"\n\n    start_urls = [\n        \"http://45.76.194.124/news/1.html#\",\n    ]\n\n    def start_requests(self):\n        for url in self.start_urls:\n            yield SplashRequest(url\n                                , self.parse\n                                , args={'wait': '10'}\n                                # , endpoint='render.json'\n                                )\n\n    def parse(self, response):\n        content = response.text\n        # content = response.xpath('//*[@id=\"mp-editor\"]/p').extract()\n        with open(\"124.html\", 'w+', encoding=\"utf-8\") as f:\n            for i in content:\n                f.write(i)\n        soup = BeautifulSoup(open(\"124.html\", \"r+\", encoding=\"utf-8\"), 'html.parser')\n        result = soup.find_all(\"td\", {\"id\": \"NewsList\"})\n        soup1 = BeautifulSoup(str(result[0]), \"html.parser\")\n        news_table = []\n        for k, i in enumerate(soup1.find_all(\"tr\")):\n            news_info = []\n            for j in i.find_all(\"td\"):\n                news_info.append(j.text)\n                # print(type(j.text))\n                for m in j.find_all(\"a\"):\n                    # print(j.text)\n                    # print(m.get(\"href\"))\n                    news_info.append(m.get(\"href\"))\n            news_table.append(news_info)\n        news_table.pop(0)  # 第一个是空的,把它丢掉\n        print(news_table)\n\n\n\"\"\"\nnews_table的其中一条数据\n['2018-03-21 21:02:40', 'Compliance and Your Data Center', 'https://www.infosecurity-magazine.com:443/blogs/compliance-data-center/', 'https://www.infosecurity-magazine.com/news/']\n\"\"\"\n", "sub_path": "spiders/newsone.py", "file_name": "newsone.py", "file_ext": "py", "file_size_in_byte": 1759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scrapy_splash.SplashRequest", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "200021098", "text": "import logging\n\nfrom threading import Thread, Event\n\n\nclass Job(Thread):\n    def __init__(self, interval, run_on_start, execute, *args, **kwargs):\n        Thread.__init__(self)\n        self.stopped = Event()\n        self.interval = interval\n        self.run_on_start = run_on_start\n        self.execute = execute\n        self.args = args\n        self.kwargs = kwargs\n        self.logger = logging.getLogger('timeloop')\n\n    def stop(self):\n        self.stopped.set()\n        self.join()\n\n    def run(self):\n        if self.run_on_start:\n            self.logger.info(\"Executing on start: {}\".format(self.execute))\n            self.execute(*self.args, **self.kwargs)\n\n        while not self.stopped.wait(self.interval.total_seconds()):\n            self.logger.info(\"Executing on interval: {}\".format(self.execute))\n            self.execute(*self.args, **self.kwargs)\n", "sub_path": "timeloop/job.py", "file_name": "job.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "threading.Thread", "line_number": 6, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 8, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 8, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "249862627", "text": "import json, socket\r\nfrom modules import query, response, data_structures, cashing, tools\r\n\r\nADDRESS = (\"127.0.0.1\", 53)\r\nCASH_FILE = \"cash.json\"\r\nROOT_SERVERS = (('199.9.14.201', 53),\r\n                ('198.41.0.4', 53),\r\n                ('199.7.91.13', 53))\r\nQ_TYPES = [1, 2]\r\n\r\n\r\nclass DNSServer:\r\n    def __init__(self, forwarder_addr, cash=None, iterative=True):\r\n        self.forwarder = forwarder_addr\r\n        self.id = 1  # последовательное упрощает атаку отравления кэша\r\n        self.cash = cash if cash else cashing.Cash()\r\n        self.iterative = iterative\r\n\r\n    def execute(self):\r\n        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as server:\r\n            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n            server.bind(ADDRESS)\r\n            server.settimeout(2)\r\n            while True:\r\n                try:\r\n                    data, addr = server.recvfrom(1024)\r\n                except socket.timeout:\r\n                    continue\r\n                resp = self.process_query(data)\r\n                if not resp:\r\n                    continue\r\n                resp_b = response.ResponseHandler.make_response(resp)\r\n                server.sendto(resp_b, addr)\r\n\r\n    def process_query(self, data):\r\n        q_in = query.QueryHandler.parse_query(data)\r\n        q_in_id = q_in.header.id\r\n        url = q_in.question.url\r\n        q_type = q_in.question.q_type\r\n        if q_type not in Q_TYPES:\r\n            return None\r\n\r\n        cash_value = self.cash.get_answer(url, q_type)\r\n        print(cash_value)\r\n        if cash_value:\r\n            return self.construct_response(url, q_type, q_in_id, cash_value)\r\n        if self.iterative:\r\n            return self.get_response_iterative(url, q_type, q_in_id)\r\n        else:\r\n            return self.get_response_recurs(url, q_type, q_in_id)\r\n\r\n    def get_response_iterative(self, url, q_type, q_in_id):\r\n        labels = tools.Tools.get_label_list(url)\r\n        current_ns_servers = ROOT_SERVERS\r\n        for i in range(len(labels) - 1, -2, -1):\r\n            current_url = '.'.join(labels[i:]) if i != -1 else url\r\n            q_out = self.construct_query(current_url, 2 if i != -1 else q_type)\r\n            q_out_b = query.QueryHandler.make_query(q_out)\r\n            for server in current_ns_servers:\r\n                data = self.send_query_get_response(q_out_b, server)\r\n                if data:\r\n                    break\r\n            server_resp = response.ResponseHandler.parse_response(data)\r\n            if server_resp.header.flags.rcode != 0:\r\n                return self.construct_response_with_error(q_in_id, server_resp.header.flags.rcode)\r\n            self.cash_response(server_resp)\r\n            if i != -1:\r\n                answer = self.cash.get_answer(current_url, 2)\r\n                print(answer)\r\n                if answer:\r\n                    current_ns_servers = [(ns, 53) for ns in self.cash.get_answer(current_url, 2)][:3]\r\n            else:\r\n                print(current_url, q_type)\r\n                answer = self.cash.get_answer(current_url, q_type)\r\n                return self.construct_response(url, q_type, q_in_id, answer)\r\n\r\n    def get_response_recurs(self, url, q_type, q_in_id):\r\n        q_out = self.construct_query(url, q_type)\r\n        q_out_b = query.QueryHandler.make_query(q_out)\r\n        data = self.send_query_get_response(q_out_b, self.forwarder)\r\n        if not data:\r\n            return None\r\n        forwarder_resp = response.ResponseHandler.parse_response(data)\r\n        if forwarder_resp.header.flags.rcode != 0:\r\n            return self.construct_response_with_error(q_in_id, forwarder_resp.header.flags.rcode)\r\n        self.cash_response(forwarder_resp)\r\n        cash_value = self.cash.get_answer(url, q_type)\r\n        if not cash_value:\r\n            cash_value = []\r\n        return self.construct_response(url, q_type, q_in_id, cash_value)\r\n\r\n    def send_query_get_response(self, query_b, address):\r\n        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client:\r\n            client.sendto(query_b, address)\r\n            return client.recvfrom(1024)[0]\r\n\r\n    def cash_response(self, resp):\r\n        for rr in resp.answers_rrs + resp.authority_rrs + resp.additional_rrs:\r\n            self.cash.add_record(rr)\r\n        with open(CASH_FILE, 'wt') as f:\r\n            json.dump(self.cash.make_serializable(), f)\r\n\r\n    def construct_response(self, url, q_type, q_id, answers_list):\r\n        flags = data_structures.Flags(1, 0, 1, 0, 0)\r\n        header = data_structures.Header(q_id, flags, 1, len(answers_list), 0, 0)\r\n        question = data_structures.Question(url, q_type)\r\n        answers = []\r\n        for answer in answers_list:\r\n            answers.append(data_structures.ResourceRecord(url, q_type, 60, answer))\r\n        return response.Response(header, question, answers, [], [])\r\n\r\n    def construct_response_with_error(self, q_id, error):\r\n        flags = data_structures.Flags(1, 0, 1, 0, error)\r\n        header = data_structures.Header(q_id, flags, 0, 0, 0, 0)\r\n        return response.Response(header, None, [], [], [])\r\n\r\n    def construct_query(self, url, q_type):\r\n        flags = data_structures.Flags(0, 0, 1, 0, 0)\r\n        header = data_structures.Header(self.id, flags, 1, 0, 0, 0)\r\n        question = data_structures.Question(url, q_type)\r\n        return query.Query(header, question)\r\n\r\n\r\ndef main():\r\n    with open('config.txt', 'rt') as g:\r\n        lines = g.readlines()\r\n    addr = lines[0]\r\n    forwarder = (addr, 53)\r\n    with open(CASH_FILE, 'rt') as f:\r\n        cash_j = json.load(f)\r\n    cash = cashing.Cash.get_cash_from_json(cash_j)\r\n    server = DNSServer(forwarder, cash, iterative=True)\r\n    server.execute()\r\n\r\n\r\nif __name__ == '__main__':\r\n    try:\r\n        main()\r\n    except KeyboardInterrupt:\r\n        pass\r\n", "sub_path": "dns_server/dns_server.py", "file_name": "dns_server.py", "file_ext": "py", "file_size_in_byte": 5852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "modules.cashing.Cash", "line_number": 16, "usage_type": "call"}, {"api_name": "modules.cashing", "line_number": 16, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 20, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 20, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 20, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 21, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 21, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 27, "usage_type": "attribute"}, {"api_name": "modules.response.ResponseHandler.make_response", "line_number": 32, "usage_type": "call"}, {"api_name": "modules.response.ResponseHandler", "line_number": 32, "usage_type": "attribute"}, {"api_name": "modules.response", "line_number": 32, "usage_type": "name"}, {"api_name": "modules.query.QueryHandler.parse_query", "line_number": 36, "usage_type": "call"}, {"api_name": "modules.query.QueryHandler", "line_number": 36, "usage_type": "attribute"}, {"api_name": "modules.query", "line_number": 36, "usage_type": "name"}, {"api_name": "modules.tools.Tools.get_label_list", "line_number": 53, "usage_type": "call"}, {"api_name": "modules.tools.Tools", "line_number": 53, "usage_type": "attribute"}, {"api_name": "modules.tools", "line_number": 53, "usage_type": "name"}, {"api_name": "modules.query.QueryHandler.make_query", "line_number": 58, "usage_type": "call"}, {"api_name": "modules.query.QueryHandler", "line_number": 58, "usage_type": "attribute"}, {"api_name": "modules.query", "line_number": 58, "usage_type": "name"}, {"api_name": "modules.response.ResponseHandler.parse_response", "line_number": 63, "usage_type": "call"}, {"api_name": "modules.response.ResponseHandler", "line_number": 63, "usage_type": "attribute"}, {"api_name": "modules.response", "line_number": 63, "usage_type": "name"}, {"api_name": "modules.query.QueryHandler.make_query", "line_number": 79, "usage_type": "call"}, {"api_name": "modules.query.QueryHandler", "line_number": 79, "usage_type": "attribute"}, {"api_name": "modules.query", "line_number": 79, "usage_type": "name"}, {"api_name": "modules.response.ResponseHandler.parse_response", "line_number": 83, "usage_type": "call"}, {"api_name": "modules.response.ResponseHandler", "line_number": 83, "usage_type": "attribute"}, {"api_name": "modules.response", "line_number": 83, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 93, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 93, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 93, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 101, "usage_type": "call"}, {"api_name": "modules.data_structures.Flags", "line_number": 104, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 104, "usage_type": "name"}, {"api_name": "modules.data_structures.Header", "line_number": 105, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 105, "usage_type": "name"}, {"api_name": "modules.data_structures.Question", "line_number": 106, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 106, "usage_type": "name"}, {"api_name": "modules.data_structures.ResourceRecord", "line_number": 109, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 109, "usage_type": "name"}, {"api_name": "modules.response.Response", "line_number": 110, "usage_type": "call"}, {"api_name": "modules.response", "line_number": 110, "usage_type": "name"}, {"api_name": "modules.data_structures.Flags", "line_number": 113, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 113, "usage_type": "name"}, {"api_name": "modules.data_structures.Header", "line_number": 114, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 114, "usage_type": "name"}, {"api_name": "modules.response.Response", "line_number": 115, "usage_type": "call"}, {"api_name": "modules.response", "line_number": 115, "usage_type": "name"}, {"api_name": "modules.data_structures.Flags", "line_number": 118, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 118, "usage_type": "name"}, {"api_name": "modules.data_structures.Header", "line_number": 119, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 119, "usage_type": "name"}, {"api_name": "modules.data_structures.Question", "line_number": 120, "usage_type": "call"}, {"api_name": "modules.data_structures", "line_number": 120, "usage_type": "name"}, {"api_name": "modules.query.Query", "line_number": 121, "usage_type": "call"}, {"api_name": "modules.query", "line_number": 121, "usage_type": "name"}, {"api_name": "json.load", "line_number": 130, "usage_type": "call"}, {"api_name": "modules.cashing.Cash.get_cash_from_json", "line_number": 131, "usage_type": "call"}, {"api_name": "modules.cashing.Cash", "line_number": 131, "usage_type": "attribute"}, {"api_name": "modules.cashing", "line_number": 131, "usage_type": "name"}]}
+{"seq_id": "44372395", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport logging.handlers\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom tensorflow.keras.layers import Dense, Flatten\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import SGD\n\nfrom creat_train_dataset import ImageCreatTrainDataset\nfrom files_tools import save_json_file\n\nPYTHON_LOGGER = logging.getLogger(__name__)\nif not os.path.exists(\"log\"):\n    os.mkdir(\"log\")\nHDLR = logging.handlers.TimedRotatingFileHandler(\"log/model1.log\",\n                                                 when=\"midnight\", backupCount=60)\nSTREAM_HDLR = logging.StreamHandler()\nFORMATTER = logging.Formatter(\"%(asctime)s %(filename)s [%(levelname)s] %(message)s\")\nHDLR.setFormatter(FORMATTER)\nSTREAM_HDLR.setFormatter(FORMATTER)\nPYTHON_LOGGER.addHandler(HDLR)\nPYTHON_LOGGER.addHandler(STREAM_HDLR)\nPYTHON_LOGGER.setLevel(logging.DEBUG)\n\n# Absolute path to the folder location of this python file\nFOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))\nDATASET = os.path.join(FOLDER_ABSOLUTE_PATH, \"dog_cat_dataset\")\nIMG_DIM = 32\nEPOCHS = 20\nBATCH_SIZE = 32\nLEARNING_RATE = 0.01\n\ndataset = ImageCreatTrainDataset(DATASET, IMG_DIM)\n\ndataset.load_dataset()\n\ntrain_x, train_y = dataset.get_train_data()\ntest_x, test_y = dataset.get_test_data()\nlabels, nb_labels = dataset.get_labels()\n\nPYTHON_LOGGER.info(\"First layer dim: {}\".format(IMG_DIM * IMG_DIM * 3))\nmodel = Sequential()\nmodel.add(Flatten(input_shape=(IMG_DIM, IMG_DIM, 3)))\nmodel.add(Dense(IMG_DIM * IMG_DIM * 3, activation=\"relu\"))\nmodel.add(Dense(nb_labels, activation=\"softmax\"))\n\nloss = \"categorical_crossentropy\" if nb_labels > 2 else \"binary_crossentropy\"\n\nsgd = SGD(LEARNING_RATE)\nmodel.compile(loss=loss, optimizer=sgd, metrics=[\"accuracy\"])\nH = model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=EPOCHS, batch_size=BATCH_SIZE)\n\n# evaluate the network\n\nPYTHON_LOGGER.info(\"Evaluating network\")\npredictions = model.predict(test_x, batch_size=BATCH_SIZE)\nprint(classification_report(test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=labels))\n\n# plot the training loss and accuracy\nrange_plot = np.arange(0, EPOCHS)\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(range_plot, H.history[\"loss\"], label=\"train_loss\", color='red')\nplt.plot(range_plot, H.history[\"val_loss\"], label=\"val_loss\", color='green')\nplt.plot(range_plot, H.history[\"accuracy\"], label=\"train_acc\", color='blue')\nplt.plot(range_plot, H.history[\"val_accuracy\"], label=\"val_acc\", color='pink')\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()\n\nsave_json_file({\"img_dim\": IMG_DIM, \"labels\": labels}, \"model_1.json\")\nmodel.save(\"model_1.h5\")\n", "sub_path": "model1.py", "file_name": "model1.py", "file_ext": "py", "file_size_in_byte": 2854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.handlers.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.handlers.handlers.TimedRotatingFileHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.handlers.handlers", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.handlers", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.handlers.StreamHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 24, "usage_type": "name"}, {"api_name": "logging.handlers.Formatter", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.handlers.DEBUG", "line_number": 30, "usage_type": "attribute"}, {"api_name": "logging.handlers", "line_number": 30, "usage_type": "name"}, {"api_name": "os.path.normpath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "creat_train_dataset.ImageCreatTrainDataset", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "files_tools.save_json_file", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "157601671", "text": "import keras\nimport numpy as np\nfrom PIL import Image\nimport os\nimport matplotlib.pyplot as plt\nfrom keras.utils import plot_model\n\nMODEL_PATH = 'LENET-5CNN.h5'\nPIC_FOLDER = 'C:/Users/Hsinyao/Desktop//Keras/pic/'\n\ndef preprocess_image(IMG):\n    img = Image.open(IMG)\n    img = img.resize((28, 28), Image.ANTIALIAS)\n    im_arr = np.array(img.convert('L'))\n    for i in range(28):\n        for j in range(28):\n            im_arr[i][j] = 255 - im_arr[i][j]\n            if (im_arr[i][j] > 25):\n                im_arr[i][j] = 255\n            else:\n                im_arr[i][j] = 0\n    im_arr = im_arr.astype(float)\n    im_arr /= 255\n    im_arr = im_arr.reshape((1, 28, 28, 1))\n    return im_arr\n\ndef predict(IMG_FOLDER):\n    filenames = os.listdir(IMG_FOLDER)\n    for filename in filenames:\n        if filename.split('.')[-1] == 'png':\n            img_array = preprocess_image(IMG_FOLDER + filename)\n            model = keras.models.load_model(MODEL_PATH)\n            plot_model(model, to_file='HsinyaoCNN.png', show_layer_names=True, show_shapes=True)\n            predict_value = model.predict(img_array)\n            print(np.argmax(predict_value))\n\npredict(PIC_FOLDER)", "sub_path": "my_keras_app.py", "file_name": "my_keras_app.py", "file_ext": "py", "file_size_in_byte": 1164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 13, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 32, "usage_type": "attribute"}, {"api_name": "keras.utils.plot_model", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "440408189", "text": "# =============================================================================\n# Authors: PAR Government\n# Organization: DARPA\n#\n# Copyright (c) 2016 PAR Government\n# All rights reserved.\n# ==============================================================================\n\nfrom maskgen.tool_set import getMilliSecondsAndFrameCount\nimport cv2\nfrom maskgen.algorithms.optical_flow import smartAddFrames\nfrom maskgen.tool_set import getDurationStringFromMilliseconds\n\n\n\"\"\"\nReturns the start and end time of the frames added\n\"\"\"\n\ndef transform(img,source,target,**kwargs):\n    start_time = getMilliSecondsAndFrameCount(kwargs['Start Time']) if 'Start Time' in kwargs else (0,1)\n    end_time = getMilliSecondsAndFrameCount(kwargs['End Time']) if 'End Time' in kwargs else None\n    frames_add = int(kwargs['Frames to Add']) if 'Frames to Add' in kwargs else None\n    if frames_add is not None:\n        end_time = (start_time[0],start_time[1] + frames_add - 1)\n    codec = (kwargs['codec']) if 'codec' in kwargs else 'XVID'\n    add_frames, end_time_millis = smartAddFrames(source, target,\n                                                 start_time,\n                                                 end_time,\n                                                 codec=codec,\n                                                 direction=kwargs['Direction'] if 'Direction' in kwargs else 'forward')\n\n\n    if start_time[0] > 0:\n        et = getDurationStringFromMilliseconds(end_time_millis)\n    else:\n        et = str(int(start_time[1]) + int(add_frames) - 1)\n\n    return {'Start Time':str(kwargs['Start Time']),\n            'End Time': et,\n            'Frames to Add': int(add_frames),\n            'Method': 'Pixel Motion',\n            'Algorithm':'Farneback',\n            'scale':0.8,\n            'levels':7,\n            'winsize':15,\n            'iterations': 3,\n            'poly_n':7,\n            'poly_sigma':1.5,\n            'Vector Detail':100},None\n\ndef suffix():\n    return '.avi'\n\n\ndef operation():\n    return {'name':'TimeAlterationWarp',\n            'category':'TimeAlteration',\n            'description':'Insert frames using optical flow given a starting point and desired end time.',\n            'software':'OpenCV',\n            'version':cv2.__version__,\n            'arguments': {\n                'Frames to Add': {\n                    'type': 'int[0:100000000]',\n                    'defaultvalue': 1,\n                    'description':'Number of frames since Start Time. overrides or in lieu of an End Time.'\n                },\n                'Direction': {\n                    'type': 'list',\n                    'values':['forward','backward'],\n                    'defaultvalue': 'forward',\n                    'description': 'Direction of flow.'\n                },\n                'codec': {\n                    'type': 'list',\n                    'values': ['MPEG','XVID','AVC1','HFYU'],\n                    'defaultvalue': 'XVID',\n                    'description': 'Codec of output video.'\n                }\n            },\n            'transitions': [\n                'video.video'\n            ]\n            }\n", "sub_path": "plugins/FlowDrivenVideoTimeWarp/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "maskgen.tool_set.getMilliSecondsAndFrameCount", "line_number": 20, "usage_type": "call"}, {"api_name": "maskgen.tool_set.getMilliSecondsAndFrameCount", "line_number": 21, "usage_type": "call"}, {"api_name": "maskgen.algorithms.optical_flow.smartAddFrames", "line_number": 26, "usage_type": "call"}, {"api_name": "maskgen.tool_set.getDurationStringFromMilliseconds", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.__version__", "line_number": 60, "usage_type": "attribute"}]}
+{"seq_id": "180396234", "text": "import base64\nimport os\nimport sublime\nimport queue\n\nfrom threading import Thread\nfrom .session import Session\nfrom . import formatter\nfrom .client import Client\nfrom ..log import log\n\n\ndef done(response):\n    return response.get(\"status\") == [\"done\"]\n\n\ndef b64encode_file(path):\n    with open(path, \"rb\") as file:\n        return base64.b64encode(file.read()).decode(\"utf-8\")\n\n\nclass Repl(object):\n    def __init__(self, window, host, port, options={\"print_capabilities\": True}):\n        self.client = Client(host, port).go()\n        self.printq = queue.Queue()\n        self.tapq = queue.Queue()\n        self.options = options\n\n    def create_session(self, owner, capabilities, response):\n        new_session_id = response[\"new-session\"]\n        new_session = Session(new_session_id, self.client)\n        new_session.info = capabilities\n        self.client.register_session(owner, new_session)\n        return new_session\n\n    def create_sessions(self, session, response):\n        capabilities = response\n        session.info = capabilities\n\n        if self.options.get(\"print_capabilities\"):\n            session.output(response)\n\n        session.send(\n            {\"op\": \"clone\", \"session\": session.id},\n            handler=lambda response: done(response)\n            and self.create_session(\"plugin\", capabilities, response),\n        )\n\n        session.send(\n            {\"op\": \"clone\", \"session\": session.id},\n            handler=lambda response: done(response)\n            and self.create_session(\"user\", capabilities, response),\n        )\n\n    def handle_sideloader_provide_response(self, session, response):\n        if \"status\" in response and \"unexpected-provide\" in response[\"status\"]:\n            name = response[\"name\"]\n            session.output({\"err\": f\"unexpected provide: {name}\\n\"})\n\n    def sideloader_provide(self, session, response):\n        if \"name\" in response:\n            name = response[\"name\"]\n\n            op = {\n                \"id\": response[\"id\"],\n                \"op\": \"sideloader-provide\",\n                \"type\": response[\"type\"],\n                \"name\": name,\n            }\n\n            path = os.path.join(sublime.packages_path(), \"tutkain/clojure/src\", name)\n\n            if os.path.isfile(path):\n                log.debug({\"event\": \"sideloader/provide\", \"path\": path})\n                op[\"content\"] = b64encode_file(path)\n            else:\n                op[\"content\"] = \"\"\n\n            session.send(\n                op,\n                handler=lambda response: self.handle_sideloader_provide_response(\n                    session, response\n                ),\n            )\n\n    def describe(self, session):\n        def handler(response):\n            if done(response):\n                self.start_formatter({\"newline_on_done\": False})\n                self.create_sessions(session, response)\n\n        session.send({\"op\": \"describe\"}, handler=handler)\n\n    def add_tap(self, session):\n        session.send(\n            {\"op\": \"tutkain/add-tap\"},\n            handler=lambda response: done(response) and self.describe(session),\n        )\n\n    def add_middleware(self, session, response):\n        if done(response):\n            session.send(\n                {\n                    \"op\": \"add-middleware\",\n                    \"middleware\": [\n                        \"tutkain.nrepl.middleware.test/wrap-test\",\n                        \"tutkain.nrepl.middleware.tap/wrap-tap\",\n                    ],\n                },\n                handler=lambda response: done(response) and self.add_tap(session),\n            )\n        elif \"err\" in response:\n            session.output(response)\n            session.output(\n                {\n                    \"err\": \"\"\"*** [Tutkain] Sideloading failed. See error message above for details. Some features are unavailable. ***\\n\"\"\"\n                }\n            )\n\n            session.send(\n                {\"op\": \"clone\"},\n                handler=lambda response: done(response)\n                and self.initialize_without_sideloader(session.info, response),\n            )\n\n    def sideload(self, session):\n        session.send(\n            {\"op\": \"sideloader-start\"},\n            handler=lambda response: self.sideloader_provide(session, response),\n        )\n\n        session.send(\n            {\"op\": \"eval\", \"code\": \"\"\"(require 'tutkain.nrepl.util.pprint)\"\"\"},\n            pprint=False,\n            handler=lambda response: self.add_middleware(session, response),\n        )\n\n    def start_formatter(self, settings):\n        format_loop = Thread(\n            daemon=True,\n            target=formatter.format_loop,\n            args=(\n                self.client.recvq,\n                self.printq,\n                self.tapq,\n                settings,\n            ),\n        )\n\n        format_loop.name = \"tutkain.connection.format_loop\"\n        format_loop.start()\n\n    def initialize_without_sideloader(self, capabilities, response):\n        session = self.create_session(\"plugin\", capabilities, response)\n\n        if self.options.get(\"print_capabilities\"):\n            session.output(capabilities)\n\n        def handler(response):\n            if done(response):\n                self.start_formatter({\"newline_on_done\": True})\n                self.create_session(\"user\", capabilities, response)\n\n        # Send the clone op via the client instead of the plugin session because some servers do\n        # not support sending the op via the session.\n        self.client.send({\"op\": \"clone\"}, handler=handler)\n\n    def initialize_sessions(self, capabilities, response):\n        if \"sideloader-start\" in capabilities[\"ops\"]:\n            session = self.create_session(\"sideloader\", capabilities, response)\n            self.sideload(session)\n        else:\n            self.initialize_without_sideloader(capabilities, response)\n\n    def clone(self, capabilities):\n        self.client.send(\n            {\"op\": \"clone\"},\n            handler=lambda response: done(response)\n            and self.initialize_sessions(capabilities, response),\n        )\n\n    def go(self):\n        self.client.send(\n            {\"op\": \"describe\"},\n            handler=lambda response: done(response) and self.clone(response),\n        )\n\n        return self\n", "sub_path": "src/repl/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "base64.b64encode", "line_number": 19, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 24, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 25, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 26, "usage_type": "call"}, {"api_name": "session.Session", "line_number": 31, "usage_type": "call"}, {"api_name": "session.info", "line_number": 38, "usage_type": "attribute"}, {"api_name": "session.output", "line_number": 41, "usage_type": "call"}, {"api_name": "session.send", "line_number": 43, "usage_type": "call"}, {"api_name": "session.id", "line_number": 44, "usage_type": "attribute"}, {"api_name": "session.send", "line_number": 49, "usage_type": "call"}, {"api_name": "session.id", "line_number": 50, "usage_type": "attribute"}, {"api_name": "session.output", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sublime.packages_path", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "log.log.debug", "line_number": 74, "usage_type": "call"}, {"api_name": "log.log", "line_number": 74, "usage_type": "name"}, {"api_name": "session.send", "line_number": 79, "usage_type": "call"}, {"api_name": "session.send", "line_number": 92, "usage_type": "call"}, {"api_name": "session.send", "line_number": 95, "usage_type": "call"}, {"api_name": "session.send", "line_number": 102, "usage_type": "call"}, {"api_name": "session.output", "line_number": 113, "usage_type": "call"}, {"api_name": "session.output", "line_number": 114, "usage_type": "call"}, {"api_name": "session.send", "line_number": 120, "usage_type": "call"}, {"api_name": "session.info", "line_number": 123, "usage_type": "attribute"}, {"api_name": "session.send", "line_number": 127, "usage_type": "call"}, {"api_name": "session.send", "line_number": 132, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 139, "usage_type": "call"}, {"api_name": "session.output", "line_number": 157, "usage_type": "call"}]}
+{"seq_id": "493716852", "text": "#!/usr/bin/env python\nfrom prettytable import PrettyTable\nimport subprocess\nimport json\n\n\ndef shell(cmd):\n    sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    out, err = sp.communicate()\n    return out, err\n\n\ndef get_nodes():\n    cmd = \"openstack baremetal node list --long -f json\"\n    out, err = shell(cmd)\n    if err:\n        print('%s error!!:%s' % (cmd, err))\n    nodes = json.loads(out)\n    return nodes\n\n\ndef get_ports():\n    cmd = \"openstack baremetal port list --long -f json\"\n    out, err = shell(cmd)\n    if err:\n        print('%s error!!:%s' % (cmd, err))\n    ports = json.loads(out)\n    return ports\n\n\ndef port_node_maps():\n    nodes = get_nodes()\n    ports = get_ports()\n    tbl = PrettyTable([\"ipmi_address\", \"mac\", \"switch_id\", \"port_id\"])\n    node_ipmis = {}\n    for node in nodes:\n        uuid = node[\"UUID\"]\n        ipmi_address = node[\"Driver Info\"][\"ipmi_address\"]\n        node_ipmis[uuid] = ipmi_address\n    for port in ports:\n        uuid = port[\"Node UUID\"]\n        ipmi_address = node_ipmis[uuid]\n        mac = port[\"Address\"]\n        switch_id = port[\"Local Link Connection\"][\"switch_id\"]\n        port_id = port[\"Local Link Connection\"][\"port_id\"]\n        tbl.add_row([ipmi_address, mac, switch_id, port_id])\n    print(tbl.get_string(sortby=\"ipmi_address\"))\n\n\nif __name__ == '__main__':\n    port_node_maps()\n", "sub_path": "ironic/ironic_list/ironic_node_port_list.py", "file_name": "ironic_node_port_list.py", "file_ext": "py", "file_size_in_byte": 1376, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "subprocess.Popen", "line_number": 8, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "64147603", "text": "from flask import Flask\ntry:\n    from flask import Blueprint\nexcept ImportError:\n    # Blueprints only available starting with 0.7,\n    # fall back to old Modules otherwise.\n    Blueprint = None\n    from flask import Module\nfrom flaskext.assets import Environment, Bundle\n \n\nclass TestUrlAndDirectory(object):\n    \"\"\"By default, the 'url' and 'directory' settings of webassets are\n    not used in Flask-Assets; that is, the values are automatically\n    handled based on the configuration of the Flask app and the modules\n    used.\n\n    The user can disable the automatic handling by setting these values\n    if he needs to for some reason.\n\n    Let's 
test the different scenarios to ensure everything works.\n \"\"\"\n\n def setup(self):\n self.app = Flask(__name__, static_path='/app_static')\n import test_module\n if not Blueprint:\n self.module = Module(test_module.__name__, name='module',\n static_path='/mod_static')\n self.app.register_module(self.module)\n else:\n self.blueprint = Blueprint('module', test_module.__name__,\n static_url_path='/mod_static',\n static_folder='static')\n self.app.register_blueprint(self.blueprint)\n self.env = Environment(self.app)\n\n def config_values_not_set_by_default(self):\n assert not 'directory' in self.env.config\n assert not 'url' in self.env.config\n assert_raises(KeyError, self.env.config.__getitem__, 'directory')\n assert_raises(KeyError, self.env.config.__getitem__, 'url')\n\n def test_directory_auto(self):\n \"\"\"Test how we handle file references if no root 'directory' is\n configured manually.\n \"\"\"\n assert not 'directory' in self.env.config\n root = self.app.root_path\n assert Bundle('foo').get_files(self.env) == [root + '/static/foo']\n # Modules prefixes in paths are handled specifically.\n assert Bundle('module/bar').get_files(self.env) == [root + '/test_module/static/bar']\n # Prefixes that aren't valid module names are just considered\n # subfolders of the main app.\n assert Bundle('nomodule/bar').get_files(self.env) == [root + '/static/nomodule/bar']\n # In case the name of a app-level subfolder conflicts with a\n # module name, you can always use this hack:\n assert Bundle('./module/bar').get_files(self.env) == [root + '/static/module/bar']\n\n def test_directory_custom(self):\n \"\"\"A custom root directory is configured.\"\"\"\n self.env.directory = '/tmp'\n assert Bundle('foo').get_files(self.env) == ['/tmp/foo']\n # We do not recognize references to modules.\n assert Bundle('module/bar').get_files(self.env) == ['/tmp/module/bar']\n\n def test_url_auto(self):\n \"\"\"Test how urls are generated if no 'url' is configured manually.\n \"\"\"\n assert not 'url' in self.env.config\n\n assert Bundle('foo').urls(self.env) == ['/app_static/foo']\n # Urls for files that point to a module use that module's url prefix.\n assert Bundle('module/bar').urls(self.env) == ['/mod_static/bar']\n # Try with a prefix that's not actually a valid module\n assert Bundle('nomodule/bar').urls(self.env) == ['/app_static/nomodule/bar']\n\n def test_url_custom(self):\n \"\"\"A custom root url is configured.\"\"\"\n self.env.url = '/media'\n assert Bundle('foo').urls(self.env) == ['/media/foo']\n # We do not recognize references to modules.\n assert Bundle('module/bar').urls(self.env) == ['/media/module/bar']\n\n def test_existing_request_object_used(self):\n \"\"\"[Regression] Check for a bug where the url generation code of\n Flask-Assets always added a dummy test request to the context stack,\n instead of using the existing one if there is one.\n\n We test this by making the context define a custom SCRIPT_NAME\n prefix, and then we check if it affects the generated urls, as\n it should.\n \"\"\"\n with self.app.test_request_context(\n '/', environ_overrides={'SCRIPT_NAME': '/yourapp'}):\n assert Bundle('foo').urls(self.env) == ['/yourapp/app_static/foo']\n", "sub_path": "tests/test_integration.py", "file_name": "test_integration.py", "file_ext": "py", "file_size_in_byte": 4296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 
25, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.Module", "line_number": 28, "usage_type": "call"}, {"api_name": "test_module.__name__", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.Blueprint", "line_number": 32, "usage_type": "call"}, {"api_name": "test_module.__name__", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flaskext.assets.Environment", "line_number": 36, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 50, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 52, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 55, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 58, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 63, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 65, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 72, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 74, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 76, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 81, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 83, "usage_type": "call"}, {"api_name": "flaskext.assets.Bundle", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "429046196", "text": "from django import forms\n\nfrom .models import \\\n PartnerSet, \\\n TransactionSet, \\\n ProductSet, \\\n ProductItem\n\n\nclass NewClientForm(forms.ModelForm):\n\n class Meta:\n model = PartnerSet\n fields = ('name', 'code')\n\n\nclass NewTransactionForm(forms.ModelForm):\n\n class Meta:\n model = ProductItem\n # items = ProductItem.objects.all()\n\n\n fields = ('code', 'name')\n # fields = ('m1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9', 'm10',\n # 'w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'w8', 'w9', 'w10')\n # widgets = {\n # 'myfield': forms.TextInput(attrs={'class': 'myfieldclass'}),\n # }\n\n\nclass NewStorageForm(forms.ModelForm):\n name = forms.CharField(initial='Storage name')\n _fields = [name]\n items = ProductItem.objects.all()\n for item in items:\n item_field = forms.CharField(max_length=6, name=item.code, label=item.code)\n _fields.append(item_field)\n\n fields = tuple(_fields)\n\n", "sub_path": "app/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.forms.ModelForm", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "models.PartnerSet", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "models.ProductItem", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 33, "usage_type": "name"}, {"api_name": "models.ProductItem.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.ProductItem.objects", 
"line_number": 35, "usage_type": "attribute"}, {"api_name": "models.ProductItem", "line_number": 35, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "394048038", "text": "#!/usr/bin/env python\n\nimport pandas as pd\nimport sys\nimport os\nimport argparse\nimport math\nimport os\nimport altair as alt\nimport pandas as pd\nimport numpy as np\nimport yaml\nimport glob\nfrom yaml import Loader, Dumper\ndef generic_df_reader(args):\n\tif \"npz\" == args.input.split(\".\")[-1]:\n\t\tnpz = np.load('result.npz')\n\t\tdf = pd.DataFrame(npz['matrix'])\n\t\tdf.columns = npz['labels']\n\t\treturn df\n\tif args.sep==\"auto\":\n\t\targs.sep = guess_sep(args.input)\n\tif args.header:\n\t\tif args.index:\n\t\t\tdf = pd.read_csv(args.input,sep=args.sep,index_col=0)\n\t\telse:\n\t\t\tdf = pd.read_csv(args.input,sep=args.sep)\n\telse:\n\t\tif args.index:\n\t\t\tdf = pd.read_csv(args.input,sep=args.sep,index_col=0,header=None)\n\t\telse:\n\t\t\tdf = pd.read_csv(args.input,sep=args.sep,header=None)\n\treturn df\ndef guess_sep(x):\n\twith open(x) as f:\n\t\tfor line in f:\n\t\t\ttmp1 = len(line.strip().split(\",\"))\n\t\t\ttmp2 = len(line.strip().split(\"\\t\"))\n\t\t\t# print (tmp1,tmp2)\n\t\t\tif tmp1 > tmp2:\n\t\t\t\treturn \",\"\n\t\t\tif tmp2 > tmp1: \n\t\t\t\treturn \"\\t\"\n\t\t\telse:\n\t\t\t\tprint (\"Can't determine the separator. Please input manually\")\n\t\t\t\texit()\n\t\t\t\t\ndef zoom_bar(data, zoom_bar_color_by, zoom_bar_title,zoom_width,zoom_bar_x_col,zoom_bar_x_order,color_min_v,color_max_v):\n\t\"\"\"Create one layer heatmap for zoom bar.\n\tParameters\n\t----------\n\tdata :pandas.DataFrame\n\t\tData frame with site and metric value.\n\tzoom_bar_color_by : str\n\t\tColumn in `data` with values to color by.\n\ttitle : str\n\t\tTitle of the plot.\n\tReturns\n\t-------\n\taltair.Chart\n\t\"\"\"\n\tzoom_brush = alt.selection_interval(encodings=['x'], mark=alt.BrushConfig(stroke='black',strokeWidth=2))\n\tzoom = (alt.Chart(data)\n\t\t\t.mark_rect()\n\t\t\t.encode(x=alt.X(f'{zoom_bar_x_col}:O',\n\t\t\t\t\t\t sort=zoom_bar_x_order),\n\t\t\t\t\tcolor=alt.Color(zoom_bar_color_by, \n\t\t\t\t\t\t\t\t\t scale=alt.Scale(scheme='greys', \n\t\t\t\t\t\t\t\t\t\t\t\t\t domain=[color_min_v,color_max_v]),\n\t\t\t\t\t\t\t\t\tlegend=alt.Legend(orient='left',\n\t\t\t\t\t\t\t\t\t\t\t\t\t labelFontSize=15,\n\t\t\t\t\t\t\t\t\t\t\t\t\t titleFontSize=16,\n\t\t\t\t\t\t\t\t\t\t\t\t\t title=zoom_bar_title)))\n\t\t\t.add_selection(zoom_brush)\n\t\t\t.properties(width=zoom_width,\n\t\t\t\t\t\ttitle='zoom bar'))\n\treturn zoom,zoom_brush\ndef DMS_heatmaps(data,tooltips,heatmap_color_by,heatmap_x_col,heatmap_x_order,heatmap_y_col,heatmap_y_order,color_min_v,color_max_v,heatmap_star_annotation_col,heatmap_height,zoom_brush):\n\t\"\"\"Create main heatmap for one condition.\n\tThe heatmap is the results of three layers.\n\t*heatmap* is the main DMS data\n\t*wildtype* marks wildtype data with an 'x'\n\t*nulls* creates grey cells for missing data.\n\tIf you exclude nulls, missing data is white, \n\twhich is appropriate for some color schemes\n\tbut not all.\n\tParameters\n\t----------\n\tdata :pandas.DataFrame\n\t\tMain dataframe\n\theatmap_color_by : str\n\t\tColumn in `data` with values to color by.\n\ttooltips : list\n\t\tColumn values to show when mouse hover\n\tReturns\n\t-------\n\taltair.Chart\n\t\"\"\"\n\tcell_selector = alt.selection_single(on='mouseover',empty='none')\n\t# 
zoom_brush = alt.selection_interval(encodings=['x'], mark=alt.BrushConfig(stroke='black',strokeWidth=2))\n\t# tmp = data.sort_values(\"pos2\")\n\t# tmp = tmp.drop_duplicates(\"pos\")\n\t# pos_oder = tmp.pos.tolist()\n\t# tooltips = ['mutation','log2FoldChange','pvalue','padj']\n\t# everything is site v mutant\n\tbase = (alt.Chart(data)\n\t\t\t.encode(x=alt.X(f'{heatmap_x_col}:O',\n\t\t\t\t\t\t\t sort=heatmap_x_order,\n\t\t\t\t\t\t\t axis=alt.Axis(titleFontSize=15)),\n\t\t\t\t\ty=alt.Y(f'{heatmap_y_col}:O',\n\t\t\t\t\t\t\t sort=heatmap_y_order,\n\t\t\t\t\t\t\taxis=alt.Axis(labelFontSize=12,\n\t\t\t\t\t\t\t\t\t\t titleFontSize=15))\n\t\t\t\t )\n\t\t )\n\theatmap = (base\n\t\t\t .mark_rect()\n\t\t\t .encode(color=alt.Color(heatmap_color_by,\n\t\t\t\t\t\t\t\t\t type='quantitative', \n\t\t\t\t\t\t\t\t\t scale=alt.Scale(range=[\"#0505ff\",'#afecfa', \"#fafafa\",\"#fff6c2\", \"#fc0303\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t type=\"linear\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t exponent=4,\n\t\t\t\t\t\t\t\t\t\t\t\t\t domain=[color_min_v,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t color_max_v],\n\t\t\t\t\t\t\t\t\t\t\t\t\t ),\n\t\t\t\t\t\t\t\t\t legend=alt.Legend(orient='left',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tgradientLength=100)),\n\t\t\t\t\t stroke=alt.value('black'),\n\t\t\t\t\t strokeWidth=alt.condition(cell_selector,\n\t\t\t\t\t\t\t\t\t\t\t\t alt.value(2),\n\t\t\t\t\t\t\t\t\t\t\t\t alt.value(0)),\n\t\t\t\t\t tooltip=tooltips\n\t\t\t\t\t )\n\t\t\t )\n\t\n\ttext = base.mark_text(color='black').encode(\n\t\ttext=f'{heatmap_star_annotation_col}:N'\n\t\t)\n\tnulls = (base\n\t\t\t .mark_rect()\n\t\t\t .transform_filter(f\"!isValid(datum.{heatmap_color_by})\")\n\t\t\t .mark_rect(opacity=0.5)\n\t\t\t .encode(alt.Color(f'{heatmap_color_by}:N',\n\t\t\t\t\t\t\t scale=alt.Scale(scheme='greys'),\n\t\t\t\t\t\t\t legend=None)\n\t\t\t\t\t)\n\t\t\t)\n\t\n\treturn ((heatmap + nulls +text)\n\t\t\t.interactive()\n\t\t\t.add_selection(cell_selector) # mouse over highlighting\n\t\t\t.transform_filter(zoom_brush) # add zoom bar filtering\n\t\t\t.properties(height=heatmap_height, title=' '.join(heatmap_color_by.split('_'))))\ndef my_args():\n\tmainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tmainParser.add_argument('-f',\"--input\", help=\"data table to be plot\",required=True)\n\tmainParser.add_argument('-o',\"--output\", help=\"output visualization html file\",required=True)\n\tmainParser.add_argument(\"--reformat_config\", help=\"reformat data table\",default=None)\n\tmainParser.add_argument('--header', help=\"data table has header\", action='store_true')\n\tmainParser.add_argument('--index', help=\"data table has index\", action='store_true')\n\tmainParser.add_argument('--sep', help=\"data table separator\", default=\"auto\")\n\n\t# mainParser.add_argument('-s',\"--sample_list\", help=\"table rows, a list of samples, these are supposed to be folder names, one column\",required=True)\n\t# mainParser.add_argument('-f','--feature_list', help=\"table columns, map file name to specific feature name\",required=True)\n\t# mainParser.add_argument('--softlinks', help=argparse.SUPPRESS,default=\"\")\n\t# mainParser.add_argument('--treatment_bam', help=argparse.SUPPRESS)\n\t# mainParser.add_argument('--port', help=argparse.SUPPRESS)\n\n\t##------- add parameters above ---------------------\n\targs = mainParser.parse_args()\t\n\treturn args\n\ndef parse_file_kasey(f):\n df = pd.read_csv(f,sep=\"\\t\")\n df['pos'] = [x[:-1] for x in df.mutation]\n df['pos2'] = [int(x[1:-1]) for x in df.mutation]\n df['mutant'] = 
[x[-1] for x in df.mutation]\n df['sig'] = df.apply(lambda r:abs(r.log2FoldChange)>1 and r.BF,axis=1)\n df['BF'] = df.BF.map({True:\"*\",False:\"\"})\n df.sig = df.pos.map(df.groupby(\"pos\")['sig'].sum().to_dict())\n return df\n\ndef get_plot_parameters(f):\n\tif not os.path.isfile(f):\n\t\tprint (f\"{f} not exist\")\n\t\texit()\n\treturn yaml.load(open(f),Loader=Loader)\n\nargs = my_args()\nif args.reformat_config == \"kasey\":\n df = parse_file_kasey(args.input)\n args.reformat_config = \"/home/yli11/HemTools/share/misc/interactive_heatmap.kasey.yaml\"\nelse:\n df = generic_df_reader(args)\n\n# plot parameters and pre-process some variables, such as x-order\nplot_parameters = get_plot_parameters(args.reformat_config)\n# print (plot_parameters)\nglobals().update(plot_parameters)\n# print(globals())\n# print (tooltips)\ntooltips = tooltips.split(\",\")\nzoom_bar_x_order,ascending = zoom_bar_x_order.split(\",\")\nzoom_bar_x_order = df.sort_values(zoom_bar_x_order,ascending=int(ascending)).drop_duplicates(zoom_bar_x_col)[zoom_bar_x_col].tolist()\n\nheatmap_x_order,ascending = heatmap_x_order.split(\",\")\nheatmap_x_order = df.sort_values(heatmap_x_order,ascending=int(ascending)).drop_duplicates(heatmap_x_col)[heatmap_x_col].tolist()\n\nheatmap_y_order,ascending = heatmap_y_order.split(\",\")\nheatmap_y_order = df.sort_values(heatmap_y_order,ascending=int(ascending)).drop_duplicates(heatmap_y_col)[heatmap_y_col].tolist()\n\nif heatmap_star_annotation_col==\"\":\n df['empty'] = \"\"\n heatmap_star_annotation_col = \"empty\"\n\n\n# main functions\nzoom,zoom_brush = zoom_bar(df, zoom_bar_color_by, zoom_bar_title,zoom_width,zoom_bar_x_col,zoom_bar_x_order,zoom_bar_color_min_v,zoom_bar_color_max_v)\nexpression = DMS_heatmaps(df, tooltips,heatmap_color_by,heatmap_x_col,heatmap_x_order,heatmap_y_col,heatmap_y_order,heatmap_color_min_v,heatmap_color_max_v,heatmap_star_annotation_col,heatmap_height,zoom_brush)\n\n# save chart\nchart = (alt.vconcat(zoom, expression, spacing=0)\n .configure_title(anchor='start',\n fontSize=20))\nchart.save(args.output)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "bin/interactive_heatmap.py", "file_name": "interactive_heatmap.py", "file_ext": "py", "file_size_in_byte": 8164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "altair.selection_interval", "line_number": 62, "usage_type": "call"}, {"api_name": "altair.BrushConfig", "line_number": 62, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 63, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 65, "usage_type": "call"}, {"api_name": "altair.Color", "line_number": 67, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 68, "usage_type": "call"}, {"api_name": "altair.Legend", "line_number": 70, "usage_type": "call"}, {"api_name": "altair.selection_single", "line_number": 99, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 106, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 107, "usage_type": "call"}, {"api_name": "altair.Axis", 
"line_number": 109, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 110, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 112, "usage_type": "call"}, {"api_name": "altair.Color", "line_number": 118, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 120, "usage_type": "call"}, {"api_name": "altair.Legend", "line_number": 126, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 128, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 129, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 130, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 131, "usage_type": "call"}, {"api_name": "altair.Color", "line_number": 143, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 144, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 155, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 187, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 187, "usage_type": "name"}, {"api_name": "altair.vconcat", "line_number": 222, "usage_type": "call"}]} +{"seq_id": "408478612", "text": "from flask import Flask, redirect, render_template, request, url_for, send_from_directory\nfrom datetime import datetime\nfrom contact import Contact\nfrom user import User\nfrom database import database\nimport os\nimport logging\n\napp = Flask(__name__)\nlogging.basicConfig(level=logging.DEBUG)\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico')\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/register', methods=['GET'])\ndef register_get():\n return render_template('register.html')\n\n\n@app.route('/register', methods=['POST'])\ndef register_post():\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n values = (\n None,\n request.form['username'],\n request.form['number'],\n User.hash_password(request.form['password']),\n timestamp\n )\n # i = User(*values).create()\n i = database.create_user(User(*values))\n # if i == 0:\n # return redirect('/register')\n return redirect('/')\n\n\n@app.route('/login', methods=['GET'])\ndef login_get():\n return render_template('login.html')\n\n\n@app.route('/login', methods=['POST'])\ndef login_post():\n username = request.form['username']\n password = request.form['password']\n user = database.get_user_by_name(username)\n # number = User.find_by_number(auth)\n if user is not None:\n if user.verify_password(password) is True:\n return redirect(url_for('display_contacts', user_id=user.get_id()))\n else:\n return 'Unsuccessful login'\n # return redirect('/login')\n # elif number is not None:\n # if number.verify_password(password) is True:\n # return redirect(url_for('display_contacts', user_id=number.id))\n # else:\n # return redirect('/login')\n # TODO upgrade this\n return 'No such user found. 
Have you registered?'\n\n\n@app.route('/contacts/<user_id>', methods=['GET'])\ndef display_contacts(user_id):\n user = database.get_user_by_id(user_id)\n ping(user)\n contacts = database.get_contacts_by_user_id(user)\n\n if contacts is None:\n app.logger.info('No contacts with this user id')\n return render_template('contacts.html', user_id=user.get_id())\n elif contacts is False:\n app.logger.error('Error getting contact by user id')\n return 'Error getting contact by user id'\n app.logger.info('At least 1 contact with this user id exists')\n return render_template('contacts.html', contacts=contacts, user_id=user.get_id())\n\n\n@app.route('/contacts/<user_id>/create', methods=['GET'])\ndef create_contact_get(user_id):\n user = database.get_user_by_id(user_id)\n ping(user)\n return render_template('create_contact.html', user_id=user.get_id())\n\n\n@app.route('/contacts/<user_id>/create', methods=['POST'])\ndef create_contact_post(user_id):\n user = database.get_user_by_id(user_id)\n ping(user)\n values = (None, request.form['Name'], request.form['Number'], request.form['Note'], user.get_id())\n database.create_contact(Contact(*values))\n return redirect(url_for('display_contacts', user_id=user.get_id()))\n\n\n@app.route('/contacts/<user_id>/<contact_id>', methods=['GET'])\ndef display_contact(user_id, contact_id):\n user = database.get_user_by_id(user_id)\n ping(user)\n contact = database.get_contact_by_id(contact_id)\n if contact is None:\n app.logger.error('No contact with this id.')\n return render_template('contact.html', user_id=user.get_id(), contact=contact)\n\n\n@app.route('/contacts/<user_id>/<contact_id>', methods=['POST'])\ndef update_contact(user_id, contact_id):\n user = database.get_user_by_id(user_id)\n ping(user)\n contact = database.get_contact_by_id(contact_id)\n try:\n if request.form['Update_button'] is not None:\n values = (contact_id, request.form['Name'], request.form['Number'], request.form['Note'], user.get_id())\n database.update_contact(Contact(*values))\n except KeyError:\n app.logger.info('KeyError exception encountered when updating contact.')\n try:\n if request.form['Delete_button'] is not None:\n database.delete_contact(database.get_contact_by_id(contact_id))\n except KeyError:\n app.logger.error('KeyError exception encountered when deleting contact.')\n except:\n app.logger.error('Unidentified exception encountered when deleting contact.')\n except:\n app.logger.info('Unidentified exception encountered when updating contact.')\n return redirect(url_for('display_contacts', user_id=user.get_id()))\n\n\n@app.route('/contacts/<user_id>/myinfo')\ndef display_user_info(user_id):\n user = database.get_user_by_id(user_id)\n if user is None or user is False:\n # TODO check if we ever enter here\n return 'error'\n username = user.get_name()\n number = user.get_number()\n return render_template('user_info.html', user=user, username=username, number=number)\n\n\ndef ping(user):\n database.ping(user)\n\n\nif __name__ == \"__main__\":\n app.run()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "flask.send_from_directory", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "user.User.hash_password", "line_number": 35, "usage_type": "call"}, {"api_name": "user.User", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "database.database.create_user", "line_number": 39, "usage_type": "call"}, {"api_name": "database.database", "line_number": 39, "usage_type": "name"}, {"api_name": "user.User", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "database.database.get_user_by_name", "line_number": 54, "usage_type": "call"}, {"api_name": "database.database", "line_number": 54, "usage_type": "name"}, {"api_name": "user.verify_password", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 58, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 58, "usage_type": "call"}, {"api_name": "database.database.get_user_by_id", "line_number": 73, "usage_type": "call"}, {"api_name": "database.database", "line_number": 73, "usage_type": "name"}, {"api_name": "database.database.get_contacts_by_user_id", "line_number": 75, "usage_type": "call"}, {"api_name": "database.database", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 79, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 84, "usage_type": "call"}, {"api_name": "database.database.get_user_by_id", "line_number": 89, "usage_type": "call"}, {"api_name": "database.database", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 91, "usage_type": "call"}, {"api_name": "database.database.get_user_by_id", "line_number": 96, "usage_type": "call"}, {"api_name": "database.database", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "user.get_id", "line_number": 98, "usage_type": "call"}, {"api_name": "database.database.create_contact", "line_number": 99, 
"usage_type": "call"}, {"api_name": "database.database", "line_number": 99, "usage_type": "name"}, {"api_name": "contact.Contact", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 100, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 100, "usage_type": "call"}, {"api_name": "database.database.get_user_by_id", "line_number": 105, "usage_type": "call"}, {"api_name": "database.database", "line_number": 105, "usage_type": "name"}, {"api_name": "database.database.get_contact_by_id", "line_number": 107, "usage_type": "call"}, {"api_name": "database.database", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 110, "usage_type": "call"}, {"api_name": "database.database.get_user_by_id", "line_number": 115, "usage_type": "call"}, {"api_name": "database.database", "line_number": 115, "usage_type": "name"}, {"api_name": "database.database.get_contact_by_id", "line_number": 117, "usage_type": "call"}, {"api_name": "database.database", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "user.get_id", "line_number": 120, "usage_type": "call"}, {"api_name": "database.database.update_contact", "line_number": 121, "usage_type": "call"}, {"api_name": "database.database", "line_number": 121, "usage_type": "name"}, {"api_name": "contact.Contact", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 125, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 125, "usage_type": "name"}, {"api_name": "database.database.delete_contact", "line_number": 126, "usage_type": "call"}, {"api_name": "database.database", "line_number": 126, "usage_type": "name"}, {"api_name": "database.database.get_contact_by_id", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 133, "usage_type": "call"}, {"api_name": "user.get_id", "line_number": 133, "usage_type": "call"}, {"api_name": "database.database.get_user_by_id", "line_number": 138, "usage_type": "call"}, {"api_name": "database.database", "line_number": 138, "usage_type": "name"}, {"api_name": "user.get_name", "line_number": 142, "usage_type": "call"}, {"api_name": "user.get_number", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 144, "usage_type": "call"}, {"api_name": "database.database.ping", "line_number": 148, "usage_type": "call"}, {"api_name": "database.database", "line_number": 148, "usage_type": "name"}]} +{"seq_id": "610188336", "text": "#!/usr/bin/env python\r\ntry:\r\n\timport tkinter\r\n\tfrom tkinter import ttk\r\n\tfrom tkinter import *\r\nexcept ImportError:\r\n\timport Tkinter\r\n\tfrom Tkinter import ttk\r\n\tfrom Tkinter import *\r\n\r\nimport cv2\r\nimport PIL.Image, PIL.ImageTk\r\n\r\nimport numpy as np\r\nfrom keras.models import model_from_json\r\nfrom keras.preprocessing import image\r\n\r\n# load model\r\nmodel = model_from_json(open(\"fer.json\", \"r\").read())\r\n# load 
weights\r\nmodel.load_weights('fer.h5')\r\n\r\nface_haar_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\nclass App:\r\n\tdef __init__(self, window, window_title, video_source=0):\r\n\t\tself.window = window\r\n\t\tself.window.title(window_title)\r\n\t\tself.video_source = video_source\r\n\t\t\r\n\t\t# open video source (by default this will try to open the computer webcam)\r\n\t\tself.vid = MyVideoCapture(self.video_source)\r\n\t\t\t\t\r\n\t\tframe0 = Frame(self.window, width=800, height=600, bd=1)\r\n\t\tframe0.pack()\r\n\r\n\t\tframe1 = Frame(frame0, bd=2, relief=RAISED)\r\n\t\tframe1.pack(expand=1, fill=X, pady=10, padx=5)\r\n\t\t\r\n\t\tcanvas1 = Canvas(frame1, bg='yellow', width=800, height=20)\r\n\t\tcanvas1.pack()\r\n\t\t\r\n\t\tself.canvas = tkinter.Canvas(frame1, width=400, height=300)\r\n\t\tself.canvas.pack(padx=5, pady=10, side=tkinter.LEFT, anchor=NW)\r\n\t\t\t\t\t\r\n\t\tcanvas1.create_text(400, 10, text='NonLutte - Facial Expression Recognition App', font=('verdana', 20, 'bold'))\r\n\t\t\r\n\t\tcanvas2 = Canvas(frame1, bg='gray', width=400, height=300)\r\n\t\tcanvas2.create_text(75, 20, text='Video feed unavailable', font=('verdana', 10, 'bold'))\r\n\t\tcanvas2.pack(padx=5, pady=10, side=tkinter.LEFT)\r\n\t\t\r\n\t\tcanvas3 = Canvas(frame1, bg='gray', width=400, height=300)\r\n\t\tcanvas3.create_text(75, 20, text='Video feed unavailable', font=('verdana', 10, 'bold'))\t\t\r\n\t\tcanvas3.pack(padx=5, pady=10, side=tkinter.LEFT, anchor=SW)\t\t\r\n\r\n# \t\tcanvas4 = Canvas(frame1, bg='gray', width=400, height=300)\r\n# \t\tcanvas4.pack(padx=5, pady=10, side=tkinter.RIGHT, anchor=SE)\t\r\n\t\t\r\n\t\tframe1.pack(expand=1, fill=X, pady=10, padx=5)\t\t\r\n\r\n# \t\t\r\n# \t\t# Create a canvas that can fit the above video source size\r\n# \t\t#self.canvas = tkinter.Canvas(window, width = self.vid.width, height = self.vid.height)\r\n# \t\tself.canvas = tkinter.Canvas(window, width = 800, height = 600)\r\n\t\t\r\n\t\tbtn = tkinter.Button(self.window, text=\"Close\", command=self.window.destroy)\r\n\t\tbtn.pack(side=\"bottom\", padx=10, pady=10)\t\t\t\r\n\t\t\r\n\t\tself.pb = ttk.Progressbar(self.window, orient=\"horizontal\", length=750, mode=\"determinate\", value=0)\r\n\t\tself.pb.pack()\t\r\n\t\t\r\n\t\t# After it is called once, the update method will be automatically called every delay milliseconds\r\n\t\tself.delay = 15\r\n\t\tself.update()\r\n\t\t\r\n\t\tself.window.mainloop()\r\n\t\r\n\tdef update(self):\r\n\t\t# Get a frame from the video source\r\n\t\tret, frame = self.vid.get_expression()\r\n\t\t\r\n\t\tif ret:\r\n\t\t\tself.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))\r\n\t\t\tself.canvas.create_image(0, 0, image = self.photo, anchor = tkinter.NW)\r\n\r\n\t\t\tself.pb['value'] = float(np.random.randint(0, 100 + 1))\r\n\t\t\r\n\t\tself.window.after(self.delay, self.update)\r\n\t\t\r\n\r\nclass MyVideoCapture:\r\n\t\r\n\tdef __init__(self, video_source=0):\r\n\t\t# Open the video source\r\n\t\tself.vid = cv2.VideoCapture(video_source)\r\n\t\t\r\n\t\tif not self.vid.isOpened():\r\n\t\t\traise ValueError(\"Unable to open video source\", video_source)\r\n\t\r\n\t\t# Get video source width and height\r\n\t\tself.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\r\n\t\tself.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\r\n\t\r\n\r\n\tdef get_expression(self):\r\n\t\twhile True:\r\n\r\n\t\t\tcap = cv2.VideoCapture(0)\r\n\t\t\tret, test_img = cap.read() # captures frame and returns boolean value and captured image\r\n\t\t 
\r\n\t\t\tif not ret:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tgray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\r\n\t\t\r\n\t\t\tfaces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)\r\n\t\t\r\n\t\t\tfor (x, y, w, h) in faces_detected:\r\n\t\t\t\tcv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)\r\n\t\t\t\troi_gray = gray_img[y:y + w, x:x + h] # cropping region of interest i.e. face area from image\r\n\t\t\t\troi_gray = cv2.resize(roi_gray, (48, 48))\r\n\t\t\t\timg_pixels = image.img_to_array(roi_gray)\r\n\t\t\t\timg_pixels = np.expand_dims(img_pixels, axis=0)\r\n\t\t\t\timg_pixels /= 255\r\n\t\t\r\n\t\t\t\tpredictions = model.predict(img_pixels)\r\n\t\t\r\n\t\t # find max indexed array\r\n\t\t\t\tmax_index = np.argmax(predictions[0])\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t#self.cv2.create_text(400, 10, text=max_index, font=('verdana', 20, 'bold'))\r\n\r\n\t\t\t\temotions = ('anger', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')\r\n\t\t\t\tpredicted_emotion = emotions[max_index]\r\n\t\t\r\n\t\t\t\tcv2.putText(test_img, predicted_emotion, (int(x+20), int(y-20)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n\t\t\r\n\t\t\tresized_img = cv2.resize(test_img, (400, 300))\r\n\t\t\treturn (ret, cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB))\t\r\n\t\t\r\n\t\r\n\t# Release the video source when the object is destroyed\r\n\tdef __del__(self):\r\n\t\tif self.vid.isOpened():\r\n\t\t\tself.vid.release()\r\n\r\n# Create a window and pass it to the Application object\r\nApp(tkinter.Tk(), \"NonLutte - Facial Expression Recognition App\")\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "VisualAI_EmotionDetection_Final_APP.py", "file_name": "VisualAI_EmotionDetection_Final_APP.py", "file_ext": "py", "file_size_in_byte": 5039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "keras.models.model_from_json", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 66, "usage_type": "call"}, {"api_name": "Tkinter.ttk.Progressbar", "line_number": 69, "usage_type": "call"}, {"api_name": "Tkinter.ttk", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 86, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 102, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 108, "usage_type": "call"}, {"api_name": 
"cv2.cvtColor", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 114, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 136, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 136, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 138, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "12236195", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nimport datetime\nfrom django.utils import timezone\n# from django.utils.html import mark_safe\nfrom .thumbs import ImageWithThumbsField\n\nclass Department(models.Model):\n dept_code = models.CharField(max_length=3)\n dept_name = models.CharField(max_length=50)\n\n def __str__(self):\n return self.dept_name\n\nclass Course(models.Model):\n course_code = models.CharField(max_length=6)\n course_name = models.CharField(max_length=50)\n dept_fk = models.ForeignKey(Department, on_delete=models.CASCADE)\n #dept_fk = models.ManyToManyField(Department, on_delete=models.SET_NULL)\n course_desc = models.TextField('Course Description',max_length=100)\n \n def __str__(self):\n return self.course_name\n\nclass Student(models.Model):\n user = models.ForeignKey(User,on_delete=models.CASCADE, null=True, blank=True)\n # course_fk = models.ManyToManyField(Course) #, on_delete=models.CASCADE)\n # dept_fk = models.ForeignKey(Department, on_delete=models.CASCADE,null=True,blank=True)\n birth_date = models.DateField(null=True, blank=True)\n phone_no = models.IntegerField(default=0)\n firstname = models.CharField(max_length=20, null=True, blank=True)\n lastname = models.CharField(max_length=20, null=True, blank=True)\n email = models.EmailField(max_length=50, null=True, blank=True)\n\n # def assign_things(self)\n # user.first_name = self.firstname\n # user.last_name = self.lastname\n # user.email = self.email\n\n # def __str__(self):\n # return self.user.first_name + self.user.last_name\n\n# def create_profile(sender,**kwargs):\n# if kwargs['created']:\n# user_profile=Student.objects.create(user=kwargs['instance'])\n\n# post_save.connect(create_profile,sender=User)\n\n# @receiver(post_save, sender=User)\n# def create_user_profile(sender, instance, created, **kwargs):\n# if created:\n# Student.objects.create(user=instance)\n\n# @receiver(post_save, sender=User)\n# def save_user_profile(sender, instance, **kwargs):\n# instance.profile.save()\n\n# class QuestionBank(models.Model):\n# question_fk = models.ForeignKey('Course', Course, on_delete=models.CASCADE)\n# def __str__(self):\n# return self.course_fk.course_code\n\nclass Exam(models.Model):\n exam_name = models.CharField(max_length=40)\n course_fk = models.ForeignKey(Course, verbose_name='Course', 
on_delete=models.CASCADE, null=True, blank=True)\n # question_fk = models.ManyToManyField(Question)\n\n time_limit = models.DurationField()\n pub_date = models.DateTimeField('Date Published', auto_now_add=True, editable=False)\n\n def was_published_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(days=1) <= self.pub_date <= now\n\n was_published_recently.short_description = 'Recently Published?'\n was_published_recently.boolean = True\n was_published_recently.admin_order_field = 'pub_date'\n\n def __str__(self):\n return self.exam_name\n\n\nclass Question(models.Model):\n qn_text = models.TextField('Question Description',max_length=200)\n qn_image = ImageWithThumbsField('Question Image', upload_to='img/', sizes=((125,125),(300,200)))\n # qn_bank = models.ForeignKey(QuestionBank, on_delete=models.CASCADE, verbose_name='IN QNbank')\n exams = models.ManyToManyField(Exam)\n course_fk = models.ForeignKey(Course, verbose_name='Course', on_delete=models.CASCADE, null=True, blank=True)\n pub_date = models.DateTimeField('date published', auto_now_add=True, editable=False)\n # correct_choice = models.ForeignKey(Choice)\n\n def __str__(self):\n return self.qn_text[:20]\n\n # def image_tag(self):\n # from django.utils.html import escape\n # return u'<img src=\"%s\" />' % escape(self.qn_image)\n # image_tag.short_description = 'Image'\n # image_tag.allow_tags = True\n\n # def image_img(self):\n # if self.image:\n # return mark_safe('<img src=\"%s\" />' % self.qn_image.url_125x125)\n # else:\n # return '(No image)'\n # image_img.short_description = 'Thumb'\n\n # def image_tag(self):\n # return mark_safe('<img src=\"%s\" alt=\"Question Image\" />' % (self.qn_image))\n\n def was_published_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(days=1) <= self.pub_date <= now\n\n was_published_recently.short_description = 'Recently Published?'\n was_published_recently.boolean = True\n was_published_recently.admin_order_field = 'pub_date'\n\nclass Choice(models.Model):\n question = models.ForeignKey(Question, on_delete=models.CASCADE)\n choice_text = models.CharField(max_length=200)\n is_correct = models.BooleanField('Correct Answer', default=False)\n\n def __str__(self):\n return self.choice_text\n\nclass Result(models.Model):\n exam_fk = models.ForeignKey(Exam, on_delete=models.CASCADE)\n student_fk = models.ForeignKey(Student, on_delete=models.CASCADE,null=True,blank=True)\n # question = models.ForeignKey(Question, on_delete=models.CASCADE)\n choice = models.ForeignKey(Question, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.student_fk.user.username\n\n", "sub_path": "exam_system/stud_app/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 5322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models.TextField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.db.models.DateField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.db.models.DurationField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 75, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 75, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 86, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "thumbs.ImageWithThumbsField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 90, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 91, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 115, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 115, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 116, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 123, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 123, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 124, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 124, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 130, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 131, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 131, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 132, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 132, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 134, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 134, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 134, "usage_type": "attribute"}]} +{"seq_id": "404992545", "text": "import unittest\nfrom common import logger,login_token,base\nfrom data.readexcel import ExcelUtil\n\n\ndata = ExcelUtil(\"MembershipSubscription\").dict_data()\nclass Detailsofpayment(unittest.TestCase):\n\n def setUp(self):\n self.log = logger.Log()\n\n def test_details_of_payment(self):\n '''Get member withdrawal details'''\n route = data[4][\"route\"]\n url = \"\".join(base.get_url(route))\n token = login_token.login().get_token()\n header = eval(data[4][\"header\"])\n header[\"token\"] = token\n kwargs = {\"json\": token, \"headers\": header}\n Method = data[4][\"method\"]\n resp = base.get_response(url,Method,**kwargs)\n self.log.info(\"--------start--------\")\n self.assertIn(data[4][\"expect\"], resp.text, msg=\"Failure reason: %s not in %s\" % (data[4][\"expect\"], resp.text))\n self.log.info(\"------test passed------\")\n self.log.info(\"---------end---------\")\n\nif __name__ == \"__main__\":\n unittest.main()", "sub_path": "java_auto_project/case/FundManagement(资金管理)/MemberWithdrawals(会员提款)/test_details_of_payment.py", "file_name": "test_details_of_payment.py", "file_ext": "py", "file_size_in_byte": 976, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "data.readexcel", "line_number": 6, "usage_type": "name"}, {"api_name": "data.readexcel.ExcelUtil", "line_number": 6, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "common.logger.Log", "line_number": 10, "usage_type": "call"}, {"api_name": "common.logger", "line_number": 10, "usage_type": "name"}, {"api_name": "data.readexcel", "line_number": 14, "usage_type": "name"}, {"api_name": "common.base.get_url", "line_number": 15, "usage_type": "call"}, {"api_name": "common.base", "line_number": 15, "usage_type": "name"}, {"api_name": "common.login_token.login", "line_number": 16, "usage_type": "call"}, {"api_name": "common.login_token", "line_number": 16, "usage_type": "name"}, {"api_name": "data.readexcel", "line_number": 17, "usage_type": "name"}, {"api_name": "data.readexcel", "line_number": 20, "usage_type": "name"}, {"api_name": "common.base.get_response", "line_number": 21, "usage_type": "call"}, {"api_name": "common.base", "line_number": 21, "usage_type": "name"}, {"api_name": "data.readexcel", "line_number": 23, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "639110471", "text": "'''\nTasks of fcn.py\n1. Implement bilinear interpolation \n2. Implement FCN itself \n(人 •͈ᴗ•͈) ۶♡♡\n'''\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torchvision import models\n\n# Bilinear interpolation\ndef Bilinear_interpolation (src, new_size):\n '''\n Enlarge an image with bilinear interpolation\n params:\n src(np.ndarray): input image\n new_size(tuple): target size\n ret:\n dst(np.ndarray): output image\n '''\n dst_h, dst_w = new_size # h, w of the target image\n src_h, src_w = src.shape[:2] # h, w of the source image\n\n # If the size already matches, no scaling is needed; just copy\n if src_h == dst_h and src_w == dst_w:\n return src.copy()\n\n scale_x = float(src_w) / dst_w\n scale_y = float(src_h) / dst_h\n\n # Iterate over every pixel of the target image\n ## Build an empty image of the target size and fill it by interpolation\n dst = np.zeros((dst_h,dst_w,3),dtype=np.int8)\n ## It is a color image, so loop over three levels: a. the rgb channels b. height c. width\n for n in range(3):\n for dst_y in range(dst_h):\n for dst_x in range(dst_w):\n \n # Coordinates of the target pixel on the source image: src+0.5 = (dst_x + 0.5) *scale_x\n # The 0.5 offset ensures no pixels are missed when the image is shrunk; details: https://www.cnblogs.com/kk17/p/9989984.html\n src_x = (dst_x + 0.5)*scale_x -0.5\n src_y = (dst_y + 0.5)*scale_y -0.5\n\n # Compute the positions of the 4 neighboring points of this pixel in the source image\n src_x_0 = int(np.floor(src_x)) #*floor() rounds down, ex: floor(1.2) = 1.0\n src_y_0 = int(np.floor(src_y))\n src_x_1 = min(src_x_0 + 1, src_w - 1 ) # Avoid going out of bounds\n src_y_1 = min(src_y_0 + 1, src_h - 1 )\n\n\n'''\nInitialize the deconvolution kernel\n'''\ndef bilinear_kernel(in_channels, out_channels, kernel_size):\n \"\"\"Define a bilinear kernel according to in channels and out channels.\n Returns:\n return a bilinear filter tensor\n \"\"\"\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n bilinear_filter = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float32)\n weight[range(in_channels), range(out_channels), :, :] = bilinear_filter\n return torch.from_numpy(weight)\n\npretrained_net = models.vgg16_bn(pretrained=False)\n\n# FCN itself, corresponds to fcn.png\nclass FCN(nn.Module):\n def __init__(self, num_classes):\n super().__init__() # On the usage of super in torch: https://blog.csdn.net/genous110/article/details/90105497\n \n\n self.stage1 = pretrained_net.features[:7]\n self.stage2 = pretrained_net.features[7:14]\n self.stage3 = 
pretrained_net.features[14:24]\n self.stage4 = pretrained_net.features[24:34]\n self.stage5 = pretrained_net.features[34:]\n\n self.scores1 = nn.Conv2d(512, num_classes, 1)\n self.scores2 = nn.Conv2d(512, num_classes, 1)\n self.scores3 = nn.Conv2d(128, num_classes, 1)\n\n self.conv_trans1 = nn.Conv2d(512, 256, 1)\n self.conv_trans2 = nn.Conv2d(256, num_classes, 1)\n\n self.upsample_8x = nn.ConvTranspose2d(num_classes, num_classes, 16, 8, 4, bias=False)\n self.upsample_8x.weight.data = bilinear_kernel(num_classes, num_classes, 16)\n\n self.upsample_2x_1 = nn.ConvTranspose2d(512, 512, 4, 2, 1, bias=False)\n self.upsample_2x_1.weight.data = bilinear_kernel(512, 512, 4)\n\n self.upsample_2x_2 = nn.ConvTranspose2d(256, 256, 4, 2, 1, bias=False)\n self.upsample_2x_2.weight.data = bilinear_kernel(256, 256, 4)\n\n def forward(self, x):\n s1 = self.stage1(x)\n s2 = self.stage2(s1)\n s3 = self.stage3(s2)\n s4 = self.stage4(s3)\n s5 = self.stage5(s4)\n\n scores1 = self.scores1(s5)\n s5 = self.upsample_2x_1(s5)\n add1 = s5 + s4\n\n scores2 = self.scores2(add1)\n\n add1 = self.conv_trans1(add1)\n add1 = self.upsample_2x_2(add1)\n add2 = add1 + s3\n\n output = self.conv_trans2(add2)\n output = self.upsample_8x(output)\n return output\n\n\n\nif __name__ == \"__main__\":\n rgb = torch.randn(1, 3, 352, 480)\n net = FCN(12)\n out = net(rgb)\n print('喵喵喵喵喵喵喵喵---------------')\n print(out.shape)", "sub_path": "models/fcn.py", "file_name": "fcn.py", "file_ext": "py", "file_size_in_byte": 4413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.ogrid", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 70, "usage_type": "call"}, {"api_name": "torchvision.models.vgg16_bn", "line_number": 72, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", 
"line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "412799970", "text": "import numpy as np\nfrom statistics import mode\nfrom sklearn.model_selection import KFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\n\n\nclass Ensamble:\n\n # Constructor para inicializar datos\n def __init__(self):\n # Clasificadores\n self.clf_1: KNeighborsClassifier = None\n self.clf_2: GaussianNB = None\n self.clf_3: MLPClassifier = None\n\n # ¿Utilizar PCA en clf1?\n self.pca_1: bool = False\n # ¿Utilizar PCA en clf1?\n self.pca_2: bool = False\n\n # Predicción final\n self.pred_final: list = []\n self.clases: list = []\n\n # Resultados\n self.resultados: list = []\n\n # Inicializar primer clasificador\n def k_vecinos(self, n_vecinos: int, pca: bool = False):\n self.clf_1 = KNeighborsClassifier(n_neighbors=n_vecinos)\n self.pca_1 = pca\n\n # Inicializar segundo clasificador\n def nativa_Bayes(self, pca: bool = False):\n self.clf_2 = GaussianNB()\n self.pca_2 = pca\n\n # Inicializar tercer clasificador\n def red_neuronal(self, capas_ocultas: str,\n activacion: str, solucionador: str,\n alfa: float, max_itr: int):\n\n layer_sizes: list = capas_ocultas.split(',')\n\n for i in range(len(layer_sizes)):\n layer_sizes[i] = int(layer_sizes[i])\n\n hls = tuple(layer_sizes)\n\n self.clf_3 = MLPClassifier(hidden_layer_sizes=hls,\n activation=activacion,\n solver=solucionador, alpha=alfa,\n max_iter=max_itr)\n\n # Entrenar clasificadores\n def fit(self, X, y, pca_X):\n # Decidir si se desea utilizar el espacio PCA\n if self.pca_1:\n self.clf_1.fit(pca_X, y)\n else:\n self.clf_1.fit(X, y)\n\n if self.pca_2:\n self.clf_2.fit(pca_X, y)\n else:\n self.clf_2.fit(X, y)\n\n self.clf_3.fit(X, y)\n\n # Ensamble\n def ensamble_votacion(self, X, y, pca_X):\n # Decidir si se desea utilizar el espacio PCA\n if self.pca_1:\n pred_1 = self.clf_1.predict(pca_X)\n else:\n pred_1 = self.clf_1.predict(X)\n\n if self.pca_2:\n pred_2 = self.clf_2.predict(pca_X)\n else:\n pred_2 = self.clf_2.predict(X)\n\n pred_3 = self.clf_3.predict(X)\n\n # Ensamble de votación: se toma la clase\n # con más frecuencia gracias a la función mode()\n prediccion = np.array([])\n for i in range(0, len(X)):\n prediccion = np.append(prediccion, mode([pred_1[i], pred_2[i], pred_3[i]]))\n\n # Guardar los datos para la predicción final\n for j in range(0, len(X)):\n self.pred_final.append(int(prediccion[j]))\n\n # Guardar la puntuación (parcial) del resultado\n self.resultados.append(self.puntuacion(y, prediccion))\n\n @staticmethod\n def puntuacion(y, p):\n count = 0\n for i in range(0, len(y)):\n if y[i] == p[i]:\n count += 1\n score = (count * 100) / len(y)\n\n return score\n\n # Limpiar datos de colecciones\n def __clear_data(self):\n self.pred_final.clear()\n self.clases.clear()\n self.resultados.clear()\n\n # Validación cruzada (kfold)\n def validacion_cruzada(self, n_splits: int, X, y, pca_X):\n self.__clear_data()\n\n # Inicializar kfold con el número de divisiones\n kf = KFold(n_splits=n_splits)\n\n # Datos de entrenamiento y prueba\n # con los que no se utilizará PCA\n X_train: list = []\n X_test: list = []\n y_train: list = []\n y_test: list = []\n\n # Datos de entrenamiento y prueba\n # con los que se utilizará PCA\n pca_X_train: list = []\n pca_X_test: list = []\n\n # Llenar datos de entrenamiento y prueba\n for 
train_i, test_i in kf.split(X, y):\n X_train.append(X[train_i])\n X_test.append(X[test_i])\n y_train.append(y[train_i])\n y_test.append(y[test_i])\n y_tt = y[test_i]\n\n # Save data for the final comparison\n for i in range(0, len(y_tt)):\n self.clases.append(y_tt[i])\n\n # Fill the training and test data with PCA\n for train_index, test_index in kf.split(pca_X, y):\n pca_X_train.append(X[train_index])\n pca_X_test.append(X[test_index])\n\n # Training and ensemble for each validation fold\n for i in range(0, len(X_train)):\n self.fit(X_train[i], y_train[i], pca_X_train[i])\n self.ensamble_votacion(X_test[i], y_test[i], pca_X_test[i])\n\n # Return the average score\n return self.puntuacion(self.clases, self.pred_final)\n", "sub_path": "Ensamble.py", "file_name": "Ensamble.py", "file_ext": "py", "file_size_in_byte": 4906, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 14, "usage_type": "name"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 15, "usage_type": "name"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 16, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 91, "usage_type": "call"}, {"api_name": "statistics.mode", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "452662021", "text": "from pathlib import Path\n\nimport torch\nfrom torch import nn\nfrom torch.nn.modules import Module\nfrom typing import TypeVar, Callable, Tuple, Optional, Any, Mapping\n\nfrom model import EAST\n\nModel = TypeVar(\"Model\", bound=Module)\n\n# @dataclass\n# class LoadedModel(Generic[Model]):\n# model: Model\n# device: torch.device\n\n\ndef load_east_model(\n serialized_model: Path, pretrained: bool = True, set_eval: bool = True\n) -> Tuple[EAST, torch.device]:\n return load_model(\n serialized_model, model_init=lambda: EAST(pretrained), set_eval=set_eval,\n )\n\n\ndef get_torch_device(cuda_device_num: int = 0) -> torch.device:\n return torch.device(\n f\"cuda:{cuda_device_num}\" if torch.cuda.is_available() else \"cpu\"\n )\n\n\ndef load_model(\n serialized_model: Path,\n model_init: Callable[[], Model],\n set_eval: bool = True,\n cuda_device_num: int = 0,\n) -> Tuple[Model, torch.device]:\n device = torch.device(\n f\"cuda:{cuda_device_num}\" if torch.cuda.is_available() else \"cpu\"\n )\n model = model_init().to(device)\n model.load_state_dict(\n torch.load(str(serialized_model.absolute()), map_location=device)\n )\n if set_eval:\n model.eval()\n return model, device\n\n\nclass EarlyStopping:\n \"\"\"Early stopping regularization. 
Use :func:`observe_step` on each model training epoch.\n\n Source: https://github.com/Bjarten/early-stopping-pytorch/blob/master/pytorchtools.py\n \"\"\"\n\n def __init__(\n self,\n model_name_prefix: str,\n lower_is_better: bool,\n patience: int = 7,\n verbose: bool = False,\n delta: float = 0.0,\n ) -> None:\n \"\"\"Performs field assignment with the supplied parameters and initializes internal state.\n\n Args:\n model_name_prefix: Name for model\n lower_is_better: If `True`, lower values of the validation metric are better.\n Otherwise, larger values are considered an improvement.\n patience: How long to wait after last time validation metric improved.\n verbose: If True, prints a message for each validation metric improvement.\n delta: Minimum change in the monitored quantity to qualify as an improvement.\n \"\"\"\n self.model_name_prefix = model_name_prefix\n self.lower_is_better = lower_is_better\n self.patience = patience\n self.verbose = verbose\n self.delta = delta\n self.reset()\n\n def reset(self) -> None:\n \"\"\"Sets all mutable state to initial conditions.\n\n NOTE: MUTATION: Initializes `counter`, `early_stop`, `best_val_metric`, `checkpoint_num`,\n `best_name`.\n \"\"\"\n self.counter = 0\n self.early_stop = False\n self.best_val_metric: Optional[float] = None\n self.checkpoint_num = 0\n self.best_name = \"\"\n\n def __call__(self, *args) -> bool:\n \"\"\"Alias for :func:`observe_step` and then returns whether or not the\n early stopping criterion was hit.\n \"\"\"\n self.observe_step(*args)\n return self.early_stop\n\n def observe_step(self, val_metric: float, model: nn.Module) -> None:\n \"\"\"Observe the validation metric on the `model` for a discrete training step.\n\n NOTE: MUTATION: Potentially updates `counter`, `best_score`, `early_stop`,\n `best_val_metric`, `checkpoint_num`.\n \"\"\"\n if self.early_stop:\n if self.verbose:\n print(\n f\"Cannot observe step. Already stopped early.\\n{self.saved_info()}\"\n )\n elif self.loss_improvement(val_metric):\n self.save_checkpoint(val_metric, model)\n else:\n self.increment()\n\n def loss_improvement(self, val_metric: float) -> bool:\n \"\"\"Evaluates to `True` iff `val_metric` is an improvement on the best observed validation metric.\n `False` otherwise.\n \"\"\"\n return self.best_val_metric is None or (\n # e.g. new loss is lower than the best & the improvement threshold\n (val_metric < self.best_val_metric - self.delta)\n if self.lower_is_better\n else (val_metric > self.best_val_metric + self.delta)\n # e.g. new accuracy is higher than the best & the improvement threshold\n )\n\n def save_checkpoint(self, val_loss: float, model: nn.Module) -> None:\n \"\"\"Checkpoints model. Use when `val_loss` is an improvement.\n\n NOTE: MUTATION: Sets `best_val_metric`, `best_score` to neg. val loss, resets `counter`,\n and increments `checkpoint_num`.\n \"\"\"\n if self.verbose:\n if self.best_val_metric is None:\n print(\n \"Initial observation. \"\n f\"Setting best validation metric to '{val_loss:.6f}' \"\n f\"for checkpoint '{self.checkpoint_num}'\"\n )\n else:\n print(\n f\"Validation metric improvement ({self.best_val_metric:.6f} --> {val_loss:.6f}). 
\"\n f\"Saving model for checkpoint '{self.checkpoint_num}'...\"\n )\n filename = self.checkpoint_name()\n torch.save(model.state_dict(), filename)\n self.best_name = filename\n self.best_val_metric = val_loss\n self.counter = 0\n self.checkpoint_num += 1\n\n def checkpoint_name(self) -> str:\n \"\"\"Current filename for model when it is checkpointed next.\n \"\"\"\n return f\"{self.model_name_prefix}--{self.checkpoint_num}_checkpoint.pth\"\n\n def increment(self) -> None:\n \"\"\"Increment internal counters due to observing a training step without an improvement of validation loss.\n Sets `early_stop` to `True` iff the incrementing the `counter` here exceeds the `patience` threshold.\n\n NOTE: MUTATION: Increments `counter`, potentially sets `early_stop`.\n \"\"\"\n self.counter += 1\n if self.verbose:\n print(f\"EarlyStopping counter: {self.counter} out of {self.patience}\")\n if self.counter >= self.patience:\n self.early_stop = True\n if self.verbose:\n print(f\"Stopped early. {self.saved_info()}\")\n\n def saved_info(self) -> str:\n \"\"\"Human-readable logging string of the current minimum validation loss and checkpoint model filename.\n \"\"\"\n return f\"Best validation metric '{self.best_val_metric:.6f}' saved as '{self.best_name}'\"\n", "sub_path": "reusable.py", "file_name": "reusable.py", "file_ext": "py", "file_size_in_byte": 6499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "typing.TypeVar", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.modules.Module", "line_number": 10, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "name"}, {"api_name": "model.EAST", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 20, "usage_type": "name"}, {"api_name": "model.EAST", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 39, "usage_type": "attribute"}, {"api_name": "model.load_state_dict", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 43, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 37, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 147, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 147, "usage_type": "call"}]} 
+{"seq_id": "248374868", "text": "#!/usr/bin/python3\n\nimport json\nimport flask\nimport random\nimport os\nimport ankura \nimport time\nimport pickle\nfrom tqdm import tqdm\nimport sys\nimport tempfile\nimport threading\n\napp = flask.Flask(__name__, static_url_path='')\n\nuser_data = list()\n\ndataset_name = sys.argv[1]\n\ntrain_size = 10000\ntest_size = 500\nnumber_of_topics = 50\nlabel_weight = 1\nsmoothing = 0\n\nif sys.argv[1]=='newsgroups':\n attr_name = 'coarse_newsgroup'\n corpus = ankura.corpus.newsgroups()\nelif sys.argv[1]=='yelp':\n attr_name = 'binary_rating'\n corpus = ankura.corpus.yelp()\nelif sys.argv[1]=='tripadvisor':\n attr_name = 'label'\n corpus = ankura.corpus.tripadvisor()\nelif sys.argv[1]=='amazon':\n attr_name = 'binary_rating'\n corpus = ankura.corpus.amazon()\n\ndef calculate_user_data_accuracy(user_data, Q, test_corpus, train_corpus, attr_name):\n for i, data in enumerate(user_data):\n anchor_vectors = ankura.anchor.tandem_anchors(data[0], Q, corpus)\n lr_accuracy = ankura.validate.anchor_accuracy(Q, anchor_vectors, test_corpus, train_corpus, attr_name)\n print('Instance', i, 'Free Classifier Accuracy:', data[1], 'Logistic Regression Accuracy:', lr_accuracy)\n\n return\n\n@ankura.util.pickle_cache(sys.argv[1] + '.pickle')\ndef load_data():\n split = ankura.pipeline.test_train_split(corpus, num_train=train_size, num_test=test_size, return_ids=True)\n (train_ids, train_corpus), (test_ids, test_corpus) = split\n\n Q, labels = ankura.anchor.build_labeled_cooccurrence(corpus, attr_name, train_ids,\n label_weight=label_weight, smoothing=smoothing)\n\n gs_anchor_indices = ankura.anchor.gram_schmidt_anchors(corpus, Q, k=number_of_topics, return_indices=True)\n gs_anchor_vectors = Q[gs_anchor_indices]\n gs_anchor_tokens = [[corpus.vocabulary[index]] for index in gs_anchor_indices]\n return Q, labels, train_ids, train_corpus, test_ids, test_corpus, gs_anchor_vectors, gs_anchor_indices, gs_anchor_tokens\n\n\nQ, labels, train_ids, train_corpus, test_ids, test_corpus, gs_anchor_vectors, gs_anchor_indices, gs_anchor_tokens = load_data()\n\n\n@app.route('/')\ndef serve_itm():\n return app.send_static_file('index.html')\n\n@app.route('/vocab')\ndef get_vocab():\n return flask.jsonify(vocab=corpus.vocabulary)\n\n\n@app.route('/finished', methods=['GET', 'POST'])\ndef finish():\n\n directory = os.path.join('FinalAnchors', sys.argv[1])\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n\n pickle.dump(user_data, tempfile.NamedTemporaryFile(mode='wb',\n delete=False,\n prefix=sys.argv[1],\n suffix='.pickle',\n dir=directory,\n ))\n\n t = threading.Thread(target=calculate_user_data_accuracy, args=(user_data, Q, test_corpus, train_corpus, attr_name,))\n t.start()\n\n return 'OK'\n\n@app.route('/topics')\ndef topic_request():\n raw_anchors = flask.request.args.get('anchors')\n\n start=time.time()\n if raw_anchors is None:\n anchor_tokens, anchor_vectors = gs_anchor_tokens, gs_anchor_vectors\n else:\n anchor_tokens = json.loads(raw_anchors)\n anchor_vectors = ankura.anchor.tandem_anchors(anchor_tokens, Q, corpus)\n print('***tadem_anchors:', time.time()-start)\n\n start=time.time()\n C, topics = ankura.anchor.recover_topics(Q, anchor_vectors, epsilon=1e-5, get_c=True)\n print('C SHAPE :', C.shape)\n\n print('***recover_topics:', time.time()-start)\n\n start=time.time()\n topic_summary = ankura.topic.topic_summary(topics[:len(corpus.vocabulary)], corpus)\n print('***topic_summary:', time.time()-start)\n\n start=time.time()\n\n classifier = 
ankura.topic.free_classifier_dream(corpus, attr_name, labeled_docs=train_ids, topics=topics, C=C, labels=labels)\n print('***Get Classifier:', time.time()-start)\n\n contingency = ankura.validate.Contingency()\n\n start=time.time()\n for doc in test_corpus.documents:\n gold = doc.metadata[attr_name]\n pred = classifier(doc)\n contingency[gold, pred] += 1\n print('***Classify:', time.time()-start)\n print('***Accuracy:', contingency.accuracy())\n\n user_data.append((anchor_tokens, contingency.accuracy()))\n\n return flask.jsonify(anchors=anchor_tokens,\n topics=topic_summary,\n accuracy=contingency.accuracy())\n\n\nif __name__ == '__main__':\n if len(sys.argv)>2:\n port = int(sys.argv[2])\n else:\n port=5000\n app.run(debug=True, host='0.0.0.0', port=port)\n", "sub_path": "tbuie.py", "file_name": "tbuie.py", "file_ext": "py", "file_size_in_byte": 4660, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ankura.corpus.newsgroups", "line_number": 29, "usage_type": "call"}, {"api_name": "ankura.corpus", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "ankura.corpus.yelp", "line_number": 32, "usage_type": "call"}, {"api_name": "ankura.corpus", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "ankura.corpus.tripadvisor", "line_number": 35, "usage_type": "call"}, {"api_name": "ankura.corpus", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "ankura.corpus.amazon", "line_number": 38, "usage_type": "call"}, {"api_name": "ankura.corpus", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ankura.anchor.tandem_anchors", "line_number": 42, "usage_type": "call"}, {"api_name": "ankura.anchor", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ankura.validate.anchor_accuracy", "line_number": 43, "usage_type": "call"}, {"api_name": "ankura.validate", "line_number": 43, "usage_type": "attribute"}, {"api_name": "ankura.pipeline.test_train_split", "line_number": 50, "usage_type": "call"}, {"api_name": "ankura.pipeline", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ankura.anchor.build_labeled_cooccurrence", "line_number": 53, "usage_type": "call"}, {"api_name": "ankura.anchor", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ankura.anchor.gram_schmidt_anchors", "line_number": 56, "usage_type": "call"}, {"api_name": "ankura.anchor", "line_number": 56, "usage_type": "attribute"}, {"api_name": "ankura.util.pickle_cache", "line_number": 48, "usage_type": "call"}, {"api_name": "ankura.util", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 83, "usage_type": "call"}, {"api_name": 
"tempfile.NamedTemporaryFile", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 85, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}, {"api_name": "ankura.anchor.tandem_anchors", "line_number": 104, "usage_type": "call"}, {"api_name": "ankura.anchor", "line_number": 104, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "ankura.anchor.recover_topics", "line_number": 108, "usage_type": "call"}, {"api_name": "ankura.anchor", "line_number": 108, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "ankura.topic.topic_summary", "line_number": 114, "usage_type": "call"}, {"api_name": "ankura.topic", "line_number": 114, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "ankura.topic.free_classifier_dream", "line_number": 119, "usage_type": "call"}, {"api_name": "ankura.topic", "line_number": 119, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "ankura.validate.Contingency", "line_number": 122, "usage_type": "call"}, {"api_name": "ankura.validate", "line_number": 122, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 124, "usage_type": "call"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "121065528", "text": "'''\nDiciamo che un dizionario d rappresenta un albero (e lo indichiamo come dizionario-albero)\nse ciascuna chiave di d e' un identificativo di un nodo dell'albero e l'attributo della chiave e' la lista \n(eventualmente vuota) degli identificativi dei figli del nodo. Gli identificativi dei nodi \nall'interno delle liste sono in ordine lessicografico crescente.\n\n\n\nEcco un esempio di dizionario d che rappresenta un dizionario-albero\n\nd={\n'a':['b'],\n'b':['c','d'],\n'c':['i'],\n'd':['e','l'],\n'e':['f','g','h'],\n'f':[],\n'g':[],\n'h':[],\n'i':[],\n'l':[]\n}\n\nL'albero rappresentato da d e'\n\n 'a'\n |\n _____________'b'____________ \n | | \n 'c' ________'d'_______ \n | | | \n 'i' _______'e'_______ 'l'\n | | | \n 'f' 'g' 'h'\n |\n 'i'\n\n\nImplementare le seguenti funzioni:\n\n1) \nla funzione genera_sottoalbero(fnome,x,fout) che, presi:\n\n- il nome di un file json contenente un dizionario-albero d (fonome)\n- un identificativo x\n- il nome di un file json (fout)\n\nproduce il dizionario-albero che rappresenta il sottoalbero radicato \nnell'identificativo x che si ottiene dal dizionario-albero d. 
\nThe resulting dictionary-tree must be saved in the file fout.\nIf the identifier x is not among the nodes of d then the dictionary-tree produced \nmust be empty.\n\nFor example, if fnome contains the dictionary-tree d then after executing \ngenera_sottoalbero(fnome,'d',fout)\nthe file fout will contain the dictionary\n{'f': [], 'g': [], 'h': [], 'e': ['f', 'g', 'h'], 'l': [], 'd': ['e', 'l']}\n\n\n\n2)\nthe function cancella_sottoalbero(fnome,x,fout) which, given:\n\n- the name of a json file containing a dictionary-tree d (fnome)\n- an identifier x\n- the name of a json file (fout)\n\nremoves from d the subtree rooted at x and saves the result in the file fout.\nIf x is not among the keys of d then the dictionary-tree d is not modified.\n\nFor example, if fnome contains the dictionary-tree d then after executing \ncancella_sottoalbero(fnome,'d',fout)\nthe file fout will contain the dictionary\n{'a': ['b'], 'b': ['c'], 'c': ['i'], 'i':[]}\n\n\n3)\nthe function dizionario_livelli(fnome, fout) which, given:\n- the name of a json file containing a dictionary-tree d (fnome)\n- the name of a json file (fout)\n\nbuilds the dictionary whose keys are the levels of the dictionary-tree d. The attribute of a \nkey of value x is the list of the identifiers of the nodes found at level x in the tree represented by d. \nThe list is sorted lexicographically in ascending order. \nThe dictionary built this way must be saved in the file fout.\n\nFor example, if fnome contains the dictionary-tree d then after executing \ndizionario_livelli(fnome,fout)\nthe file fout will contain the dictionary\n{0: ['a'], 1: ['b'], 2: ['c', 'd'], 3: ['e','i','l'], 4: ['f', 'g', 'h']}\n\n4)\nthe function dizionario_gradi_antenati(fnome,y,fout) which, given:\n- the name of a json file containing a dictionary-tree d (fnome)\n- an integer y\n- the name of a json file (fout)\n\nbuilds the dictionary whose keys are the identifiers of the nodes of the tree \nrepresented by the dictionary-tree d. The attribute of a key of value x is the number \nof ancestors of degree y that the node with identifier x has in the tree.\nSave the dictionary built this way in the file fout.\n\nFor example, if fnome contains the dictionary-tree d then after executing \ndizionario_gradi_antenati(fnome,2,fout)\nthe file fout will contain the dictionary \n{'a': 0, 'b': 0, 'c': 1, 'd': 1, 'e': 2, 'f': 2, 'g': 2, 'h': 2, 'i': 1, 'l': 2}\n\nWARNING: do not use non-ASCII characters, such as accented letters; do not\nimport modules that are not in the standard library.\n'''\n\n\n\n\nimport json\n\n\n \n\ndef genera_sottoalbero(fnome,x,fout):\n '''insert your code here'''\n json_data=open(fnome).read()\n \n diz=json.loads(json_data)\n \n diz2=estrapola(diz,x)\n with open(fout,'w') as outfile:\n json.dump(diz2,outfile) \ndef estrapola(diz,v):\n valore=diz[v]\n if valore==None:\n diz2={v:[]} # test\n else:\n diz2={v:valore} \n for el in valore:\n diz3=estrapola(diz,el)\n diz2.update(diz3)\n return diz2 \ndef cancella_sottoalbero(fnome,x,fout):\n '''insert your code here'''\n json_data=open(fnome).read()\n diz=json.loads(json_data)\n radice=trovaradice(diz)\n diz2=elimina_albero(diz,x,radice)\n with open(fout,'w') as outfile:\n json.dump(diz2,outfile)\n \n \ndef trovaradice(diz):\n for chiave in diz.keys():\n radice=chiave\n break\n return radice\ndef elimina_albero(diz,x,nodo):\n valore=diz[nodo]\n if valore!=[]:\n diz2={nodo:valore}\n if x in valore:\n valore.remove(x)\n for el in valore:\n 
diz3=elimina_albero(diz,x,el)\n diz2.update(diz3) \n elif valore==[]:\n diz2={nodo:[]}\n return diz2\n\n \n\ndef dizionario_livelli(fnome,fout):\n '''insert your code here'''\n lista2=[]\n \n with open(fnome,'r') as file:\n diz=json.load(file)\n #print(diz)\n radice=trovaradice(diz) #ok\n i=0\n lista=livello(diz,radice,lista2,i)\n massimo=trovamassimo(lista)\n diz2=analisi(lista,massimo)\n with open(fout,'w') as outfile:\n json.dump(diz2,outfile)\n \n \n \ndef livello(diz,nodo,lista2,i):\n valore=diz[nodo]\n if valore==None:\n lista2.append((i,nodo))\n else:\n \n lista2.append((i,nodo))\n i=i+1\n for figlio in valore:\n lista2=livello(diz,figlio,lista2,i)\n return lista2\ndef trovamassimo(lista):\n listamassimo=[]\n for a,b in lista:\n listamassimo.append(a)\n massimo=max(listamassimo)\n return massimo\n \n \ndef analisi(lista,massimo):\n lista1=[]\n x=0\n diz={}\n while x<=massimo:\n for el1,el2 in lista:\n if el1==x:\n lista1.append(el2)\n lista1.sort()\n diz[x]=lista1\n lista1=[]\n x+=1\n return diz \ndef dizionario_gradi_antenati(fnome,y,fout):\n '''insert your code here'''\n lista2=[]\n i=0\n k=0\n with open(fnome,'r') as file:\n diz=json.load(file)\n radice=trovaradice(diz)\n lista=antenati(diz,radice,lista2,i,y,k)\n diz=analisi2(lista)\n with open(fout,'w') as outfile:\n json.dump(diz,outfile)\ndef analisi2(lista):\n def sec_elem(lista):\n return lista[1] \n diz={}\n lista=sorted(lista, key=sec_elem) \n for chiave,valore in lista:\n diz[chiave]=valore\n return diz\ndef antenati(diz,nodo,lista2,i,y,k):\n valore=diz[nodo]\n if valore==None:\n return lista2 # test\n else:\n lista2.append((nodo,k)) # test\n i=i+1\n if len(valore)==y:\n k=k+1\n for figlio in valore:\n lista2=antenati(diz,figlio,lista2,i,y,k)\n return lista2 ", "sub_path": "students/1800408/homework04/program01.py", "file_name": "program01.py", "file_ext": "py", "file_size_in_byte": 7256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.loads", "line_number": 124, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 128, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 142, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 146, "usage_type": "call"}, {"api_name": "json.load", "line_number": 174, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 182, "usage_type": "call"}, {"api_name": "json.load", "line_number": 224, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "202951566", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport sqlite3\nimport datetime\n\n\ncurrent_date = datetime.datetime.now().strftime(\"%m_%d_%Y\")\ndef create_database():\n\tconn = sqlite3.connect('results.db')\n\tcursor = conn.cursor()\t\n\ttry:\n\t\tcursor.execute('''CREATE TABLE Nacebel_code'''+current_date+''' ([generated_id] INTEGER PRIMARY KEY,[nacebel_code] VARCHAR, [nacebel_text] VARCHAR)''')\n\texcept Exception as e:\n\t\tprint(e)\n\treturn cursor,conn\n\ndef start_chrom():\n\toptions = webdriver.ChromeOptions()\n\toptions.add_experimental_option(\"excludeSwitches\",[\"ignore-certificate-errors\"])\n\tdriver = webdriver.Chrome(chrome_options=options)\n\treturn driver\n\n\ndef start():\n\tcursor, conn = create_database()\n\tdriver = 
start_chrom()\n\tdriver.get(\"https://kbopub.economie.fgov.be/kbopub/zoekactiviteitform.html\")\n\tdriver.find_element_by_xpath(\"//*[@id='zoekactiviteitCommand']/table[1]/tbody/tr/td[2]/a\").click()\n\ttime.sleep(20)\n\tiframe = driver.find_element_by_tag_name('iframe')\t\n\tdriver.switch_to.frame(iframe)\n\ttime.sleep(7)\n\ttry:\n\t element = WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.ID, \"tab-1033\")))\n\t element.click()\n\texcept Exception as e:\n\t\tpass\n\ttime.sleep(2)\n\ttop_div = driver.find_element_by_xpath(\"//*[@id='treeview-1027']\")\n\tfirst_tables = top_div.find_elements_by_tag_name(\"table\")\n\tposition = 0\n\tdata = []\n\ta = 0\n\tflag = 0\n\tfor index in range(3000):\n\t\tif index < 53:\n\t\t\ta = index\n\t\ttables = top_div.find_elements_by_tag_name(\"table\")\n\t\tprint(\"------------------------------\")\n\t\tprint(\"++\"+str(index)+\"++\")\n\t\tif a % 52 == 0 and index != 0:\n\t\t\tprint(\"Over follow!\")\n\t\t\tdriver.execute_script(\"arguments[0].scrollTop = (arguments[0].scrollHeight)\", top_div)\n\t\t\ttime.sleep(2)\n\t\t\ta = 0\n\t\t\tflag = 1\n\t\tif 53 - len(tables)>0 and flag != 0:\n\t\t\tprint(\"Increase count!\")\n\t\t\ta = 0\n\t\t\tflag = 0\n\t\tif a - len(tables) > 0:\n\t\t\tprint(\"Increase count!\")\n\t\t\ta = 0\n\t\t\t\n\t\tprint(str(a))\n\t\tprint(str(len(tables)))\t\n\t\ttry:\n\t\t\ttables[a].find_element_by_class_name(\"x-tree-expander\").click()\n\t\t\ta = a + 1\n\t\texcept Exception as e:\n\t\t\ttry:\n\t\t\t\ttemp = tables[a].find_element_by_class_name(\"x-tree-node-text\").text.split(\"-\")[0].replace(\".\",\"\")\n\t\t\t\ttime.sleep(1)\n\t\t\t\ttables[a].find_element_by_class_name(\"x-tree-expander\").click()\n\t\t\t\ta = a + 1\n\t\t\texcept Exception as e:\n\t\t\t\ttry:\n\t\t\t\t\ttemp_data = tables[a].find_element_by_class_name(\"x-tree-node-text\").text.split(\"-\")[0].replace(\".\",\"\").strip()\n\t\t\t\t\tnacebell_text = tables[a].find_element_by_class_name(\"x-tree-node-text\").text.split(\"-\")[1].strip()\n\t\t\t\t\tdata.append(tables[a].find_element_by_class_name(\"x-tree-node-text\").text.split(\"-\")[0].replace(\".\",\"\").strip())\n\t\t\t\t\t# print('''INSERT INTO Nacebel_code_'''+current_date+''' (nacebel_code) VALUES ('''+ temp_data +''')''')\n\t\t\t\t\tif len(temp_data) != 2 or len(temp_data) != 1:\n\t\t\t\t\t\tconn.execute(\"INSERT INTO Nacebel_code\"+current_date+\" (nacebel_code, nacebel_text) VALUES ('\"+ temp_data +\"','\"+ nacebell_text +\"')\")\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tprint(type(temp_data))\n\t\t\t\t\tprint(temp_data)\n\t\t\t\t\tconn.commit()\n\t\t\t\t\ta = a + 1\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\t\t\t\t\ttime.sleep(5)\n\t\ttime.sleep(2)\n\t\n\n# def get_datas(driver, data, index):\n\t\n# \ttop_div = driver.find_element_by_xpath(\"//*[@id='treeview-1027']\")\n# \ttables = top_div.find_elements_by_tag_name(\"table\")\n# \ttry:\n# \t\ttables[index].find_element_by_class_name(\"x-tree-expander\").click()\n# \texcept Exception as e:\n# \t\ttry:\n# \t\t\ttemp = tables[index].find_element_by_class_name(\"x-tree-node-text\").text.split(\"-\")[0].replace(\".\",\"\")\n# \t\t\ttime.sleep(1)\n# \t\t\ttables[index].find_element_by_class_name(\"x-tree-expander\").click()\n# \t\texcept Exception as e:\n# \t\t\ttry:\n# \t\t\t\tdata.append(tables[index].find_element_by_class_name(\"x-tree-node-text\").text.split(\"-\")[0].replace(\".\",\"\"))\n# \t\t\t\tprint(data)\n\n# \tif index == 53:\n# \t\tindex = 29\n\n\t\t# except Exception as e:\n\t\t# \tpass\n\t\t# try:\n\t\t# \tfirst_table = 
driver.find_element_by_xpath(\"//*[@id='treeview-1027-record-\"+str(index+3)+\"']\")\n\t\t# \ttime.sleep(1)\n\t\t# \tfirst_table.find_element_by_class_name(\"x-tree-elbow-img\").click()\n\t\t# \ttime.sleep(2)\n\t\t# except Exception as e:\n\t\t# \tfirst_table = driver.find_element_by_xpath(\"//*[@id='treeview-1027-record-\"+str(index+3)+\"']\")\n\t\t# \ttime.sleep(1)\n\t\t# \tfirst_table.find_element_by_class_name(\"x-tree-elbow-img\").click()\n\t\t# \ttime.sleep(2)\n\t\t# position = len(first_tables) + 1\n\t\n\nstart()\n\t# print(len(first_tables))\n", "sub_path": "Belgium(sara L)/get_nacebel_code.py", "file_name": "get_nacebel_code.py", "file_ext": "py", "file_size_in_byte": 4517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "datetime.datetime.now", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 23, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 37, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 37, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "45343168", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# !@Time : 2021/4/14 下午3:57\n# !@Author : miracleyin @email: miracleyin@live.com\n# !@File : inference.py\n\nimport json\nimport csv\nfrom pathlib import Path\nfrom tqdm.notebook import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom datasets import InferenceDataset, inference_collate_batch\nfrom model.model import Classifier\n\ndef parse_args():\n \"\"\"arguments\"\"\"\n config = {\n \"data_dir\": \"./Dataset\",\n \"model_path\": \"./model.ckpt\",\n \"output_path\": \"./output.csv\",\n }\n\n return config\n\n\ndef main(\n data_dir,\n model_path,\n output_path,\n):\n \"\"\"Main function.\"\"\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f\"[Info]: Use {device} now!\")\n\n mapping_path = Path(data_dir) / \"mapping.json\"\n mapping = json.load(mapping_path.open())\n\n dataset = InferenceDataset(data_dir)\n dataloader = DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n drop_last=False,\n num_workers=8,\n collate_fn=inference_collate_batch,\n )\n print(f\"[Info]: Finish loading data!\", flush=True)\n\n speaker_num 
= len(mapping[\"id2speaker\"])\n model = Classifier(n_spks=speaker_num).to(device)\n model.load_state_dict(torch.load(model_path))\n model.eval()\n print(f\"[Info]: Finish creating model!\", flush=True)\n\n results = [[\"Id\", \"Category\"]]\n for feat_paths, mels in tqdm(dataloader):\n with torch.no_grad():\n mels = mels.to(device)\n outs = model(mels)\n preds = outs.argmax(1).cpu().numpy()\n for feat_path, pred in zip(feat_paths, preds):\n results.append([feat_path, mapping[\"id2speaker\"][str(pred)]])\n\n with open(output_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(results)\n\n\nif __name__ == \"__main__\":\n main(**parse_args())\n", "sub_path": "inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 1948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "torch.device", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "json.load", "line_number": 38, "usage_type": "call"}, {"api_name": "datasets.InferenceDataset", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 41, "usage_type": "call"}, {"api_name": "datasets.inference_collate_batch", "line_number": 47, "usage_type": "name"}, {"api_name": "model.model", "line_number": 52, "usage_type": "name"}, {"api_name": "model.model.Classifier", "line_number": 52, "usage_type": "call"}, {"api_name": "model.model.load_state_dict", "line_number": 53, "usage_type": "call"}, {"api_name": "model.model", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 53, "usage_type": "call"}, {"api_name": "model.model.eval", "line_number": 54, "usage_type": "call"}, {"api_name": "model.model", "line_number": 54, "usage_type": "name"}, {"api_name": "tqdm.notebook.tqdm", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 59, "usage_type": "call"}, {"api_name": "model.model", "line_number": 61, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "551020468", "text": "# Copyright (C) 2021 Intel Corporation.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n#\n\nimport lxml\n\ndef add_child(element, tag, text=None, **kwargs):\n child = lxml.etree.Element(tag)\n child.text = text\n for k,v in kwargs.items():\n child.set(k, v)\n element.append(child)\n return child\n\ndef get_node(etree, xpath):\n result = etree.xpath(xpath)\n assert len(result) <= 1, \\\n \"Internal error: cannot get texts from multiple nodes at a time. \" \\\n \"Rerun the Board Inspector with `--loglevel debug`. 
If this issue persists, \" \\\n \"log a new issue at https://github.com/projectacrn/acrn-hypervisor/issues and attach the full logs.\"\n return result[0] if len(result) == 1 else None\n", "sub_path": "misc/config_tools/board_inspector/extractors/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "lxml.etree.Element", "line_number": 9, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "51382394", "text": "import cv2\nimport torch\nimport numpy as np\n\nimport global_vars\nimport models\nfrom filters import skinMask,greyMask\nfrom models import load_model, predict_gesture\nfrom utils import *\n\n\nclass recognizer:\n def __init__(self):\n # CNN\n self.model = load_model()\n if torch.cuda.is_available():\n self.gpu = True\n self.model.cuda()\n self.prediction_frequency = 10 # each 10 images arise a prediction\n self.prediction_count = 0\n self.camera_height = 300\n self.camera_width = 300\n\n def get_hand_img(self, raw_img, x, y,fix=True):\n '''\n cut the part of img having hand.\n raw_img: ndarray, (255,255,3)\n x,y: right wrist coordinate\n '''\n if not fix:\n if x - self.camera_width // 2 < 0:\n x0 = 0\n elif x + self.camera_width // 2 > raw_img.shape[1]:\n x0 = raw_img.shape[1] - self.camera_width\n else:\n x0 = x - self.camera_width\n\n if y - self.camera_height*2 < 0:\n y0 = 0\n # elif y + self.camera_height > raw_img.shape[0]:\n # y0 = raw_img.shape[0] - self.camera_height\n else:\n y0 = x - self.camera_height*2\n else:\n x0, y0 = 350,300\n\n # img = greyMask(raw_img, x0, y0, self.camera_width, self.camera_height)\n img = skinMask(raw_img, x0, y0, self.camera_width, self.camera_height)\n\n return img\n\n def recognize(self, img):\n gesture = predict_gesture(self.model, img,\n self.gpu, verbose=True)\n return gesture\n", "sub_path": "realtime_gesture_recog/recog.py", "file_name": "recog.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "models.load_model", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "filters.skinMask", "line_number": 48, "usage_type": "call"}, {"api_name": "models.predict_gesture", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "477814710", "text": "import itertools\nimport numpy as np\nfrom list_rotations import list_rotations\n\n\ndef get_combinations(coordinate_system, point_to_reference_corner_of_cube, cube_parts, shape, dimension):\n combinations = [coordinate_system]\n for cube_part in cube_parts:\n combinations_new = []\n for combination in combinations:\n for index in itertools.product(*[range(0, n) for n in shape]):\n position_in_cube = np.add(point_to_reference_corner_of_cube, np.array(index))\n cube_part_in_reference_array = np.zeros(coordinate_system.shape)\n cube_part_in_reference_array[(slice(position_in_cube[0], position_in_cube[0] + cube_part.shape[0]),\n slice(position_in_cube[1], position_in_cube[1] + cube_part.shape[1]),\n slice(position_in_cube[2], position_in_cube[2] + cube_part.shape[2]))] \\\n += cube_part\n for rotated_cube_part in list_rotations(cube_part_in_reference_array, dimension):\n combination_with_rotated_cube_part = combination + 
rotated_cube_part\n combinations_new.append(combination_with_rotated_cube_part)\n combinations = combinations_new\n\n return combinations\n", "sub_path": "combinations.py", "file_name": "combinations.py", "file_ext": "py", "file_size_in_byte": 1295, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "itertools.product", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "list_rotations.list_rotations", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "512743474", "text": "# _*_ coding:utf-8 _*_\r\n# redis unauthorized-access detection script, single-threaded version\r\n# Environment:\r\n# 1.Python 3.8.10\r\n# 2. python with redis and func_timeout installed\r\n#\r\n# windows environment: administrator privileges\r\n# pip3 install func_timeout\r\n# pip3 install redis\r\n# Linux environment: sudo easy_install redis \r\n# sudo easy_install func_timeout\r\n#=========================================================\r\n# Usage\r\n# targets are loaded from url.txt, format IP:port, example 119.45.56.123:6379\r\n# python.exe ./redisOneThread.py\r\n#\r\n\r\nimport redis,time\r\nfrom func_timeout import func_set_timeout\r\nimport func_timeout\r\n\r\n\r\nfile=\"./url.txt\"\r\nsuccess_save_filename=\"./success_redisOneThread.txt\"\r\nredis_row_list=[]\r\n# read the text file line by line\r\ndef readfile(file):\r\n file = open(file) \r\n while 1:\r\n lines = file.readlines(100000)\r\n if not lines:\r\n break\r\n for line in lines:\r\n list2 = line.replace(\"\\n\", \"\").split(\":\", 1)\r\n redis_row_list.append(list2)\r\n file.close()\r\n# save vulnerable targets to a file\r\ndef writefile(filename,context):\r\n fo = open(filename, \"a\")\r\n fo.write(context)\r\n fo.close()\r\n# send the vulnerability-detection statement redis.info\r\ndef redisSendFifo():\r\n for line in redis_row_list:\r\n print(\"Preparing to check: \"+line[0])\r\n try:\r\n r=checkTimeOut(line)\r\n if \"redis_build_id\" in r:\r\n writefile(success_save_filename,line[0]+\":\"+line[1]+\"\\n\")\r\n print(line[0]+\":\"+line[1]+\" has an unauthorized access vulnerability\")\r\n except func_timeout.exceptions.FunctionTimedOut:\r\n writefile(\"./chaoshi.txt\",line[0]+\":\"+line[1]+\"\\n\")\r\n print('function execution timed out')\r\n \r\n \r\n# the function that actually performs the check\r\n@func_set_timeout(5)# set the function execution timeout_\r\ndef checkTimeOut(line):\r\n try:\r\n r=redis.Redis(host=line[0], port=line[1], db=0,socket_connect_timeout=3)\r\n return r.info()\r\n except :\r\n return \"error\"\r\n# main function\r\nif __name__ == '__main__':\r\n\r\n readfile(file)\r\n redisSendFifo()\r\n \r\n ", "sub_path": "redisOneThread.py", "file_name": "redisOneThread.py", "file_ext": "py", "file_size_in_byte": 2111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "func_timeout.exceptions", "line_number": 51, "usage_type": "attribute"}, {"api_name": "redis.Redis", "line_number": 60, "usage_type": "call"}, {"api_name": "func_timeout.func_set_timeout", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "527373667", "text": "#!/usr/bin/env python3\nfrom datetime import datetime\nfrom phue import Bridge\nimport logging\n\nlogging.basicConfig() # allows output to monitor/log file\n\nb = Bridge('192.168.0.202') #assign hue bridge\nb.connect() #connect to bridge\nb.get_api()\nlights = b.lights\nb.get_group()\n\ndef poff():\n \n global lights\n \n if b.get_light('Porch', 'on') == True:\n\n #Turn off the porch light\n b.set_light('Porch', 'on', False, transitiontime=1)\n\n #Write to console and log file\n print (\"%s | It is getting dark and the 
porch light is on. Turning it off now.\\n\" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n with open('/home/pi/Desktop/py/porch/log.log', 'a') as f:\n f.write(\"%s | It is getting dark and the porch light is on. Turning it off now.\\n\" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n else:\n print (\"%s | It is getting dark and the porch light is already off. Doing nothing.\\n\" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n with open('/home/pi/Desktop/py/porch/log.log', 'a') as f:\n f.write(\"%s | It is getting dark and the porch light is already off . Doing nothing.\\n\" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\npoff()\n", "sub_path": "Python/Porch/porch_off.py", "file_name": "porch_off.py", "file_ext": "py", "file_size_in_byte": 1226, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "phue.Bridge", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "620982439", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/sensors/pubsub_sensor.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 4319 bytes\nfrom airflow.contrib.hooks.gcp_pubsub_hook import PubSubHook\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass PubSubPullSensor(BaseSensorOperator):\n __doc__ = \"Pulls messages from a PubSub subscription and passes them through XCom.\\n\\n This sensor operator will pull up to ``max_messages`` messages from the\\n specified PubSub subscription. When the subscription returns messages,\\n the poke method's criteria will be fulfilled and the messages will be\\n returned from the operator and passed through XCom for downstream tasks.\\n\\n If ``ack_messages`` is set to True, messages will be immediately\\n acknowledged before being returned, otherwise, downstream tasks will be\\n responsible for acknowledging them.\\n\\n ``project`` and ``subscription`` are templated so you can use\\n variables in them.\\n \"\n template_fields = ['project', 'subscription']\n ui_color = '#ff7f50'\n\n @apply_defaults\n def __init__(self, project, subscription, max_messages=5, return_immediately=False, ack_messages=False, gcp_conn_id='google_cloud_default', delegate_to=None, *args, **kwargs):\n \"\"\"\n :param project: the GCP project ID for the subscription (templated)\n :type project: str\n :param subscription: the Pub/Sub subscription name. 
Do not include the\n full subscription path.\n :type subscription: str\n :param max_messages: The maximum number of messages to retrieve per\n PubSub pull request\n :type max_messages: int\n :param return_immediately: If True, instruct the PubSub API to return\n immediately if no messages are available for delivery.\n :type return_immediately: bool\n :param ack_messages: If True, each message will be acknowledged\n immediately rather than by any downstream tasks\n :type ack_messages: bool\n :param gcp_conn_id: The connection ID to use connecting to\n Google Cloud Platform.\n :type gcp_conn_id: str\n :param delegate_to: The account to impersonate, if any.\n For this to work, the service account making the request\n must have domain-wide delegation enabled.\n :type delegate_to: str\n \"\"\"\n (super(PubSubPullSensor, self).__init__)(*args, **kwargs)\n self.gcp_conn_id = gcp_conn_id\n self.delegate_to = delegate_to\n self.project = project\n self.subscription = subscription\n self.max_messages = max_messages\n self.return_immediately = return_immediately\n self.ack_messages = ack_messages\n self._messages = None\n\n def execute(self, context):\n super(PubSubPullSensor, self).execute(context)\n return self._messages\n\n def poke(self, context):\n hook = PubSubHook(gcp_conn_id=(self.gcp_conn_id), delegate_to=(self.delegate_to))\n self._messages = hook.pull(self.project, self.subscription, self.max_messages, self.return_immediately)\n if self._messages:\n if self.ack_messages:\n if self.ack_messages:\n ack_ids = [m['ackId'] for m in self._messages if m.get('ackId')]\n hook.acknowledge(self.project, self.subscription, ack_ids)\n return self._messages", "sub_path": "pycfiles/apache_airflow_arup-1.10.5-py3.6/pubsub_sensor.cpython-36.py", "file_name": "pubsub_sensor.cpython-36.py", "file_ext": "py", "file_size_in_byte": 3632, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "airflow.sensors.base_sensor_operator.BaseSensorOperator", "line_number": 12, "usage_type": "name"}, {"api_name": "airflow.utils.decorators.apply_defaults", "line_number": 17, "usage_type": "name"}, {"api_name": "airflow.contrib.hooks.gcp_pubsub_hook.PubSubHook", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "190613859", "text": "import numpy as np\nimport torch\n\nclass PrototypicalBatchSampler(object):\n def __init__(self, labels, class_idxs, num_way, num_support, num_query, num_episode):\n super(PrototypicalBatchSampler, self).__init__()\n\n self.class_idxs = class_idxs\n self.num_way = num_way\n self.num_sample = num_support + num_query\n self.num_episode = num_episode\n\n self.classes, self.counts = np.unique(labels, return_counts=True)\n\n # index table\n self.indices = np.empty((len(self.classes), max(self.counts)), dtype=int) * np.nan\n for idx, label in enumerate(labels):\n class_idx = label\n self.indices[class_idx, np.argwhere(np.isnan(self.indices[class_idx]))[0]] = idx\n \n class_idxs = self.class_idxs[torch.randperm(len(self.class_idxs))[:self.num_way]]\n \n def __iter__(self):\n for episode in range(self.num_episode):\n batch_size = self.num_way * self.num_sample\n batch = np.zeros(batch_size, dtype=int)\n class_idxs = self.class_idxs[torch.randperm(len(self.class_idxs))[:self.num_way]]\n\n for i, c_idx in enumerate(class_idxs):\n c_size = int(self.counts[c_idx])\n s_idxs = torch.randperm(c_size)[:self.num_sample]\n batch[i*self.num_sample : (i+1)*self.num_sample] = self.indices[c_idx][s_idxs]\n\n yield batch\n\n def __len__(self):\n 
 return self.num_episode", "sub_path": "fsssl3d/data/prototypical_batch_sampler.py", "file_name": "prototypical_batch_sampler.py", "file_ext": "py", "file_size_in_byte": 1455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.unique", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.argwhere", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "559871441", "text": "import random\nimport networkx as nx\n\n\nclass FF():\n def __init__(self, param_settings=None):\n pass\n\n def run_sampling(self, G, rate):\n size = round(len(G) * rate)\n list_nodes = list(G.nodes())\n dictt = set() # nodes already visited (burned)\n random_node = random.choice(list_nodes)\n q = set() # q = set contains the distinct frontier nodes\n q.add(random_node)\n Gs = nx.Graph()\n while (len(Gs.nodes()) < size):\n if (len(q) > 0):\n initial_node = q.pop()\n if (initial_node not in dictt):\n dictt.add(initial_node)\n neighbours = list(G.neighbors(initial_node))\n num_burn = random.randint(1, len(neighbours))\n for x in neighbours[:num_burn]:\n if (len(Gs.nodes()) < size):\n Gs.add_edge(initial_node, x)\n q.add(x)\n else:\n break\n else:\n continue\n else:\n # frontier exhausted: restart the fire from a random unvisited node\n random_node = random.sample(list(set(list_nodes) - dictt), 1)[0]\n q.add(random_node)\n q.clear()\n return Gs\n", "sub_path": "BackEnd/utils/sampling_algorithms/FF.py", "file_name": "FF.py", "file_ext": "py", "file_size_in_byte": 1196, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 16, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "234500338", "text": "from sklearn.datasets import make_circles\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.linalg import eigh\n\ndef rbf_kernel_pca(X, gamma, n_components):\n # Calculate pairwise squared Euclidean distances\n sq_dists = pdist(X, 'sqeuclidean')\n # X is represented as a condensed vector of squared Euclidean distances.\n # For example, if X = [[1,3,4], [5,5,5], [6,5,7]] there are 3 samples,\n # so there are 3C2 = 3 pairwise distance elements.\n \n # Convert pairwise distances into a square matrix.\n mat_sq_dists = squareform(sq_dists)\n # This builds the full distance matrix, e.g. d_12 = distance from sample 1 to sample 2.\n \n # Compute the symmetric kernel matrix.\n # (scipy.exp has been removed from SciPy; NumPy's exp does the same job.)\n K = np.exp(-gamma*mat_sq_dists)\n # See the RBF kernel equation for the role of gamma.\n\n # Center the kernel matrix.\n N = len(K)\n one_N = np.ones((N,N)) / N\n \n K = K - one_N.dot(K) - K.dot(one_N) + one_N.dot(K).dot(one_N)\n \n eigvals, eigvecs = eigh(K)\n # eigh returns eigenvalues and eigenvectors in ascending order\n # (use a list comprehension: newer NumPy rejects a bare generator here)\n alphas = np.column_stack([eigvecs[:,-i] for i in range(1,n_components+1)])\n # numpy.column_stack : Stack 1-D arrays as columns into a 2-D array.\n \n \n return
 alphas\n\nX, y = make_circles(n_samples = 1000, random_state = 123, noise = 0.1, factor = 0.2)\nplt.scatter(X[y==0, 0], X[y==0, 1], color = 'red', marker = '^', alpha = 0.5)\nplt.scatter(X[y==1, 0], X[y==1, 1], color = 'blue', marker = 'o', alpha = 0.5)\nplt.show()\n\n#Let's start with the standard PCA approach to compare it with the results of the RBF\n#kernel PCA:\n\nscikit_pca = PCA(n_components = 2)\nX_spca = scikit_pca.fit_transform(X)\n\nfig, ax = plt.subplots(nrows=1,ncols=2, figsize=(7,3))\nax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)\nax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)\nax[1].scatter(X_spca[y==0, 0], np.zeros((500,1))+0.02, color='red', marker='^', alpha=0.5)\nax[1].scatter(X_spca[y==1, 0], np.zeros((500,1))-0.02, color='blue', marker='o', alpha=0.5)\nax[0].set_xlabel('PC1')\nax[0].set_ylabel('PC2')\nax[1].set_ylim([-1, 1])\nax[1].set_yticks([])\nax[1].set_xlabel('PC1')\nplt.show()\n\n#Given an appropriate value for gamma, let's see if we are luckier using the RBF kernel\n#PCA implementation:\n\nX_kpca = rbf_kernel_pca(X, gamma = 15, n_components = 2)\nfig, ax = plt.subplots(nrows=1,ncols=2, figsize=(7,3))\n\nax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)\nax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)\nax[1].scatter(X_kpca[y==0, 0], np.zeros((500,1))+0.02, color='red', marker='^', alpha=0.5)\nax[1].scatter(X_kpca[y==1, 0], np.zeros((500,1))-0.02, color='blue', marker='o', alpha=0.5)\nax[0].set_xlabel('PC1')\nax[0].set_ylabel('PC2')\nax[1].set_ylim([-1, 1])\nax[1].set_yticks([])\nax[1].set_xlabel('PC1')\nplt.show()\n", "sub_path": "chapter5/chapter5_ex5.py", "file_name": "chapter5_ex5.py", "file_ext": "py", "file_size_in_byte": 3047, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "scipy.spatial.distance.pdist", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 27, "usage_type": "call"}, {"api_name": "scipy.linalg.eigh", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_circles", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show",
"line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "216013651", "text": "from address import Address\nfrom customer import Customer\nfrom transaction import Transaction\nfrom utility import get_current_date, get_current_time, global_customer_map, global_transactions, global_branches, \\\n send_message\n\n\nclass Account(object):\n \"\"\"\n Maintains a structure for all accounts\n :param str account_number: account_number of account\n :param int balance: starting balance of account\n :param Customer customer: associated customer\n :param int max_transaction_amount: maximum transaction amount allowed\n :param str branch_code: branch associated with account\n \"\"\"\n\n def __init__(self, account_number, balance, customer, max_transaction_amount, branch_code):\n \"\"\"\n Initialisation function for Account class\n \"\"\"\n self.account_number = account_number\n self.balance = balance\n self.customer = customer\n self.max_transaction_amount = max_transaction_amount\n self.branch_code = branch_code\n\n def __str__(self):\n \"\"\"\n :return printable string for an object of Account class\n :rtype str\n \"\"\"\n return str(\n f'Account Number: {self.account_number}\\nCustomer ID: {self.customer.customer_id}\\nBalance'\n f' INR{str(self.balance)}\\nMaximum Transaction Amount{str(self.max_transaction_amount)}\\nBranch Code'\n f'{self.branch_code}')\n\n def input_account(self):\n \"\"\"\n Input function to take values from the user and assign it to an object of Account class\n \"\"\"\n while True:\n ch = input('Existing customer? (Y/N): ')\n # For existing customers, adds a new account to the customer.active_accounts dictionary\n if ch.upper() == 'Y':\n existing_customer_id = input('Existing Customer ID: ')\n if existing_customer_id in global_customer_map:\n print(f'Customer found. Adding account to customer ID #{existing_customer_id}')\n self.customer = global_customer_map[existing_customer_id]\n self.customer.active_accounts_number += 1\n break\n else:\n print('Customer ID does not exist. 
 Recheck ID or register as a new customer.')\n elif ch.upper() == 'N':\n # For new customers, creates a new customer then adds a new account to the customer.active_accounts\n # dictionary\n self.customer = Customer('', '', Address('', '', '', '', '', '', '', ''), '', '', 0, '', {})\n self.customer.input_customer()\n self.customer.active_accounts_number += 1\n break\n while True:\n try:\n self.max_transaction_amount = int(input('Maximum Transaction Amount: '))\n break\n except ValueError:\n print('\\nInvalid Value\\n')\n while True:\n try:\n self.balance = int(input('Initial Balance: '))\n break\n except ValueError:\n print('\\nInvalid Value\\n')\n while True:\n branch_code = input('Branch Code: ')\n if branch_code in global_branches:\n break\n else:\n print('\\nInvalid Branch Code\\n')\n self.account_number = str(\n self.customer.customer_id + branch_code + str(\"%02d\" % self.customer.active_accounts_number))\n self.customer.active_accounts[self.account_number] = self\n print(f'Account created successfully! Account ID: {self.account_number}')\n # Add creation of account to transactions log\n global_transactions.append(\n Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),\n self.get_branch_code(), 'NA', 0, self.balance,\n f'Account {self.account_number} created successfully!'))\n send_message(\n f'Greetings from Bank XXX!\\nYour Customer ID {self.customer.customer_id}\\nYour Account Number '\n f'{self.account_number}.\\nBalance INR{self.balance}\\nYour account has been created successfully.',\n self.customer.phone_number)\n\n def delete_account(self, pop_from_list):\n \"\"\"\n Delete function to delete an object of Account class\n \"\"\"\n # Add deletion of account to transactions log\n global_transactions.append(\n Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),\n self.get_branch_code(), 'NA', self.balance, 0,\n f'Account {self.account_number} deleted successfully!'))\n self.customer.active_accounts_number -= 1\n if pop_from_list:\n self.customer.active_accounts.pop(self.account_number)\n print(f'Account {str(self.account_number)} deleted successfully! Closing Balance: INR{str(self.balance)}')\n send_message(\n f'Greetings from Bank XXX!\\nYour Customer ID {self.customer.customer_id}\\nYour Account Number '\n f'{self.account_number}.\\nYour account has been deleted successfully.', self.customer.phone_number)\n\n def modify_account(self):\n \"\"\"\n Modify function to modify an object of Account class\n \"\"\"\n modify_account_list = ['1. Modify Maximum Transaction Amount']\n for i in modify_account_list:\n print('\\t' + i)\n print()\n ch = input('Command: ')\n if ch == '1':\n while True:\n try:\n self.max_transaction_amount = int(input('New Maximum Transaction Amount: '))\n break\n except ValueError:\n print('\\nInvalid Value\\n')\n global_transactions.append(\n Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),\n self.get_branch_code(), 0, self.balance, self.balance,\n 'Maximum Transaction Amount modified successfully!'))\n send_message(\n f'Greetings from Bank XXX!\\nYour Customer ID {self.customer.customer_id}\\nYour Account Number '\n f'{self.account_number}.\\nYour account has been modified successfully.', self.customer.phone_number)\n\n def deposit(self, amount):\n \"\"\"\n Deposit function to deposit money into account\n \"\"\"\n if int(amount) <= 0:\n # Validation rule: Amount is negative\n print('Invalid amount.
 Please enter positive values.\\nTransaction aborted!')\n elif int(amount) > self.max_transaction_amount:\n # Validation rule: Amount is more than maximum set by the customer\n print('Amount entered is more than the maximum.\\nTransaction aborted!')\n else:\n self.balance += int(amount)\n # Add deposit transaction to transactions log\n global_transactions.append(\n Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),\n self.get_branch_code(), amount, str(int(self.balance) - int(amount)), self.balance,\n f'{str(amount)} deposited successfully!'))\n send_message(\n f'Greetings from Bank XXX!\\nYour Customer ID {self.customer.customer_id}.\\nYou have deposited '\n f'{str(amount)} into Account #{self.account_number}\\nClosing Balance: INR{self.balance}',\n self.customer.phone_number)\n\n def withdraw(self, amount):\n \"\"\"\n Withdraw function to withdraw money from account\n \"\"\"\n if int(amount) <= 0:\n # Validation rule: Amount is negative\n print('Invalid amount. Please enter positive values.\\nTransaction aborted!')\n elif int(amount) > self.max_transaction_amount:\n # Validation rule: Amount is more than maximum set by the customer\n print('Amount entered is more than the maximum.\\nTransaction aborted!')\n elif int(amount) > self.balance:\n # Validation rule: Amount is more than current balance\n print('Amount entered is more than balance.\\nTransaction aborted!')\n else:\n self.balance -= int(amount)\n # Add withdrawal transaction to transactions log\n global_transactions.append(\n Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),\n self.get_branch_code(), amount, str(int(self.balance) + int(amount)), str(self.balance),\n f'{str(amount)} withdrawn successfully!'))\n send_message(\n f'Greetings from Bank XXX!\\nYour Customer ID {self.customer.customer_id}.\\nYou have withdrawn '\n f'{str(amount)} from Account #{self.account_number}\\nClosing Balance: INR{self.balance}',\n self.customer.phone_number)\n\n def get_branch_code(self):\n \"\"\"\n :return branch_code of the account, substring[4:8]\n :rtype str\n \"\"\"\n return self.account_number[4:8]\n", "sub_path": "account.py", "file_name": "account.py", "file_ext": "py", "file_size_in_byte": 9212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "utility.global_customer_map", "line_number": 47, "usage_type": "name"}, {"api_name": "utility.global_customer_map", "line_number": 49, "usage_type": "name"}, {"api_name": "customer.Customer", "line_number": 57, "usage_type": "call"}, {"api_name": "address.Address", "line_number": 57, "usage_type": "call"}, {"api_name": "utility.global_branches", "line_number": 75, "usage_type": "name"}, {"api_name": "utility.global_transactions.append", "line_number": 84, "usage_type": "call"}, {"api_name": "utility.global_transactions", "line_number": 84, "usage_type": "name"}, {"api_name": "transaction.Transaction", "line_number": 85, "usage_type": "call"}, {"api_name": "utility.get_current_date", "line_number": 85, "usage_type": "call"}, {"api_name": "utility.get_current_time", "line_number": 85, "usage_type": "call"}, {"api_name": "utility.send_message", "line_number": 88, "usage_type": "call"}, {"api_name": "utility.global_transactions.append", "line_number": 98, "usage_type": "call"}, {"api_name": "utility.global_transactions", "line_number": 98, "usage_type": "name"}, {"api_name": "transaction.Transaction", "line_number": 99,
"usage_type": "call"}, {"api_name": "utility.get_current_date", "line_number": 99, "usage_type": "call"}, {"api_name": "utility.get_current_time", "line_number": 99, "usage_type": "call"}, {"api_name": "utility.send_message", "line_number": 106, "usage_type": "call"}, {"api_name": "utility.global_transactions.append", "line_number": 126, "usage_type": "call"}, {"api_name": "utility.global_transactions", "line_number": 126, "usage_type": "name"}, {"api_name": "transaction.Transaction", "line_number": 127, "usage_type": "call"}, {"api_name": "utility.get_current_date", "line_number": 127, "usage_type": "call"}, {"api_name": "utility.get_current_time", "line_number": 127, "usage_type": "call"}, {"api_name": "utility.send_message", "line_number": 130, "usage_type": "call"}, {"api_name": "utility.global_transactions.append", "line_number": 147, "usage_type": "call"}, {"api_name": "utility.global_transactions", "line_number": 147, "usage_type": "name"}, {"api_name": "transaction.Transaction", "line_number": 148, "usage_type": "call"}, {"api_name": "utility.get_current_date", "line_number": 148, "usage_type": "call"}, {"api_name": "utility.get_current_time", "line_number": 148, "usage_type": "call"}, {"api_name": "utility.send_message", "line_number": 151, "usage_type": "call"}, {"api_name": "utility.global_transactions.append", "line_number": 172, "usage_type": "call"}, {"api_name": "utility.global_transactions", "line_number": 172, "usage_type": "name"}, {"api_name": "transaction.Transaction", "line_number": 173, "usage_type": "call"}, {"api_name": "utility.get_current_date", "line_number": 173, "usage_type": "call"}, {"api_name": "utility.get_current_time", "line_number": 173, "usage_type": "call"}, {"api_name": "utility.send_message", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "593064721", "text": "from django.conf.urls import url\n\nfrom . 
 import views\n\nurlpatterns = [\n # /production\n url(r'^$', views.index, name='index'),\n url(r'^time$', views.time, name='time'),\n url(r'^time2$', views.time2, name='time2'),\n]\n", "sub_path": "production/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "12830167", "text": "from plotly.graph_objs import Bar, Layout\nfrom plotly import offline\n\nfrom die import Die\n\n# Create 3 different D6 dice\nd_1 = Die()\nd_2 = Die()\nd_3 = Die()\ndice_list = [d_1, d_2, d_3]\n\n# Other variables\nresults = []\nfrequencies = []\n\nnumber_of_rolls = 5000\n\nmin_roll_number = len(dice_list)\nmax_roll_number = min_roll_number * d_1.sides\n\n# Create the rolls\nfor roll in range(number_of_rolls):\n results.append(d_1.Roll() + d_2.Roll() + d_3.Roll())\n\n# Add the frequencies\nfor value in range(min_roll_number, max_roll_number + 1):\n frequencies.append(results.count(value))\n\n# Visualize and style the histogram\n\nx_values = [x for x in range(min_roll_number, max_roll_number + 1)]\ndata = [Bar(x=x_values, y=frequencies)]\n\nx_axis_config = {'title': 'Results', 'dtick':1}\ny_axis_config = {'title': 'Frequency of results'}\n\ncustom_layout = Layout(title=f'Results of rolling {min_roll_number} D6 dice {number_of_rolls} times',\n xaxis=x_axis_config, yaxis=y_axis_config)\n\noffline.plot({'data': data, 'layout': custom_layout}, filename='three_d6_dices.html')\n", "sub_path": "Data visualization/Chapter 1/plotply/three_d6.py", "file_name": "three_d6.py", "file_ext": "py", "file_size_in_byte": 1059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "die.Die", "line_number": 7, "usage_type": "call"}, {"api_name": "die.Die", "line_number": 8, "usage_type": "call"}, {"api_name": "die.Die", "line_number": 9, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 32, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 37, "usage_type": "call"}, {"api_name": "plotly.offline.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "318184495", "text": "'''\nDeveloper: Adam M. Terwilliger\nVersion: April 2, 2018\nPurpose: CSE 802 -- HW3 - Q2\nDetails: Pattern Recognition course at MSU\n Explore MLEs and Bayesian estimators\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import uniform\n\n# Author: Jake VanderPlas\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\n# To report a bug or issue, use the following forum:\n# https://groups.google.com/forum/#!forum/astroml-general\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system.
 In that case,\n# you can set usetex to False.\n#from astroML.plotting import setup_text_plots\n#setup_text_plots(fontsize=8, usetex=True)\n\n#------------------------------------------------------------\n# Define the distribution parameters to be plotted\n#W_values = [1.0, 2.0, 3.0]\nW_values = [3.3333]\nlinestyles = ['-', '--', ':']\nmu = 0\nx = np.linspace(0, 0.6, 1000)\n#x = np.linspace(-2, 2, 1000)\n\n\n#------------------------------------------------------------\n# plot the distributions\nfig, ax = plt.subplots(figsize=(5, 3.75))\n\nfor W, ls in zip(W_values, linestyles):\n left = mu - 0.5 * W\n dist = uniform(left, W)\n\n plt.plot(x, dist.pdf(x), ls=ls, c='black',\n label=r'$\\mu=%g,\\ W=%g$' % (mu, W))\n\nplt.plot([0,0], [0, 0.3], c='black')\nplt.plot([0.6,0.6], [0, 0.3], c='black')\nplt.xlim(-0.5, 1.0)\nplt.ylim(0, 0.5)\n\nplt.xlabel(r'$\\theta$')\nplt.ylabel(r'$p(x|\\theta)$')\nplt.title('Uniform Distribution')\n\n#plt.legend()\nplt.savefig('hw3_q2.png')\nplt.show()\n", "sub_path": "hw3/hw3_q2.py", "file_name": "hw3_q2.py", "file_ext": "py", "file_size_in_byte": 1963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "scipy.stats.uniform", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}]} +{"seq_id": "450770436", "text": "#!/usr/bin/env python\nfrom PIL import Image\nimport numpy as np\n\n\ndef dct(matrix):\n # naive O(n^4) 2-D DCT; the input here is always the square 32x32 image below\n n,m = matrix.shape[0],matrix.shape[1]\n pi = np.pi\n result = np.zeros((m,n))\n for i in range(m):\n for j in range(n):\n if i == 0:\n ci = 1 / np.sqrt(m)\n else:\n ci = np.sqrt(2) / np.sqrt(m)\n if (j == 0):\n cj = 1 / np.sqrt(n)\n else:\n cj = np.sqrt(2) / np.sqrt(n)\n sum_ = 0\n for k in range(m):\n for l in range(n):\n dct1 =
 matrix[k,l] * \\\n np.cos((2 * k + 1) * i * pi / (2 * m)) * \\\n np.cos((2 * l + 1) * j * pi / (2 * n))\n sum_ = sum_ + dct1\n result[i,j] = ci * cj * sum_\n return result\n\n\nclass Hash():\n def __init__(self, binary_arr):\n self.hash = binary_arr\n def __sub__(self, other):\n # Hamming distance between the two binary hashes\n return np.count_nonzero(self.hash.flatten() != other.hash.flatten())\n def __str__(self):\n return bin_to_hex(self.hash.flatten())\n def __repr__(self):\n return bin_to_hex(self.hash.flatten())\n\n\ndef bin_to_hex(arr):\n\tbit_string = ''.join(str(b) for b in 1 * arr.flatten())\n\twidth = 16\n\treturn '{:0>{width}x}'.format(int(bit_string, 2), width=width)\n\n\ndef phash(image):\n # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same resampling filter\n image = image.convert(\"L\").resize((32, 32), Image.LANCZOS)\n pixels = np.array(image)\n DCT = dct(pixels)\n low_freq = DCT[:8,:8]\n median = np.median(low_freq)\n res = low_freq > median\n return Hash(res)\n\n\ndef progressBar(value, endvalue, bar_length=20):\n percent = float(value) / endvalue\n arrow = '-' * int(round(percent * bar_length)-1) + '>'\n spaces = ' ' * (bar_length - len(arrow))\n sys.stdout.write(\"\\rProgress: [{0}] {1}%\".format(arrow + spaces, int(round(percent * 100))))\n sys.stdout.flush()\n\n\ndef find_similar_images(userpath):\n print(\"Process started.\")\n def is_image(filename):\n f = filename.lower()\n return f.endswith(\".png\") or f.endswith(\".jpg\") or f.endswith(\".jpeg\")\n image_files = [os.path.join(userpath, path) for path in os.listdir(userpath) if is_image(path)]\n n = len(image_files)\n images = {}\n for i,img in zip(range(n),image_files):\n progressBar(i,n,30)\n hash = phash(Image.open(img))\n images[hash] = images.get(hash, '') + img[len(userpath)+1:]\n progressBar(n,n,30)\n print('\\n')\n hashes = [*images.keys()]\n for i in range(n):\n for j in range(i+1,n):\n if hashes[i] - hashes[j] == 0:\n pixels1 = np.asarray(Image.open(image_files[i]))\n pixels2 = np.asarray(Image.open(image_files[j]))\n if pixels1.shape == pixels2.shape:\n if not np.any(pixels1-pixels2):\n print(f'{images[hashes[i]]} {images[hashes[j]]} (duplicate)')\n continue\n print(f'{images[hashes[i]]} {images[hashes[j]]} (modification)')\n else:\n score = 1-(hashes[i]-hashes[j])/64\n if score >= 0.7:\n print(f'{images[hashes[i]]} {images[hashes[j]]} (similar)')\n\n\ndef dir_path(path):\n if os.path.isdir(path):\n return path\n else:\n raise argparse.ArgumentTypeError(f\"readable_dir:{path} is not a valid path\")\n\n\nif __name__ == '__main__':\n import sys, os, argparse\n parser = argparse.ArgumentParser(description='This script identifies similar images in the directory using pHash method.')\n parser.add_argument('--path',type=dir_path, help='path to images', required=True)\n args = parser.parse_args()\n find_similar_images(args.path)\n", "sub_path": "find_similar_images.py", "file_name": "find_similar_images.py", "file_ext": "py", "file_size_in_byte": 3708, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.pi", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 25, "usage_type": "call"}, {"api_name":
"numpy.count_nonzero", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 49, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 85, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 85, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 87, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "300980711", "text": "import base64\nfrom datetime import timedelta\nimport logging\nimport time\nimport uuid\nimport warnings\nimport httpx\n\nfrom ably.types.capability import Capability\nfrom ably.types.tokendetails import TokenDetails\nfrom ably.types.tokenrequest import TokenRequest\nfrom ably.util.exceptions import AblyException, IncompatibleClientIdException\n\n__all__ = [\"Auth\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass Auth:\n\n class Method:\n BASIC = \"BASIC\"\n TOKEN = \"TOKEN\"\n\n def __init__(self, ably, options):\n self.__ably = ably\n self.__auth_options = options\n if options.token_details:\n self.__client_id = options.token_details.client_id\n else:\n self.__client_id = options.client_id\n self.__client_id_validated = False\n\n self.__basic_credentials = None\n self.__auth_params = None\n self.__token_details = None\n self.__time_offset = None\n\n must_use_token_auth = options.use_token_auth is True\n must_not_use_token_auth = options.use_token_auth is False\n can_use_basic_auth = options.key_secret is not None\n if not must_use_token_auth and can_use_basic_auth:\n # We have the key, no need to authenticate the client\n # default to using basic auth\n log.debug(\"anonymous, using basic auth\")\n self.__auth_mechanism = Auth.Method.BASIC\n basic_key = \"%s:%s\" % (options.key_name, options.key_secret)\n basic_key = base64.b64encode(basic_key.encode('utf-8'))\n self.__basic_credentials = basic_key.decode('ascii')\n return\n elif must_not_use_token_auth and not can_use_basic_auth:\n raise ValueError('If use_token_auth is False you must provide a key')\n\n # Using token auth\n self.__auth_mechanism = Auth.Method.TOKEN\n\n if options.token_details:\n self.__token_details = options.token_details\n elif options.auth_token:\n self.__token_details = TokenDetails(token=options.auth_token)\n else:\n self.__token_details = None\n\n if options.auth_callback:\n log.debug(\"using token auth with auth_callback\")\n elif options.auth_url:\n log.debug(\"using token auth with auth_url\")\n elif options.key_secret:\n log.debug(\"using token auth with client-side signing\")\n elif options.auth_token:\n log.debug(\"using token auth with supplied token only\")\n elif options.token_details:\n log.debug(\"using token auth with supplied token_details\")\n else:\n raise ValueError(\"Can't authenticate via token, must provide \"\n \"auth_callback, auth_url, key, token or a TokenDetail\")\n\n async def 
 __authorize_when_necessary(self, token_params=None, auth_options=None, force=False):\n self.__auth_mechanism = Auth.Method.TOKEN\n\n if token_params is None:\n token_params = dict(self.auth_options.default_token_params)\n else:\n self.auth_options.default_token_params = dict(token_params)\n self.auth_options.default_token_params.pop('timestamp', None)\n\n if auth_options is not None:\n self.auth_options.replace(auth_options)\n auth_options = dict(self.auth_options.auth_options)\n if self.client_id is not None:\n token_params['client_id'] = self.client_id\n\n token_details = self.__token_details\n if not force and not self.token_details_has_expired():\n log.debug(\"using cached token; expires = %d\",\n token_details.expires)\n return token_details\n\n self.__token_details = await self.request_token(token_params, **auth_options)\n self._configure_client_id(self.__token_details.client_id)\n return self.__token_details\n\n def token_details_has_expired(self):\n token_details = self.__token_details\n if token_details is None:\n return True\n\n expires = token_details.expires\n if expires is None:\n return False\n\n timestamp = self._timestamp()\n if self.__time_offset:\n timestamp += self.__time_offset\n\n return expires < timestamp + token_details.TOKEN_EXPIRY_BUFFER\n\n async def authorize(self, token_params=None, auth_options=None):\n return await self.__authorize_when_necessary(token_params, auth_options, force=True)\n\n async def authorise(self, *args, **kwargs):\n warnings.warn(\n \"authorise is deprecated and will be removed in v2.0, please use authorize\",\n DeprecationWarning)\n return await self.authorize(*args, **kwargs)\n\n async def request_token(self, token_params=None,\n # auth_options\n key_name=None, key_secret=None, auth_callback=None,\n auth_url=None, auth_method=None, auth_headers=None,\n auth_params=None, query_time=None):\n token_params = token_params or {}\n token_params = dict(self.auth_options.default_token_params,\n **token_params)\n key_name = key_name or self.auth_options.key_name\n key_secret = key_secret or self.auth_options.key_secret\n\n log.debug(\"Auth callback: %s\" % auth_callback)\n log.debug(\"Auth options: %s\" % self.auth_options)\n if query_time is None:\n query_time = self.auth_options.query_time\n query_time = bool(query_time)\n auth_callback = auth_callback or self.auth_options.auth_callback\n auth_url = auth_url or self.auth_options.auth_url\n\n auth_params = auth_params or self.auth_options.auth_params or {}\n\n auth_method = (auth_method or self.auth_options.auth_method).upper()\n\n auth_headers = auth_headers or self.auth_options.auth_headers or {}\n\n log.debug(\"Token Params: %s\" % token_params)\n if auth_callback:\n log.debug(\"using token auth with authCallback\")\n token_request = await auth_callback(token_params)\n elif auth_url:\n log.debug(\"using token auth with authUrl\")\n\n token_request = await self.token_request_from_auth_url(\n auth_method, auth_url, token_params, auth_headers, auth_params)\n else:\n token_request = await self.create_token_request(\n token_params, key_name=key_name, key_secret=key_secret,\n query_time=query_time)\n if isinstance(token_request, TokenDetails):\n return token_request\n elif isinstance(token_request, dict) and 'issued' in token_request:\n return TokenDetails.from_dict(token_request)\n elif isinstance(token_request, dict):\n token_request = TokenRequest.from_json(token_request)\n elif isinstance(token_request, str):\n return TokenDetails(token=token_request)\n\n token_path = \"/keys/%s/requestToken\" %
 token_request.key_name\n\n response = await self.ably.http.post(\n token_path,\n headers=auth_headers,\n body=token_request.to_dict(),\n skip_auth=True\n )\n\n AblyException.raise_for_response(response)\n response_dict = response.to_native()\n log.debug(\"Token: %s\" % str(response_dict.get(\"token\")))\n return TokenDetails.from_dict(response_dict)\n\n async def create_token_request(self, token_params=None,\n key_name=None, key_secret=None, query_time=None):\n token_params = token_params or {}\n token_request = {}\n\n key_name = key_name or self.auth_options.key_name\n key_secret = key_secret or self.auth_options.key_secret\n if not key_name or not key_secret:\n log.debug('key_name or key_secret blank')\n raise AblyException(\"No key specified: no means to generate a token\", 401, 40101)\n\n token_request['key_name'] = key_name\n if token_params.get('timestamp'):\n token_request['timestamp'] = token_params['timestamp']\n else:\n if query_time is None:\n query_time = self.auth_options.query_time\n\n if query_time:\n if self.__time_offset is None:\n server_time = await self.ably.time()\n local_time = self._timestamp()\n self.__time_offset = server_time - local_time\n token_request['timestamp'] = server_time\n else:\n local_time = self._timestamp()\n token_request['timestamp'] = local_time + self.__time_offset\n else:\n token_request['timestamp'] = self._timestamp()\n\n token_request['timestamp'] = int(token_request['timestamp'])\n\n ttl = token_params.get('ttl')\n if ttl is not None:\n if isinstance(ttl, timedelta):\n ttl = ttl.total_seconds() * 1000\n token_request['ttl'] = int(ttl)\n\n capability = token_params.get('capability')\n if capability is not None:\n token_request['capability'] = str(Capability(capability))\n\n token_request[\"client_id\"] = (\n token_params.get('client_id') or self.client_id)\n\n # Note: There is no expectation that the client\n # specifies the nonce; this is done by the library\n # However, this can be overridden by the client\n # simply for testing purposes\n token_request[\"nonce\"] = token_params.get('nonce') or self._random_nonce()\n\n token_request = TokenRequest(**token_request)\n\n if token_params.get('mac') is None:\n # Note: There is no expectation that the client\n # specifies the mac; this is done by the library\n # However, this can be overridden by the client\n # simply for testing purposes.\n token_request.sign_request(key_secret.encode('utf8'))\n else:\n token_request.mac = token_params['mac']\n\n return token_request\n\n @property\n def ably(self):\n return self.__ably\n\n @property\n def auth_mechanism(self):\n return self.__auth_mechanism\n\n @property\n def auth_options(self):\n return self.__auth_options\n\n @property\n def auth_params(self):\n return self.__auth_params\n\n @property\n def basic_credentials(self):\n return self.__basic_credentials\n\n @property\n def token_credentials(self):\n if self.__token_details:\n token = self.__token_details.token\n token_key = base64.b64encode(token.encode('utf-8'))\n return token_key.decode('ascii')\n\n @property\n def token_details(self):\n return self.__token_details\n\n @property\n def client_id(self):\n return self.__client_id\n\n @property\n def time_offset(self):\n return self.__time_offset\n\n def _configure_client_id(self, new_client_id):\n # If new client ID from Ably is a wildcard, but preconfigured clientId is set,\n # then keep the existing clientId\n if self.client_id != '*' and new_client_id == '*':\n self.__client_id_validated = True\n return\n\n # If client_id is defined and not a
 wildcard, prevent it from changing; this is not supported\n if self.client_id is not None and self.client_id != '*' and new_client_id != self.client_id:\n raise IncompatibleClientIdException(\n \"Client ID is immutable once configured for a client. \"\n \"Client ID cannot be changed to '{}'\".format(new_client_id), 400, 40012)\n\n self.__client_id_validated = True\n self.__client_id = new_client_id\n\n def can_assume_client_id(self, assumed_client_id):\n if self.__client_id_validated:\n return self.client_id == '*' or self.client_id == assumed_client_id\n elif self.client_id is None or self.client_id == '*':\n return True # client ID is unknown\n else:\n return self.client_id == assumed_client_id\n\n async def _get_auth_headers(self):\n if self.__auth_mechanism == Auth.Method.BASIC:\n # RSA7e2\n if self.client_id:\n return {\n 'Authorization': 'Basic %s' % self.basic_credentials,\n 'X-Ably-ClientId': base64.b64encode(self.client_id.encode('utf-8'))\n }\n return {\n 'Authorization': 'Basic %s' % self.basic_credentials,\n }\n else:\n await self.__authorize_when_necessary()\n return {\n 'Authorization': 'Bearer %s' % self.token_credentials,\n }\n\n def _timestamp(self):\n \"\"\"Returns the local time in milliseconds since the unix epoch\"\"\"\n return int(time.time() * 1000)\n\n def _random_nonce(self):\n return uuid.uuid4().hex[:16]\n\n async def token_request_from_auth_url(self, method, url, token_params, headers, auth_params):\n body = None\n params = None\n if method == 'GET':\n body = {}\n params = dict(auth_params, **token_params)\n elif method == 'POST':\n params = {}\n body = dict(auth_params, **token_params)\n\n from ably.http.http import Response\n async with httpx.AsyncClient(http2=True) as client:\n resp = await client.request(method=method, url=url, headers=headers, params=params, data=body)\n response = Response(resp)\n\n AblyException.raise_for_response(response)\n try:\n token_request = response.to_native()\n except ValueError:\n token_request = response.text\n return token_request\n", "sub_path": "ably/rest/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 13689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "ably.types.capability", "line_number": 26, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 48, "usage_type": "call"}, {"api_name": "ably.types.tokendetails.TokenDetails", "line_number": 60, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 122, "usage_type": "call"}, {"api_name": "ably.types.tokendetails.TokenDetails", "line_number": 165, "usage_type": "argument"}, {"api_name": "ably.types.tokendetails.TokenDetails.from_dict", "line_number": 168, "usage_type": "call"}, {"api_name": "ably.types.tokendetails.TokenDetails", "line_number": 168, "usage_type": "name"}, {"api_name": "ably.types.tokenrequest.TokenRequest.from_json", "line_number": 170, "usage_type": "call"}, {"api_name": "ably.types.tokenrequest.TokenRequest", "line_number": 170, "usage_type": "name"}, {"api_name": "ably.types.tokendetails.TokenDetails", "line_number": 172, "usage_type": "call"}, {"api_name": "ably.util.exceptions.AblyException.raise_for_response", "line_number": 183, "usage_type": "call"}, {"api_name": "ably.util.exceptions.AblyException", "line_number": 183, "usage_type": "name"}, {"api_name": "ably.types.tokendetails.TokenDetails.from_dict", "line_number": 186, "usage_type": "call"},
{"api_name": "ably.types.tokendetails.TokenDetails", "line_number": 186, "usage_type": "name"}, {"api_name": "ably.util.exceptions.AblyException", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 222, "usage_type": "argument"}, {"api_name": "ably.types.capability.Capability", "line_number": 228, "usage_type": "call"}, {"api_name": "ably.types.tokenrequest.TokenRequest", "line_number": 239, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 276, "usage_type": "call"}, {"api_name": "ably.util.exceptions.IncompatibleClientIdException", "line_number": 300, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 321, "usage_type": "call"}, {"api_name": "time.time", "line_number": 334, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 337, "usage_type": "call"}, {"api_name": "httpx.AsyncClient", "line_number": 350, "usage_type": "call"}, {"api_name": "ably.http.http.Response", "line_number": 352, "usage_type": "call"}, {"api_name": "ably.util.exceptions.AblyException.raise_for_response", "line_number": 354, "usage_type": "call"}, {"api_name": "ably.util.exceptions.AblyException", "line_number": 354, "usage_type": "name"}]} +{"seq_id": "208724565", "text": "# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nfrom pants.backend.project_info.tasks.ide_gen import Project, SourceSet\nfrom pants.source.source_root import SourceRootConfig\nfrom pants_test.base_test import BaseTest\n\n\nclass IdeGenTest(BaseTest):\n def test_collapse_source_root(self):\n self.context(for_subsystems=[SourceRootConfig], options={\n SourceRootConfig.options_scope: {\n 'source_roots': {\n '/src/java': [],\n '/tests/java': [],\n '/some/other': []\n },\n 'unmatched': 'fail'\n }\n })\n source_roots = SourceRootConfig.global_instance().get_source_roots()\n source_set_list = []\n self.assertEquals([], Project._collapse_by_source_root(source_roots, source_set_list))\n\n source_sets = [\n SourceSet('/repo-root', 'src/java', 'org/pantsbuild/app', False),\n SourceSet('/repo-root', 'tests/java', 'org/pantsbuild/app', True),\n SourceSet('/repo-root', 'some/other', 'path', False),\n ]\n\n results = Project._collapse_by_source_root(source_roots, source_sets)\n\n self.assertEquals(SourceSet('/repo-root', 'src/java', '', False), results[0])\n self.assertFalse(results[0].is_test)\n self.assertEquals(SourceSet('/repo-root', 'tests/java', '', True), results[1])\n self.assertTrue(results[1].is_test)\n # If there is no registered source root, the SourceSet should be returned unmodified\n self.assertEquals(source_sets[2], results[2])\n self.assertFalse(results[2].is_test)\n\n def test_source_set(self):\n source_set1 = SourceSet('repo-root', 'path/to/build', 'org/pantsbuild/project', False)\n # only the first 3 parameters are considered keys\n self.assertEquals(('repo-root', 'path/to/build', 'org/pantsbuild/project'),\n source_set1._key_tuple)\n source_set2 = SourceSet('repo-root', 'path/to/build', 'org/pantsbuild/project', True)\n # Don't consider the test flag\n self.assertEquals(source_set1, source_set2)\n\n def assert_dedup(self, expected, actual):\n self.assertEquals([expected], actual)\n # that test is not good enough, 'resources_only' and 'is_test' aren't considered keys for the set\n 
 self.assertEquals(expected.resources_only, actual[0].resources_only)\n self.assertEquals(expected.is_test, actual[0].is_test)\n\n def test_dedup_sources_simple(self):\n self.assertEquals([\n SourceSet('foo', 'bar', ''),\n SourceSet('foo', 'bar', 'baz'),\n SourceSet('foo', 'bar', 'foobar')\n ],\n Project.dedup_sources([\n SourceSet('foo', 'bar', ''),\n SourceSet('foo', 'bar', 'foobar'),\n SourceSet('foo', 'bar', 'baz'),\n SourceSet('foo', 'bar', 'baz'),\n SourceSet('foo', 'bar', 'foobar'),\n SourceSet('foo', 'bar', 'foobar'),\n SourceSet('foo', 'bar', 'baz'),\n ]))\n\n def test_dedup_sources_resource_and_code(self):\n \"\"\"Show that a non-resources-only source set turns off the resources_only flag\"\"\"\n deduped_sources = Project.dedup_sources([\n SourceSet('foo', 'bar', 'baz', resources_only=True),\n SourceSet('foo', 'bar', 'baz'),\n SourceSet('foo', 'bar', 'baz', resources_only=True),\n ])\n self.assert_dedup(SourceSet('foo', 'bar', 'baz'), deduped_sources)\n\n def test_dedup_test_sources(self):\n \"\"\"Show that is_test on a non-resources_only source set turns on is_test\"\"\"\n deduped_sources = Project.dedup_sources([\n SourceSet('foo', 'bar', 'baz', is_test=True),\n SourceSet('foo', 'bar', 'baz'),\n SourceSet('foo', 'bar', 'baz', is_test=True),\n ])\n self.assert_dedup(SourceSet('foo', 'bar', 'baz', is_test=True), deduped_sources)\n\n def test_dedup_test_resources(self):\n \"\"\"Show that competing is_test values on a resources-only source set turn off is_test\"\"\"\n deduped_sources = Project.dedup_sources([\n SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),\n SourceSet('foo', 'bar', 'baz', is_test=False, resources_only=True),\n SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),\n ])\n self.assert_dedup(SourceSet('foo', 'bar', 'baz', resources_only=True), deduped_sources)\n\n def test__only_test_resources(self):\n deduped_sources = Project.dedup_sources([\n SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),\n SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),\n ])\n self.assert_dedup(SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),\n deduped_sources)\n\n def test_all_together(self):\n deduped_sources = Project.dedup_sources([\n SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=False),\n SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),\n SourceSet('foo', 'bar', 'baz', is_test=False, resources_only=True),\n SourceSet('foo', 'bar', 'baz', is_test=False, resources_only=False),\n ])\n self.assert_dedup(SourceSet('foo', 'bar', 'baz', is_test=True), deduped_sources)\n", "sub_path": "tests/python/pants_test/backend/project_info/tasks/test_ide_gen.py", "file_name": "test_ide_gen.py", "file_ext": "py", "file_size_in_byte": 5164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pants_test.base_test.BaseTest", "line_number": 13, "usage_type": "name"}, {"api_name": "pants.source.source_root.SourceRootConfig", "line_number": 15, "usage_type": "name"}, {"api_name": "pants.source.source_root.SourceRootConfig.options_scope", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pants.source.source_root.SourceRootConfig", "line_number": 16, "usage_type": "name"}, {"api_name": "pants.source.source_root.SourceRootConfig.global_instance", "line_number": 25, "usage_type": "call"}, {"api_name": "pants.source.source_root.SourceRootConfig", "line_number": 25, "usage_type": "name"}, {"api_name":
"pants.backend.project_info.tasks.ide_gen.Project._collapse_by_source_root", "line_number": 27, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 27, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 30, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 31, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 32, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project._collapse_by_source_root", "line_number": 35, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 35, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 37, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 39, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 46, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 50, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 62, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 63, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 64, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project.dedup_sources", "line_number": 66, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 66, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 67, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 68, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 69, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 70, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 71, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 72, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 73, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project.dedup_sources", "line_number": 78, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 78, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 79, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 80, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 81, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 83, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project.dedup_sources", "line_number": 87, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 87, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 88, "usage_type": "call"}, 
{"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 89, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 90, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 92, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project.dedup_sources", "line_number": 96, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 96, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 97, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 98, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 99, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 101, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project.dedup_sources", "line_number": 104, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 104, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 105, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 106, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 108, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project.dedup_sources", "line_number": 112, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.Project", "line_number": 112, "usage_type": "name"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 113, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 114, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 115, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 116, "usage_type": "call"}, {"api_name": "pants.backend.project_info.tasks.ide_gen.SourceSet", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "12212919", "text": "import requests\nimport calendar\nimport time\nimport json\nimport dateutil.parser\nimport datetime\nimport hashlib\nfrom docassemble.base.util import *\nfrom azure.storage.blob import BlockBlobService\n\nAD_URL = \"https://login.microsoftonline.com/a2pca.onmicrosoft.com/oauth2/token\"\nCITATION_LOOKUP_URL = 'https://a2papi.azurewebsites.net/api/case/citation'\nCASE_LOOKUP_URL = 'https://a2papi.azurewebsites.net/api/case/cases'\nSUBMIT_URL = 'https://a2papi.azurewebsites.net/api/request'\n\ndef fetch_citation_data(citation_number, county):\n citation_params = {\n 'num': citation_number,\n 'county': county\n }\n res = __do_request(CITATION_LOOKUP_URL, citation_params)\n return __format_response(res)\n\ndef fetch_case_data(first_name, last_name, dob, drivers_license, county):\n case_params = {\n 'firstName': first_name,\n 'lastName': last_name,\n 'dateOfBirth': \"%s/%s/%s\" % (dob.month, dob.day, dob.year),\n 'driversLicense': drivers_license,\n 'county': county\n }\n res = __do_request(CASE_LOOKUP_URL, case_params)\n return __format_response(res)\n\ndef date_from_iso8601(date_string):\n return dateutil.parser.parse(date_string).date()\n\ndef format_money(money_string):\n 
return '${:,.2f}'.format(money_string)\n\ndef __format_response(response, request_body=None):\n data = {}\n data['response_code'] = response.status_code\n\n if response.ok:\n data['data'] = response.json()\n data['success'] = True\n data['error'] = None\n\n if request_body:\n data['request_body'] = request_body\n else:\n data['data'] = {}\n data['success'] = False\n data['error'] = response.text\n\n return data\n\ndef __do_request(url, params):\n oauth_params = {\n 'resource': '3b347c8c-3faa-4331-8273-a5f575997d4e',\n 'grant_type': 'client_credentials',\n 'client_id': __get_a2p_config()[\"client_id\"],\n 'client_secret': __get_a2p_config()[\"client_secret\"],\n 'scope': 'openid 3b347c8c-3faa-4331-8273-a5f575997d4e'\n }\n r = requests.post(AD_URL, oauth_params)\n data = r.json()\n access_token = data['access_token']\n\n headers = { 'Authorization': 'Bearer ' + access_token, 'Content-Type': 'application/json' }\n return requests.post(url, data=None, json=params, headers=headers)\n\ndef __get_a2p_config():\n return get_config('a2p')\n\ndef __submit_image_from_url(url):\n blob_service = BlockBlobService(account_name='a2pca', account_key=__get_a2p_config()['blob_account_key'])\n image_body = requests.get(url).content\n filename = 'a2p_daupload_' + hashlib.sha224(image_body).hexdigest()\n blob_service.create_blob_from_bytes('attachments', filename, image_body)\n\n return {\n \"fileName\": filename,\n \"blobName\": filename,\n \"size\": len(image_body)\n }\n\ndef build_submit_payload(data, attachment_urls):\n benefit_files_data = []\n\n for url in attachment_urls:\n log(\"Uploading file: %s\" % url)\n image_meta = __submit_image_from_url(url)\n benefit_files_data.append(image_meta)\n\n no_proof_fields = [\n 'calfresh_no_proof',\n 'medi_cal_no_proof',\n 'ssi_no_proof',\n 'ssp_no_proof',\n 'cr_ga_no_proof',\n 'ihss_no_proof',\n 'tanf_no_proof',\n 'cal_works_no_proof',\n 'capi_no_proof',\n ]\n\n no_docs_upload_comments = []\n for field in no_proof_fields:\n reason = data.get(field + \"_reason\")\n if reason:\n no_docs_upload_comments.append(\"%s: %s\" % (field, reason))\n\n case_information = data.get('case_information')\n\n benefits = data.get('benefits', {}).get('elements', {})\n no_benefits = True\n for benefit_name in ['cal_fresh', 'ssi', 'ssp', 'medi_cal', 'cr_ga', 'ihss', 'cal_works', 'tanf', 'capi', 'other']:\n if benefits.get(benefit_name):\n no_benefits = False\n\n submitted_on = datetime.datetime.now().isoformat()\n\n on_other_benefits = benefits.get('other', False)\n other_benefits_desc = None\n if on_other_benefits:\n other_benefits_desc = data.get('other_benefits_name')\n no_benefits = False\n\n violDescriptions = []\n idx = 0\n for charge in case_information.get('charges', {}):\n descr = []\n idx += 1\n descr.append(\"Count %s\" % idx)\n if charge.get('chargeCode'):\n descr.append(charge.get('chargeCode'))\n descr.append(charge.get('violationDescription'))\n violDescriptions.append(\"-\".join(descr))\n\n additional_requests = data.get('additional_requests', {}).get('elements', {})\n\n difficultyToVisitCourtDueTo = data.get(\"difficult_open_text\", \"\")\n for k, v in data.get('why_difficult', {}).get('elements', {}).items():\n if v:\n difficultyToVisitCourtDueTo += \"/ \" + k\n\n request_params = {\n \"requestStatus\": \"Submitted\",\n \"petition\": {\n \"noBenefits\": no_benefits,\n \"onFoodStamps\": benefits.get('cal_fresh', False),\n \"onSuppSecIncome\": benefits.get('ssi', False),\n \"onSSP\": benefits.get('ssp', False),\n \"onMedical\": benefits.get('medi_cal', False),\n
\"onCountyRelief\": benefits.get('cr_ga', False),\n \"onIHSS\": benefits.get('ihss', False),\n \"onCalWorks\": benefits.get('cal_works', False),\n \"onTANF\": benefits.get('tanf', False),\n \"onCAPI\": benefits.get('capi', False),\n \"benefitFiles\": benefit_files_data,\n \"rent\": data.get('monthly_rent'),\n \"mortgage\": data.get('mortgage'),\n \"phone\": data.get('phone_bill'),\n \"food\": data.get('food'),\n \"insurance\": data.get('insurance'),\n \"isBenefitsProof\": len(attachment_urls) > 0,\n \"isCivilAssessWaiver\": False,\n \"clothes\": data.get('clothing'),\n \"childSpousalSupp\": data.get('child_spousal_support'),\n \"carPayment\": data.get('transportation'),\n \"utilities\": data.get('utilities'),\n \"otherExpenses\": [],\n \"isMoreTimeToPay\": additional_requests.get('extension', False),\n \"isPaymentPlan\": additional_requests.get('payment_plan', False),\n \"isReductionOfPayment\": True,\n \"isCommunityService\": additional_requests.get('community_service', False),\n \"isOtherRequest\": False,\n \"otherRequestDesc\": data.get('other_hardship'),\n \"selectAllRights\": True,\n \"representByAttorneyRight\": True,\n \"speedyTrialRight\": True,\n \"presentEvidenceRight\": True,\n \"testifyUnderOathRight\": True,\n \"remainSilentRight\": True,\n \"isPleadGuilty\": data.get('plea', '') == \"agree_guilty\",\n \"isPleadNoContest\": data.get('plea', '') == \"agree_no_contest\",\n \"supportingFiles\": [],\n \"noDocsToUploadReason\": \"See comments\",\n \"noDocsToUploadComments\": \"\\n\".join(no_docs_upload_comments),\n \"isDeclare\": True,\n \"onOtherBenefits\": on_other_benefits,\n \"onOtherBenefitsDesc\": other_benefits_desc,\n },\n \"caseInformation\": {\n \"caseNumber\": case_information.get('caseNumber'),\n \"citationDocumentId\": case_information.get('documentid'),\n \"citationNumber\": case_information.get('citationNumber'),\n \"civilAssessFee\": case_information.get('civilAssessFee'),\n \"county\": data.get('county'),\n \"fullName\": case_information.get('firstName', '') + ' ' + case_information.get('lastName', ''),\n \"totalDueAmt\": case_information.get('totalDueAmt'),\n \"violationDate\": case_information.get('charges', [])[0].get('violationDate'),\n \"violationDescription\": \"\\n\".join(violDescriptions),\n\n },\n \"benefitsStatus\": not no_benefits,\n \"defendantInformation\": {\n \"incomeAmount\": data.get('income'),\n \"incomeFrequency\": \"Month\",\n \"totalFamilyMembers\": data.get('residents'),\n },\n \"survey\": {\n \"isAddressedTrafficMatter\": data.get('tool_helpful', '') + ',' + data.get('tool_difficult', ''),\n \"willYouVisitCourt\": data.get('prefer'),\n \"difficultyToVisitCourtDueTo\": difficultyToVisitCourtDueTo,\n },\n \"submittedById\": \"0\",\n \"judgment\": \"Submitted\",\n \"submittedByEmail\": data.get('email'),\n \"submittedOn\": submitted_on,\n \"needMoreInformation\": [],\n \"toolRecommendations\": [],\n \"judicialOrder\": [],\n \"auditInformation\": [],\n \"__v\": 0\n }\n return request_params\n\n\ndef submit_interview(data, attachment_urls=[], debug=False):\n params = build_submit_payload(data, attachment_urls)\n log(\"Submitting Payload: %s\" % params)\n res = __do_request(SUBMIT_URL, params)\n\n if debug:\n return __format_response(res, params)\n else:\n return __format_response(res)\n\n\n#print(fetch_citation_data('CT98966', 'Tulare'))\n# print(fetch_case_data('john', 'doe', '11/26/1985', '12345', 'Santa Clara'))\n#print(submit_interview({ 'citationNumber': 1234 }))\n\n\n", "sub_path": "docassemble/jcc/abilitytopay/a2papi.py", "file_name": 
"a2papi.py", "file_ext": "py", "file_size_in_byte": 9169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "dateutil.parser.parser.parse", "line_number": 36, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 36, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 36, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 67, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 72, "usage_type": "call"}, {"api_name": "azure.storage.blob.BlockBlobService", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "hashlib.sha224", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "attribute"}]} +{"seq_id": "341788346", "text": "import requests\r\nfrom lxml import etree\r\nfrom keras.preprocessing.text import text_to_word_sequence as sq2wsq\r\nimport json\r\nfrom requests.adapters import HTTPAdapter\r\nimport time\r\n\r\nword_size=150\r\nheader={\"user-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134\"}\r\nfile=[]\r\nfile.append(\"phaseA_4b_01.json\")\r\n\r\nfor f_name in file:\r\n print(f_name)\r\n f=open(f_name,encoding=\"utf-8\")\r\n phase=json.load(f)\r\n phase=phase[\"questions\"]\r\n for e1,e2,in enumerate(phase):\r\n body=e2[\"body\"]\r\n body=sq2wsq(body)\r\n mid=[]\r\n for i in body:\r\n if len(i)>2 and i not in stopword:\r\n mid.append(i)\r\n body=mid\r\n xm='https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id='+UID+'&retmode=abstract&rettype=text'\r\n try:\r\n r=requests.get(xm,headers=header,timeout=15)\r\n except:\r\n r=requests.get(xm,headers=header,timeout=15)", "sub_path": "RAE-Recursive-AutoEncoder-for-bioasq-taskB-phaseA-snippets-retrieve-/get-s.py", "file_name": "get-s.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.text_to_word_sequence", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "391691244", "text": "import config\nimport dataloader\nimport engine \nimport ImageTransformer\n\nimport transformers\nimport torch \nimport torch.nn as nn\nimport numpy as np \nimport torchvision\n\nimport albumentations as alb\n\n\ndef run():\n train_dataset = torchvision.datasets.CIFAR10(root='input/data', train=True, download = True)\n val_dataset = torchvision.datasets.CIFAR10(root='input/data', train=False, download = True)\n\n train_transform = alb.Compose([\n alb.Resize(config.image_height, config.image_width, always_apply=True),\n alb.Normalize(config.mean, config.std, always_apply=True),\n alb.HorizontalFlip(p=0.1),\n alb.RandomBrightness(p=0.2),\n alb.RandomContrast(p=0.1),\n alb.RGBShift(p=0.1),\n alb.GaussNoise(p=0.1),\n ])\n\n val_transforms = alb.Compose([\n alb.Resize(config.image_height, config.image_width, always_apply=True),\n alb.Normalize(config.mean, config.std, always_apply=True)\n ])\n\n \n train_data = dataloader.dataloader(train_dataset, 
train_transform)\n val_data = dataloader.dataloader(val_dataset, val_transforms)\n\n\n train_loader = torch.utils.data.DataLoader(\n train_data,\n num_workers=4,\n pin_memory=True,\n batch_size=config.Batch_Size\n )\n\n val_loader = torch.utils.data.DataLoader(\n val_data,\n num_workers=4,\n pin_memory=True,\n batch_size=config.Batch_Size\n )\n\n model = ImageTransformer.ViT(\n patch_height = 16,\n patch_width = 16,\n embedding_dims = 768,\n dropout = 0.1,\n heads = 4,\n num_layers = 4,\n forward_expansion = 4,\n max_len = int((32*32)/(16*16)),\n layer_norm_eps = 1e-5,\n num_classes = 10,\n )\n \n if torch.cuda.is_available():\n accelarator = 'cuda'\n else:\n accelarator = 'cpu'\n \n device = torch.device(accelarator)\n torch.backends.cudnn.benchmark = True\n\n model = model.to(device)\n\n optimizer = transformers.AdamW(model.parameters(), lr=config.LR, weight_decay=config.weight_decay)\n\n num_training_steps = int((config.Epochs*len(train_dataset))/config.Batch_Size)\n\n scheduler = transformers.get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps = int(0.1*num_training_steps),\n num_training_steps = num_training_steps\n )\n \n best_acc = 0\n best_model = 0\n for epoch in range(config.Epochs):\n train_acc, train_loss = engine.train_fn(model, train_loader, optimizer, scheduler, device)\n val_acc, val_loss = engine.eval_fn(model, val_loader, device)\n print(f'\\nEPOCH = {epoch+1} / {config.Epochs} | LR = {scheduler.get_last_lr()[0]}')\n print(f'TRAIN ACC = {train_acc*100}% | TRAIN LOSS = {train_loss}')\n print(f'VAL ACC = {val_acc*100}% | VAL LOSS = {val_loss}')\n if val_acc > best_acc:\n best_acc = val_acc\n best_model = model.state_dict()\n\n torch.save(best_model, config.Model_Path) \n\nif __name__ == \"__main__\":\n run()\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "torchvision.datasets.CIFAR10", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 17, "usage_type": "attribute"}, {"api_name": "albumentations.Compose", "line_number": 19, "usage_type": "call"}, {"api_name": "albumentations.Resize", "line_number": 20, "usage_type": "call"}, {"api_name": "config.image_height", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.image_width", "line_number": 20, "usage_type": "attribute"}, {"api_name": "albumentations.Normalize", "line_number": 21, "usage_type": "call"}, {"api_name": "config.mean", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.std", "line_number": 21, "usage_type": "attribute"}, {"api_name": "albumentations.HorizontalFlip", "line_number": 22, "usage_type": "call"}, {"api_name": "albumentations.RandomBrightness", "line_number": 23, "usage_type": "call"}, {"api_name": "albumentations.RandomContrast", "line_number": 24, "usage_type": "call"}, {"api_name": "albumentations.RGBShift", "line_number": 25, "usage_type": "call"}, {"api_name": "albumentations.GaussNoise", "line_number": 26, "usage_type": "call"}, {"api_name": "albumentations.Compose", "line_number": 29, "usage_type": "call"}, {"api_name": "albumentations.Resize", "line_number": 30, "usage_type": "call"}, {"api_name": "config.image_height", "line_number": 30, "usage_type": "attribute"}, 
{"api_name": "config.image_width", "line_number": 30, "usage_type": "attribute"}, {"api_name": "albumentations.Normalize", "line_number": 31, "usage_type": "call"}, {"api_name": "config.mean", "line_number": 31, "usage_type": "attribute"}, {"api_name": "config.std", "line_number": 31, "usage_type": "attribute"}, {"api_name": "dataloader.dataloader", "line_number": 35, "usage_type": "call"}, {"api_name": "dataloader.dataloader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 39, "usage_type": "attribute"}, {"api_name": "config.Batch_Size", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 46, "usage_type": "attribute"}, {"api_name": "config.Batch_Size", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ImageTransformer.ViT", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 72, "usage_type": "attribute"}, {"api_name": "transformers.AdamW", "line_number": 76, "usage_type": "call"}, {"api_name": "config.LR", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.weight_decay", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.Epochs", "line_number": 78, "usage_type": "attribute"}, {"api_name": "config.Batch_Size", "line_number": 78, "usage_type": "attribute"}, {"api_name": "transformers.get_linear_schedule_with_warmup", "line_number": 80, "usage_type": "call"}, {"api_name": "config.Epochs", "line_number": 88, "usage_type": "attribute"}, {"api_name": "engine.train_fn", "line_number": 89, "usage_type": "call"}, {"api_name": "engine.eval_fn", "line_number": 90, "usage_type": "call"}, {"api_name": "config.Epochs", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 98, "usage_type": "call"}, {"api_name": "config.Model_Path", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "626187006", "text": "\n'''\nPart of the code is drawn from \nhttps://github.com/lixucuhk/adversarial-attack-on-GMM-i-vector-based-speaker-verification-systems\nPaper:\nAdversarial Attacks on GMM i-vector based Speaker Verification Systems\n'''\n\nimport torch\nimport kaldi_io\n\n \nclass PLDA(object):\n\tdef __init__(self, mdlfile, random=False, device=\"cpu\"):\n\n\t\tself.device = device\n\n\t\tif random == True:\n\t\t\tself.dim = 600\n\t\t\tself.mean = torch.ones(self.dim, device=self.device)\n\t\t\tself.transform = torch.ones(self.dim, self.dim, device=self.device)\n\t\t\tself.psi = torch.ones(self.dim, device=self.device)\n\t\telse:\n\t\t\trdfile = open(mdlfile, 'r')\n\t\t\tline = rdfile.readline()\n\t\t\tdata = line.split()[2:-1]\n\t\t\tself.dim = len(data)\n\t\t\tfor i in range(self.dim):\n\t\t\t\tdata[i] = float(data[i])\n\t\t\tself.mean = torch.tensor(data, device=self.device)\n\n\t\t\tline = rdfile.readline()\n\t\t\tline = rdfile.readline()\n\t\t\ttransform_matrix = []\n\t\t\tfor i in range(self.dim):\n\t\t\t\tdata = line.split(' ')[2:-1]\n\t\t\t\tfor j in range(self.dim):\n\t\t\t\t\tdata[j] = float(data[j])\n\t\t\t\ttransform_matrix.append(data)\n\t\t\t\tline = rdfile.readline()\n\t\t\tself.transform = 
torch.tensor(transform_matrix, device=self.device)\n\n\t\t\tdata = line.split()[1:-1]\n\t\t\tfor i in range(self.dim):\n\t\t\t\tdata[i] = float(data[i])\n\t\t\tself.psi = torch.tensor(data, device=self.device)\n\n\t\t\trdfile.close()\n\n\tdef ReadIvectors(self, ivectorfile):\n\t\tkeys = []\n\t\tdata = []\n\t\ti = 0\n\t\tfor key, mat in kaldi_io.read_vec_flt_scp(ivectorfile):\n\t\t\t# print(key)\n\t\t\t# print(mat)\n\t\t\t# print(len(mat.tolist()))\n\t\t\t# exit(0)\n\t\t\ti += 1\n\t\t\tkeys.append(key) \n\t\t\tdata.append(mat.tolist())\n\t\tprint('totally %d ivectors' %(i))\n\t\treturn keys, data\n\n\tdef TransformIvector(self, ivector, num_examples, simple_length_norm, normalize_length):\n\t\t# print(self.transform, self.mean, ivector),\n\t\ttrans_ivector = torch.matmul(self.transform, ivector-self.mean)\n\t\tfactor = 1.0\n\t\tif simple_length_norm == True:\n\t\t\tfactor = torch.sqrt(self.dim)/torch.norm(trans_ivector, 2)\n\t\telif normalize_length == True:\n\t\t\tfactor = self.GetNormalizaionFactor(trans_ivector, num_examples)\n\n\t\t# print('original ivector is \\n')\n\t\t# print(trans_ivector)\n\t\ttrans_ivector = trans_ivector*factor\n\t\t# print('factor is %f' %(factor))\n\t\t# print('transformed ivector is \\n')\n\t\t# print(trans_ivector)\n\n\t\treturn trans_ivector\n\n\n\tdef GetNormalizaionFactor(self, trans_ivector, num_examples):\n\t\ttrans_ivector_sq = torch.pow(trans_ivector, 2)\n\t\tinv_covar = 1.0/(self.psi + 1.0/num_examples)\n\t\tfactor = torch.sqrt(self.dim / torch.dot(inv_covar, trans_ivector_sq))\n\n\t\treturn factor\n\n\tdef ComputeScores_loop(self, trans_trainivector, num_examples, trans_testivector):\n\t\t# trans_trainivector = self.TransformIvector(trainivector, num_examples, simple_length_norm, normalize_length)\n\t\t# trans_testivector = self.TransformIvector(testivector, 1, simple_length_norm, normalize_length)\n\n\t\t#### work out loglike_given_class\n\t\t# mean = torch.zeros(self.dim) \n\t\t# variance = torch.zeros(self.dim)\n\t\tmean = torch.zeros(self.dim, device=self.device)\n\t\tvariance = torch.zeros(self.dim, device=self.device)\n\n\t\t# debug\n\t\t# print(self.dim),\n\t\t# print(mean.size())\n\t\t# print(variance.size())\n\t\t# print(self.psi.size())\n\t\t# print(trans_trainivector.size()) \n\t\t# print(trans_testivector.size()) \n\n\t\tfor i in range(self.dim):\n\t\t\tmean[i] = num_examples*self.psi[i]/(num_examples*self.psi[i]+1.0)*trans_trainivector[i]\n\t\t\tvariance[i] = 1.0+self.psi[i]/(num_examples*self.psi[i]+1.0)\n\n\t\tlogdet = torch.sum(torch.log(variance))\n\n\t\tsqdiff = torch.pow(trans_testivector-mean, 2)\n\t\tvariance = 1.0/variance\n\n\t\tloglike_given_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance))\n\n\t\t### work out loglike_without_class\n\t\tsqdiff = torch.pow(trans_testivector, 2)\n\t\tvariance = self.psi + 1.0\n\t\tlogdet = torch.sum(torch.log(variance))\n\t\tvariance = 1.0/variance\n\t\tloglike_without_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance))\n\n\t\tloglike_ratio = loglike_given_class - loglike_without_class\n\n\t\treturn loglike_ratio\n\n\t## no for loop and batch train_ivectors version\n\tdef ComputeScores(self, trans_trainivector, num_examples, trans_testivector):\n\t\t# trans_trainivector = self.TransformIvector(trainivector, num_examples, simple_length_norm, normalize_length)\n\t\t# trans_testivector = self.TransformIvector(testivector, 1, simple_length_norm, 
normalize_length)\n\n\t\t#### work out loglike_given_class\n\t\t# mean = torch.zeros(self.dim)\n\t\t# variance = torch.zeros(self.dim)\n\t\t# mean = torch.zeros(self.dim, device=self.device)\n\t\t# variance = torch.zeros(self.dim, device=self.device)\n\t\tn_train_ivectors = trans_trainivector.shape[0]\n\t\tassert num_examples == 1\n\t\tmean = torch.zeros((n_train_ivectors, self.dim), device=self.device)\n\t\tvariance = torch.zeros((n_train_ivectors, self.dim), device=self.device)\n\n\t\t# debug\n\t\t# print(self.dim), \n\t\t# print(mean.size()) \n\t\t# print(variance.size()) \n\t\t# print(self.psi.size()) \n\t\t# print(trans_trainivector.size()) \n\t\t# print(trans_testivector.size()) \n\n\t\t# for i in range(self.dim):\n\t\t# \tmean[i] = num_examples*self.psi[i]/(num_examples*self.psi[i]+1.0)*trans_trainivector[i]\n\t\t# \tvariance[i] = 1.0+self.psi[i]/(num_examples*self.psi[i]+1.0)\n\t\t# for i in range(self.dim):\n\t\t# \tmean[i] = num_examples*self.psi[i]/(num_examples*self.psi[i]+1.0)*trans_trainivector[i]\n\t\t# \tvariance[i] = 1.0+self.psi[i]/(num_examples*self.psi[i]+1.0)\n\t\tmean = num_examples * self.psi / (num_examples * self.psi + 1.0) * trans_trainivector # (n, dim)\n\t\tvariance = (1.0 + self.psi / (num_examples * self.psi + 1.0)).expand(n_train_ivectors, self.dim) # (n, dim)\n\t\t# print(mean.shape, variance.shape)\n\n\t\t# logdet = torch.sum(torch.log(variance)) \n\t\tlogdet = torch.sum(torch.log(variance), dim=1) # (n, ) \n\n\t\tsqdiff = torch.pow(trans_testivector-mean, 2) # (n, dim)\n\t\tvariance = 1.0/variance # (n, dim)\n\n\t\t# loglike_given_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance))\n\t\tloglike_given_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.sum(sqdiff * variance, axis=1)) # (n, )\n\n\t\t### work out loglike_without_class\n\t\tsqdiff = torch.pow(trans_testivector, 2) # (dim, )\n\t\tvariance = self.psi + 1.0 # (dim, )\n\t\tlogdet = torch.sum(torch.log(variance)) # scalar\n\t\tvariance = 1.0/variance # (dim, )\n\t\tloglike_without_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance)) # scalar\n\n\t\tloglike_ratio = loglike_given_class - loglike_without_class # (n,)\n\n\t\treturn loglike_ratio\n\n\tdef DRV_TransformIvector(self, ivector, num_examples, simple_length_norm, normalize_length):\n\t\t############ Currently we only consider simple_length_norm = False situation.\n\t\tif normalize_length == True:\n\t\t\ttrans_ivector = torch.matmul(self.transform, ivector-self.mean)\n\t\t\tfactor = 1.0\n\t\t\tfactor = self.GetNormalizaionFactor(trans_ivector, num_examples)\n\n\t\t\tnorm_drv = torch.zeros(self.dim, self.dim, device=self.device)\n\t\t\ttrans_ivector_sq = torch.pow(trans_ivector, 2)\n\n\t\t\tcommon_vector = torch.matmul(torch.diag(num_examples/(num_examples*self.psi+1)), \\\n\t\t\t\t -1*trans_ivector_sq*torch.pow(factor, 3)/self.dim)\n\n\t\t\tfor i in range(self.dim):\n\t\t\t\tnorm_drv[:,i] += common_vector\n\t\t\t\tnorm_drv[i][i] += factor\n\n\t\t\ttransform_drv = torch.matmul(self.transform.t(), norm_drv)\n\t\telse:\n\t\t\ttransform_drv = self.transform.t()\n\n\t\treturn transform_drv\n\n\tdef DRV_Scores(self, trans_trainivector, num_examples, trans_testivector):\n\t\tmean = torch.zeros(self.dim)\n\t\tv1 = torch.zeros(self.dim)\n\n\t\tfor i in range(self.dim):\n\t\t\tmean[i] = num_examples*self.psi[i]/(num_examples*self.psi[i]+1.0)*trans_trainivector[i]\n\t\t\tv1[i] 
= 1.0+self.psi[i]/(num_examples*self.psi[i]+1.0)\n\n\t\tv1 = 1.0/v1\n\t\tv2 = 1.0/(1+self.psi)\n\n\t\tscore_drv = torch.matmul(torch.diag(trans_testivector), v2)-torch.matmul(torch.diag(trans_testivector-mean), v1)\n\n\t\treturn score_drv\n\t\n\tdef to(self, device):\n\n\t\tif device == self.device:\n\t\t\treturn\n\t\t\n\t\tself.device = device\n\t\tself.mean = self.mean.to(self.device)\n\t\tself.transform = self.transform.to(self.device)\n\t\tself.psi = self.psi.to(self.device)", "sub_path": "model/plda.py", "file_name": "plda.py", "file_ext": "py", "file_size_in_byte": 8183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 46, "usage_type": "call"}, {"api_name": "kaldi_io.read_vec_flt_scp", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.dot", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.dot", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.dot", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.dot", "line_number": 178, "usage_type": 
"call"}, {"api_name": "torch.matmul", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.diag", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.diag", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "214042706", "text": "\"\"\"empty message\n\nRevision ID: 4c6632617022\nRevises: f719fe7c700a\nCreate Date: 2017-02-15 22:00:21.421420\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4c6632617022'\ndown_revision = 'f719fe7c700a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('production_needs', 'product',\n existing_type=sa.INTEGER(),\n nullable=True)\n op.alter_column('production_needs', 'product_in',\n existing_type=sa.INTEGER(),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('production_needs', 'product_in',\n existing_type=sa.INTEGER(),\n nullable=False)\n op.alter_column('production_needs', 'product',\n existing_type=sa.INTEGER(),\n nullable=False)\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/4c6632617022_.py", "file_name": "4c6632617022_.py", "file_ext": "py", "file_size_in_byte": 1000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "alembic.op.alter_column", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "418301238", "text": "from budgetkey_data_pipelines.pipelines.procurement.tenders.exemptions.exemptions_scraper import ExemptionsPublisherScraper, TooManyFailuresException\nimport os\nimport json\nfrom requests.exceptions import HTTPError, ConnectionError\nfrom itertools import islice\n\n\nclass MockExemptionsPublisherScraper(ExemptionsPublisherScraper):\n \"\"\"\n opens files from local filesystem instead of from the source\n \"\"\"\n\n def __init__(self, publisher_id, write_prefix=None, mock_http_failures=0, **kwargs):\n self.write_prefix = write_prefix\n self._num_requests = {}\n self._mock_http_failures = mock_http_failures\n 
kwargs.setdefault(\"wait_between_retries\", 0.001)\n super(MockExemptionsPublisherScraper, self).__init__(publisher_id, **kwargs)\n\n def _get_page_text(self, form_data=None):\n if form_data:\n filename = \"SearchExemptionMessages.aspx.publisher{}-page{}\".format(self._publisher_id, self._cur_page_num)\n else:\n filename = \"SearchExemptionMessages.aspx\"\n if filename not in self._num_requests:\n self._num_requests[filename] = 0\n self._num_requests[filename] += 1\n if self._mock_http_failures < self._num_requests[filename]:\n if self.write_prefix:\n with open(os.path.join(os.path.dirname(__file__), \"{}{}\".format(self.write_prefix, filename)), \"w\") as f:\n if form_data:\n real_text = super(MockExemptionsPublisherScraper, self)._get_page_text(form_data)\n f.write(json.dumps(form_data))\n f.write(\"\\n\\n\")\n f.write(real_text)\n else:\n real_text = super(MockExemptionsPublisherScraper, self)._get_page_text()\n f.write(real_text)\n return real_text\n else:\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()\n elif self._num_requests[filename] == 1:\n raise ConnectionError(\"fake connecion error\")\n else:\n raise HTTPError(\"fake http error\")\n\nclass MockExemptionsPublisherScraperUnlimitedPages(ExemptionsPublisherScraper):\n\n def __init__(self, **kwargs):\n super(MockExemptionsPublisherScraperUnlimitedPages, self).__init__(10, **kwargs)\n\n def _get_page_text(self, form_data=None):\n if form_data:\n filename = \"SearchExemptionMessages.aspx.publisher10-page1\"\n else:\n filename = \"SearchExemptionMessages.aspx\"\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()\n\n def _get_num_pages(self):\n return self._cur_page_num + 1\n\ndef test():\n # 10 = המשרד לביטחון פנים - משטרת ישראל\n actual_urls = list(MockExemptionsPublisherScraper(10).get_urls())\n assert_publisher_10_urls(actual_urls)\n\n# this test is skipped because it does real conncetion to gov website\n# it can be used locally to regenerate the mock files or to test functionality of the real website\ndef skip_test_no_mock():\n # 10 = המשרד לביטחון פנים - משטרת ישראל\n actual_urls = []\n # gets all the exemptions, wait until the first one in the mock test, then get 12 urls and stop\n for url in MockExemptionsPublisherScraper(10, \"dump_\").get_urls():\n is_first_url = url == \"/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596877\"\n if is_first_url or (len(actual_urls) > 0 and len(actual_urls) <= 12):\n actual_urls.append(url)\n elif len(actual_urls) >= 12:\n break\n assert_publisher_10_urls(actual_urls)\n\ndef test_retries():\n # mock_http_failures parameter causes every http request to fail 3 times and succeed on the fourth time\n scraper = MockExemptionsPublisherScraper(10, mock_http_failures=3)\n # it works due to retry mechanism\n assert_publisher_10_urls(list(scraper.get_urls()))\n\ndef test_retries_too_many_failures():\n # but - if we have too many failures (by default - more then 10)\n try:\n list(MockExemptionsPublisherScraper(10, mock_http_failures=11).get_urls())\n got_exception = False\n except TooManyFailuresException:\n got_exception = True\n # then we get a TooManyFailuresException\n assert got_exception\n\ndef test_max_pages():\n # 1 page = 10 results\n assert_max_pages(max_pages=1, num_expected_results=10)\n assert_max_pages(max_pages=2, num_expected_results=20)\n # the scraper yields unlimited results\n assert_max_pages(max_pages=6, num_expected_results=60)\n # 0 or negative number - will yield all pages (in the assertion we islice it 
to 300)\n assert_max_pages(max_pages=0, num_expected_results=300)\n assert_max_pages(max_pages=-1, num_expected_results=300)\n\ndef assert_publisher_10_urls(actual_urls):\n assert actual_urls == [\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596877',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596879',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596880',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596739',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596740',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596741',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596751',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596752',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596753',\n \"/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596755\",\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596686',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596700',\n '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596714'\n ]\n\ndef assert_max_pages(max_pages, num_expected_results):\n scraper = MockExemptionsPublisherScraperUnlimitedPages(max_pages=max_pages)\n # the scraper will yield unlimited results, we stop at 300 results\n urls = islice(scraper.get_urls(), 300)\n assert sum(1 for _ in urls) == num_expected_results\n", "sub_path": "tests/procurement/tenders/exemptions/test_exemptions_scraper.py", "file_name": "test_exemptions_scraper.py", "file_ext": "py", "file_size_in_byte": 6113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "budgetkey_data_pipelines.pipelines.procurement.tenders.exemptions.exemptions_scraper.ExemptionsPublisherScraper", "line_number": 8, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.exceptions.ConnectionError", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 46, "usage_type": "call"}, {"api_name": "budgetkey_data_pipelines.pipelines.procurement.tenders.exemptions.exemptions_scraper.ExemptionsPublisherScraper", "line_number": 48, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 58, "usage_type": "call"}, {"api_name": "budgetkey_data_pipelines.pipelines.procurement.tenders.exemptions.exemptions_scraper.TooManyFailuresException", "line_number": 94, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "310121907", "text": "from selenium import webdriver\r\nimport webbrowser\r\nnameofbot = \"likuz\"\r\nbot = True\r\n\r\nprint(\"*Likuz is alive*\")\r\nprint(\"'Hello, my friend :)'\")\r\n\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"LOGIN\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\ndef newlogin():\r\n id = \"a\"\r\n pw = \"1\"\r\n \"\"\"print(id,pw)\"\"\"\r\n print(\"Type in your ID\")\r\n if input() 
== id:\r\n print(\"Give your PW\")\r\n if input() == pw:\r\n print(\"Logged in as asariox\")\r\n return 1\r\n else:\r\n print(\"Not successful pw\")\r\n return 0\r\n else:\r\n print(\"not successful id\")\r\n return 0\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"Functions if logged in\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\ndef kick_member(name):\r\n print(name + \" got kicked\")\r\n\r\ndef inv_member(name):\r\n print(name + \" has been invited\")\r\n\r\ndef open_website(url):\r\n wlist=[\"youtube.com\",\"fb.com\",\"hotmail.com\",\"asariox.de\",\"pr0gramm.com\"] # database method\r\n if url == \"yt\":\r\n url = wlist[0]\r\n elif url == \"fb\":\r\n url = wlist[1]\r\n elif url == \"hotmail\":\r\n url = wlist[2]\r\n elif url == \"asar\":\r\n url = wlist[3]\r\n elif url == \"pr0\":\r\n url = wlist[4]\r\n else:\r\n print(\"Failed to continue the programm. Sorry.\")\r\n \"\"\"if url == \"yt\":\r\n url = \"youtube.com\"\"\" # this works as well :)\r\n webbrowser.open('https://' + url)\r\n\r\ndef sayvar():\r\n sayvar = input(\"what do you want to say: \")\r\n print(sayvar)\r\n print(\"successfully completed sayvar\")\r\n # return sayvar\r\n \r\ndef add_member():\r\n from collections import deque\r\n #file = open('filename.txt', 'r')\r\n alist = deque([\"likuz\",\"Elswarrior\"])\r\n a = input(\"names?\")\r\n alist.append(a)\r\n print(alist)\r\n import pickle\r\n objecta = alist\r\n #str(objecta)\r\n filehandler = open('filename.txt', 'wb')\r\n pickle.dump(objecta, filehandler)\r\n #all\r\n\r\ndef delete_member():\r\n from collections import deque\r\n alist = deque([\"likuz\",\"Elswarrior\"])\r\n a = input(\"names?\")\r\n alist.pop(a)\r\n print(alist)\r\n import pickle\r\n objecta = alist\r\n filehandler = open('filename.txt', 'wb')\r\n pickle.dump(objecta,\r\n filehandler)\r\n\r\ndef permissions():\r\n a = \"Admin\"\r\n c = \"Elswarrior\"+\" = \"+a\r\n print(c)\r\n\r\ndef swap_vars():\r\n \"\"\"TEST PURPOSE\r\n \"\"\"\r\n a, b = 1, 2\r\n a, b = b, a\r\n a, b\r\n\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"Control (BOT)\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\nif newlogin() == 1: # since I have the new newlogin return value\r\n a = \"1\" # I should initialize a with it to keep \"while\" active\r\n\r\n\r\ndef whilea():\r\n while bot and a == \"1\" and nameofbot == \"likuz\":\r\n #menue\r\n print(\"Bot Control Interface\")\r\n print(\"\"\"\r\n k = kicks member (LINE)\r\n a = adds member (LINE)\r\n i = invites member (LINE)\r\n o = opens a website (PERSONAL USE)\r\n say = let us the bot say something u typed in (BOTH)\r\n video = youtube search\r\n permissions() = shows list of users\r\n ... 
more functions will be added when I'm not lazy ...\r\n \"\"\")\r\n typein = input()\r\n if typein == \"k\":\r\n name = input()\r\n kick_member(name)\r\n \r\n elif typein == \"i\":\r\n name = input()\r\n inv_member(name)\r\n elif typein == \"o\": \r\n open_website(url)\r\n elif typein == \"say\":\r\n inputsay = input(\"command: \")\r\n print(\"successfully completed command\")\r\n sayvar()\r\n #global (nameofbot) variables just work in definitions of functions\r\n if inputsay == nameofbot + \" \" + \"say\":\r\n print(\"successfully completed if command\")\r\n if sayvar and sayvar is not None:\r\n print(\"successfully completed sayvar\")\r\n print(sayvar)\r\n else:\r\n print(\"sayvar failed\")\r\n else:\r\n print(\"command failed\")\r\n elif typein == \"video\":\r\n search_video = input()\r\n video_link = \"https://www.youtube.com/results?search_query=\"\r\n xyz = video_link + search_video\r\n browser = webdriver.Firefox()\r\n browser.get(xyz)\r\n z = browser.find_element_by_partial_link_text('/watch?v=').click() #funktioniert nur bei einem Wort im Suchfeld... Ich muss das fixen.\r\n print(z)\r\n #webbrowser.get(video_link + search_video)\r\n else:\r\n print(\"No, oh shed\")\r\n quit()\r\n \r\n\"\"\"\"\"\"\"MAIN PROGRAMM\"\"\"\"\"\"\"\"\"\"\"\"\"\r\nif __name__ == \"__main__\":\r\n whilea()\r\nelse:\r\n print(\"FAILED!\")\r\n \r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"END OF DOCUMENT\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\nprint(\"Goodbye, my friend ::\")\r\nprint(\"*Likuz is dead*\")\r\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 5611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "webbrowser.open", "line_number": 49, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 60, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 73, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 80, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 142, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "594710079", "text": "from flask import render_template, request\nfrom flask.json import jsonify\nfrom models.models import Feed\n\ndef handle_feed(feed_id):\n page_format = request.args.get('format')\n\n if page_format == 'json':\n return handle_json_response(feed_id)\n else:\n return render_template('index.html')\n\ndef handle_json_response(feed_id):\n \n feed = Feed.query.filter_by(id=feed_id).first()\n serialized_feed = feed.serialize()\n json_response = {\n 'feed': serialized_feed\n }\n\n return jsonify(json_response)\n", "sub_path": "controllers/feed.py", "file_name": "feed.py", "file_ext": "py", "file_size_in_byte": 535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.request.args.get", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 6, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 6, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": "call"}, {"api_name": "models.models.Feed.query.filter_by", "line_number": 15, "usage_type": "call"}, {"api_name": "models.models.Feed.query", "line_number": 15, "usage_type": "attribute"}, 
{"api_name": "models.models.Feed", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.json.jsonify", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "204380708", "text": "import json\nfrom flask import request, Response\n\nfrom . import app\nfrom .services import user_profile_service\nfrom .clients import USER_PROFILE_CLIENT_MAP\n\n\ndef error_response(exception, status, message=None):\n if message is None:\n message = str(exception)\n response = {'error_message': message}\n\n return Response(\n json.dumps(response, default=str),\n status=status,\n mimetype='application/json'\n )\n\n\n@app.route('/user-profile', methods=['GET'])\ndef generate_user_profile():\n \"\"\"\n Processes a GET request with third_party client usernames\n as query params.\n \"\"\"\n log_msg = \"Received request for User Profile Data:\\n\"\n\n args = request.args\n user_names = \"\"\n for client_key, username in args.items():\n user_names += \"{}: {}\\n\".format(client_key, username)\n client = USER_PROFILE_CLIENT_MAP.get(client_key)\n if client:\n success = user_profile_service._add_client(client, username)\n if not success:\n error_msg = (\n \"Received Bad Username in Request for User Profile \"\n \"Data - {}: {}\".format(client_key, username)\n )\n\n app.logger.debug(error_msg)\n # Decide whether continue if still have 1 valid user in the bunch or error\n # out altogether. Keeping it at all must be valid\n return Response(json.dumps({\"error\": error_msg}), status=400)\n\n app.logger.info(log_msg + user_names)\n\n user_profile = user_profile_service.get_user_profile()\n\n return Response(\n json.dumps(user_profile, default=str),\n status=200,\n mimetype='application/json'\n )\n", "sub_path": "app/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 1702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Response", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "clients.USER_PROFILE_CLIENT_MAP.get", "line_number": 33, "usage_type": "call"}, {"api_name": "clients.USER_PROFILE_CLIENT_MAP", "line_number": 33, "usage_type": "name"}, {"api_name": "services.user_profile_service._add_client", "line_number": 35, "usage_type": "call"}, {"api_name": "services.user_profile_service", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "services.user_profile_service.get_user_profile", "line_number": 49, "usage_type": "call"}, {"api_name": "services.user_profile_service", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "212593434", "text": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. 
This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\n# Use this code snippet in your app.\n# If you need more information about configurations or implementing the sample code, visit the AWS docs:\n# https://aws.amazon.com/developers/getting-started/python/\n\nimport argparse\nimport logging\nimport tempfile\nfrom time import sleep\n\nimport boto3\nimport cbor\nimport json\nimport requests\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n\nfrom AWSIoTDeviceDefenderAgentSDK import collector\n\nimport os\n\nPRIVATE_KEY = \"private_key\"\nCERTIFICATE = \"certificate\"\nPOLICY_NAME = \"service_host_agent_policy\"\n\n# Variable to track publish metrics response\nlatest_accepted_report_id = 0\n\n\ndef get_mqtt_endpoint(session, cp_endpoint_url):\n iot = session.client(service_name=\"iot\", endpoint_url=cp_endpoint_url)\n resp = iot.describe_endpoint(endpointType=\"iot:Data-ATS\")\n return resp[\"endpointAddress\"]\n\n\ndef get_instance_metadata():\n return requests.get(\n \"http://169.254.169.254/latest/dynamic/instance-identity/document\"\n ).json()\n\n\ndef get_region():\n return get_instance_metadata().get(\"region\")\n\n\ndef get_instance_id():\n return get_instance_metadata().get(\"instanceId\")\n\n\ndef get_root_ca():\n url = \"https://www.amazontrust.com/repository/AmazonRootCA1.pem\"\n ca_text = requests.get(url).text\n ca_temp_file = tempfile.NamedTemporaryFile(\"w\")\n ca_temp_file.write(ca_text)\n ca_temp_file.file.flush()\n return ca_temp_file\n\n\ndef get_client_id():\n return get_instance_id()\n\n\ndef get_cp_endpoint_url(domain, region):\n if domain == \"prod\":\n return \"https://iot.\" + region + \".amazonaws.com\"\n else:\n return \"https://\" + domain + \".\" + region + \".iot.amazonaws.com\"\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(fromfile_prefix_chars=\"@\")\n parser.add_argument(\n \"-r\",\n \"--region\",\n action=\"store\",\n required=False,\n dest=\"region\",\n help=\"AWS Region Code (ex: us-east-1), defaults to the region of the instance\",\n )\n parser.add_argument(\n \"-d\",\n \"--domain\",\n action=\"store\",\n required=False,\n dest=\"domain\",\n help=\"application domain (ex: prod or gamma), defaults to gamma\",\n )\n parser.add_argument(\n \"-n\",\n \"--name\",\n action=\"store\",\n required=False,\n dest=\"name\",\n help=\"Supply a thing name instead of using EC2 Instance Id\",\n )\n parser.add_argument(\n \"-e\",\n \"--cp-endpoint-url\",\n action=\"store\",\n required=False,\n dest=\"cp_endpoint_url\",\n help=\"Supply the URL for the control plane APIs, defaults to\"\n \" https://gamma.us-west-2.iot.amazonaws.com\",\n )\n parser.add_argument(\n \"-m\",\n \"--mqtt-endpoint\",\n action=\"store\",\n required=False,\n dest=\"mqtt_endpoint\",\n help=\"Supply the MQTT endpoint to submit metrics to, defaults to\"\n \" the endpoint retrieved by calling describe-endpoint\",\n )\n return parser.parse_args()\n\n\ndef ack_callback(client, userdata, message):\n response_payload = json.loads(message.payload.decode(\"ASCII\"))\n if \"json\" in message.topic:\n logging.info(\n \"Received a new message: {} from topic: {}\".format(\n message.payload, message.topic\n )\n )\n else:\n response_payload = json.loads(cbor.loads(message.payload))\n logging.info(\n \"Received a new message: {} from topic: {}\".format(\n cbor.loads(message.payload), message.topic\n )\n )\n global 
latest_accepted_report_id\n if \"accepted\" in message.topic:\n report_id = response_payload.get(\"reportId\")\n latest_accepted_report_id = report_id\n\n\ndef start_metrics_collection(\n region_name, cp_endpoint_url, client_id, iot_client, topic, sample_rate\n):\n # Collector samples metrics from the system, it can track the previous metric to generate deltas\n coll = collector.Collector(False)\n metric = None\n first_sample = (\n True # don't publish first sample, so we can accurately report delta metrics\n )\n while True:\n logging.info(\"collecting metrics\")\n metric = coll.collect_metrics()\n if first_sample:\n first_sample = False\n else:\n session = boto3.session.Session(region_name=region_name)\n\n # This is a cheap hack to ensure we reset the creds every so often,\n # since the temporary creds expire. SDK doesn't seem to have a way\n # to reset these creds other than periodically updating these creds\n # by calling iot_client.configureIAMCredentials or subclassing the\n # MQTT client for listening to the onOffline callback. Details in\n # this SIM: https://t.corp.amazon.com/issues/SDK-15249/communication\n credentials = session.get_credentials()\n iot_client.configureIAMCredentials(\n credentials.access_key, credentials.secret_key, credentials.token\n )\n\n report_id = metric._v1_metrics().get(\"header\").get(\"report_id\")\n iot_client.publish(topic=topic, payload=metric.to_json_string(), QoS=0)\n logging.info(\"Published report with report_id: {}\".format(report_id))\n\n max_iterations = 5\n while max_iterations > 0:\n # Sleep 10s to allow receiving a response for the latest publish.\n sleep(10)\n max_iterations = max_iterations - 1\n if latest_accepted_report_id == report_id:\n logging.info(\n \"Received successful ack for reportId: {}\".format(\n latest_accepted_report_id\n )\n )\n break\n\n logging.info(\n \"Republishing report with reportId: {}, last accepted reportId: {}\".format(\n report_id, latest_accepted_report_id\n )\n )\n iot_client.publish(topic=topic, payload=metric.to_json_string(), QoS=0)\n sleep(float(sample_rate))\n\n\ndef main():\n logger = logging.getLogger(\"AWSIoTPythonSDK.core\")\n logger.setLevel(logging.DEBUG)\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n args = parse_args()\n\n if args.region:\n region_name = args.region\n else:\n region_name = get_region()\n\n if args.domain:\n domain_name = args.domain\n else:\n domain_name = \"prod\"\n\n if args.cp_endpoint_url:\n cp_endpoint_url = args.cp_endpoint_url\n else:\n cp_endpoint_url = get_cp_endpoint_url(domain=domain_name, region=region_name)\n\n session = boto3.session.Session(region_name=region_name)\n\n if args.name:\n client_id = args.name\n else:\n client_id = (\n get_client_id()\n ) # This will set the client-id based on the ec2 instance id\n\n if not client_id:\n logging.info(\"Failed to determine client_id, quitting\")\n exit(1)\n\n logging.info(\n \"Running agent with domain: {}, region: {}, clientId: {}, cp_endpoint_url: {}\".format(\n domain_name, region_name, client_id, cp_endpoint_url\n )\n )\n\n ca_cert_file = get_root_ca()\n\n if args.mqtt_endpoint:\n mqtt_endpoint = args.mqtt_endpoint\n else:\n logging.info(\"Attempting to retrieve Mqtt endpoint\")\n mqtt_endpoint = get_mqtt_endpoint(session, cp_endpoint_url)\n\n logging.info(\"Using Mqtt endpoint: {}\".format(mqtt_endpoint))\n\n iot_client = 
AWSIoTMQTTClient(client_id, useWebsocket=True)\n    iot_client.configureEndpoint(mqtt_endpoint, 443, region_name)\n    credentials = session.get_credentials()\n    iot_client.configureCredentials(ca_cert_file.name)\n    iot_client.configureIAMCredentials(\n        credentials.access_key, credentials.secret_key, credentials.token\n    )\n\n    # AWSIoTMQTTClient connection configuration\n    iot_client.configureAutoReconnectBackoffTime(1, 32, 20)\n    iot_client.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\n    iot_client.configureDrainingFrequency(2) # Draining: 2 Hz\n    iot_client.configureConnectDisconnectTimeout(30)\n    iot_client.configureMQTTOperationTimeout(20) # 20 sec\n\n    # Connect and subscribe to AWS IoT\n    iot_client.connect()\n    sleep(2)\n    topic = \"$aws/things/{}/defender/metrics/{}\".format(client_id, \"json\")\n    # Subscribe to the accepted/rejected topics to indicate status of published metrics reports\n    iot_client.subscribe(\n        topic=\"{}/accepted\".format(topic), callback=ack_callback, QoS=1\n    )\n    iot_client.subscribe(\n        topic=\"{}/rejected\".format(topic), callback=ack_callback, QoS=1\n    )\n\n    start_metrics_collection(\n        region_name=region_name,\n        cp_endpoint_url=cp_endpoint_url,\n        client_id=client_id,\n        iot_client=iot_client,\n        topic=topic,\n        sample_rate=300,\n    )\n\n    ca_cert_file.close()\n\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "AWSIoTDeviceDefenderAgentSDK/agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 9798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 64, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 65, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 83, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 132, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 138, "usage_type": "call"}, {"api_name": "cbor.loads", "line_number": 138, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 139, "usage_type": "call"}, {"api_name": "cbor.loads", "line_number": 141, "usage_type": "call"}, {"api_name": "AWSIoTDeviceDefenderAgentSDK.collector.Collector", "line_number": 154, "usage_type": "call"}, {"api_name": "AWSIoTDeviceDefenderAgentSDK.collector", "line_number": 154, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 160, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 165, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 165, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 180, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 185, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 188, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 195, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 205, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 206, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 207, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 208, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 231, 
"usage_type": "call"}, {"api_name": "boto3.session", "line_number": 231, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 241, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 244, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 255, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 258, "usage_type": "call"}, {"api_name": "AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient", "line_number": 260, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 277, "usage_type": "call"}]}
+{"seq_id": "519935644", "text": "#-*- coding: utf-8 -*-\n# Code that loads the image file sources from a page using BeautifulSoup\n\nimport urllib\nfrom bs4 import BeautifulSoup\n\n# Get all img addresses in the html\n\nfor i in range(140, 150, 1):\n    stri = str(i)\n    html = urllib.urlopen('http://bbs.ruliweb.com/community/board/300143/read/33025'+ stri)\n    soup = BeautifulSoup(html, \"lxml\")\n\n    for link in soup.find_all('img'):\n        print(link.get('src'))\n", "sub_path": "crawler1.py", "file_name": "crawler1.py", "file_ext": "py", "file_size_in_byte": 425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urllib.urlopen", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "346248322", "text": "\nimport matplotlib.pyplot as plt\nif __name__ == \"__main__\":\n    p=10000\n    r=0.000120008\n    dt=21 # change in time\n    pt=[]\n    t=[]\n    for i in range(int(10000/dt)):\n        g= p*r # rate of decrease per year\n        p=p-(g*dt)\n        t.append(i*dt)\n        pt.append(p)\n    \n    print(p) \n    plt.plot(t,pt) \n    plt.show()\n", "sub_path": "lab2.py", "file_name": "lab2.py", "file_ext": "py", "file_size_in_byte": 348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "27926458", "text": "# Neural Network Trainer\n# nw.py\n\nimport numpy as np\nimport tensorflow as tf\nimport csv\nimport cv2\nimport os.path\nimport random\n\nEPOCHS = 2\n\nprint(' Loading Data ')\n\n# return True with the given percent probability (50 percent by default)\ndef fifty(percent=50):\n    return random.randrange(100) < percent\n\n# load an image and apply some transformations to it\ndef loadAndProcess(img_path):\n    img = cv2.imread(img_path)\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # convert it to hsv\n    h, s, v = cv2.split(hsv)\n    v += random.randrange(10)\n    light_hsv = cv2.merge((h, s, v))\n    img = cv2.cvtColor(light_hsv, cv2.COLOR_HSV2RGB)\n    return img\n\nwith open('new_style/driving_log.csv') as csvfile:\n    row_count = sum(1 for row in csvfile)\n    print('reading %d lines' % row_count)\n    # row_count = 400\n    csvfile.seek(0, 0)\n    reader = csv.reader(csvfile)\n    angles = np.zeros(shape=(row_count*3))\n    trainimgs = np.zeros(shape=(row_count*3,80,80,3))\n    idx = 0\n    failed = 0\n    dropped = 0\n    for line in reader:\n        center_img_path = line[0]\n        left_img_path = line[1]\n        right_img_path = line[2]\n        center_angle = float(line[3])\n        left_angle = float(center_angle+0.35)\n        right_angle = float(center_angle-0.35)\n\n        angles[idx] = center_angle\n        angles[idx+1] = left_angle\n        angles[idx+2] = right_angle\n\n        if (not os.path.exists(center_img_path)):\n            print(center_img_path)\n            failed+=1\n            continue\n\n        chance_to_keep = 
fifty()\n        if ((center_angle < 0.2) and not chance_to_keep):\n            dropped+=1\n            continue\n\n        imgRGB = loadAndProcess(center_img_path)\n        limgRGB = loadAndProcess(left_img_path)\n        rimgRGB = loadAndProcess(right_img_path)\n\n        trainimgs[idx] = imgRGB\n        trainimgs[idx+1] = limgRGB\n        trainimgs[idx+2] = rimgRGB\n\n        idx+=3\n        # each csv row contributes 3 samples (center/left/right)\n        if (idx == row_count*3):\n            break\n\nprint('failed to load %d' % failed)\nprint('dropped for 0 angle %d' % dropped)\nprint(angles.shape)\nprint(trainimgs.shape)\n\nprint('print angles plot...')\n\nimport scipy.signal as signal\n\n# checking angles distribution\nimport matplotlib\nmatplotlib.use('agg')\nimport pylab as plt\n\nfig, ax = plt.subplots( nrows=1, ncols=1 )\nax.plot(angles)\nfig.savefig('angles.png') # save the figure to file\n\nprint('Data loaded')\n\ntf.python.control_flow_ops = tf\n\nX = trainimgs\ny = angles\n\n# Initial Setup for Keras\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: x/127.5 - 1.,input_shape=(80, 80, 3)))\nmodel.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode=\"same\", input_shape=(80,80, 3)))\nmodel.add(Activation('elu'))\nmodel.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode=\"same\"))\nmodel.add(Activation('elu'))\nmodel.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode=\"same\"))\nmodel.add(Flatten())\nmodel.add(Dropout(.2))\nmodel.add(Activation('elu'))\nmodel.add(Dense(512))\nmodel.add(Dropout(.5))\nmodel.add(Activation('elu'))\nmodel.add(Dense(1))\n\nprint('Start training')\nmodel.compile(loss='mse', optimizer='adam')\nhistory = model.fit(X, y, nb_epoch=EPOCHS, validation_split=0.2, shuffle=True)\n\nmodel.save('model.h5')\nprint('Saved model.h5')\n\n# because some weird exceptions happen sometimes\n# https://stackoverflow.com/questions/40560795/tensorflow-attributeerror-nonetype-object-has-no-attribute-tf-deletestatus\nimport gc; gc.collect()\n", "sub_path": "nw.py", "file_name": "nw.py", "file_ext": "py", "file_size_in_byte": 3617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "random.randrange", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2RGB", "line_number": 26, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.use", "line_number": 85, "usage_type": "call"}, {"api_name": "pylab.subplots", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.python", "line_number": 94, 
"usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.core.Lambda", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Convolution2D", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Convolution2D", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Convolution2D", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers.core.Flatten", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.layers.core.Dropout", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.core.Dropout", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 119, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 130, "usage_type": "call"}]}
+{"seq_id": "616105448", "text": "import pywt\nimport numpy as np\nimport tensorflow as tf\n\n#from tensorflow.contrib import rnn\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\ndef entry():\n    X_fill = load_data(\"train_filled.csv\")\n    X_wv = denoise(X_fill)\n    X_train, Y_train, X_test, Y_test = split(X_wv)\n    Y_sae, Y_sae_test = stackedAutoencoders(X_train, X_test)\n\n    Y_hat, Y_hat_train = myLSTM(Y_sae, Y_train, Y_sae_test)\n    accuracy_test = metric(Y_hat, Y_test)\n    accuracy_train = metric(Y_hat_train, Y_train)\n\n    print(\"Training Set Accuracy: \" + str(accuracy_train*100) + \"%\")\n    print(\"Test Set Accuracy: \" + str(accuracy_test*100) + \"%\")\n\n# loads data\ndef load_data(filename):\n    return np.loadtxt(filename, delimiter=',')\n\n# applies wavelet transform to the middle block of columns\ndef denoise(X):\n    m, n = X.shape\n\n    first_part = np.zeros((m, 28))\n    third_part = np.zeros((m, 64))\n    for row in range(m):\n        for col1 in range(28):\n            first_part[row][col1] = X[row][col1]\n        for col2 in range(64):\n            third_part[row][col2] = X[row][147 + col2]\n\n    wav = pywt.Wavelet('haar')\n\n    D = np.zeros((m, 120))\n    for i, xi in enumerate(X):\n        coeffs = pywt.wavedec(xi[28:147], wav, mode='symmetric', level=1)\n        cA, cD = coeffs\n        cA = np.array(cA)\n        cD = np.array(cD)\n        D[i][:] = np.concatenate((cA, cD))\n    return np.concatenate((first_part, D, third_part), axis=1)\n\n# splits data into X train, Y train, X test, Y test\ndef split(X_raw):\n    m, n = X_raw.shape\n    np.random.shuffle(X_raw)\n    X_train = np.zeros((30000, 147))\n    Y_train = np.zeros((30000, 62))\n    X_test = np.zeros((10000, 147))\n    Y_test = np.zeros((10000, 62))\n    for row in range(m):\n        if row < 30000:\n            for col1 in range(1, 148):\n                X_train[row][col1-1] = X_raw[row][col1]\n            for col2 in range(148, 210):\n                Y_train[row][col2-148] = X_raw[row][col2]\n        else:\n            for col1 in range(1, 148):\n                X_test[row-30000][col1-1] = X_raw[row][col1]\n            for col2 in range(148, 210):\n                Y_test[row-30000][col2-148] = X_raw[row][col2]\n    return X_train.T, Y_train.T, X_test.T, Y_test.T\n\n# Trains the stacked Autoencoders and then passes both X_train and X_test\n# into the SAE for next steps. 
147->74->50->74->147\ndef stackedAutoencoders(X_input_train, X_input_test):\n    # Define parameters\n    num_examples = 30000\n    num_inputs = 147\n    num_hid1 = 74\n    num_hid2 = 50\n    num_hid3 = num_hid1\n    num_output = num_inputs\n    lr = 0.01\n    actf = tf.nn.relu\n    num_epoch = 1\n    batch_size = 200\n\n    # Create inputs\n    X = tf.placeholder(tf.float32, shape=[num_inputs, num_examples])\n    X_test = tf.placeholder(tf.float32, shape=[num_inputs, 10000])\n\n    # Define variables\n    W1 = tf.get_variable(\"W1\", [74, 147], initializer=tf.contrib.layers.xavier_initializer())\n    b1 = tf.get_variable(\"b1\", [74, 1], initializer=tf.zeros_initializer())\n    W2 = tf.get_variable(\"W2\", [50, 74], initializer=tf.contrib.layers.xavier_initializer())\n    b2 = tf.get_variable(\"b2\", [50, 1], initializer=tf.zeros_initializer())\n    W3 = tf.get_variable(\"W3\", [74, 50], initializer=tf.contrib.layers.xavier_initializer())\n    b3 = tf.get_variable(\"b3\", [74, 1], initializer=tf.zeros_initializer())\n    W4 = tf.get_variable(\"W4\", [147, 74], initializer=tf.contrib.layers.xavier_initializer())\n    b4 = tf.get_variable(\"b4\", [147, 1], initializer=tf.zeros_initializer())\n\n    parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3, \"W4\": W4, \"b4\": b4}\n\n    hid_layer1_train = actf(tf.matmul(W1, X)+b1)\n    hid_layer2_train = actf(tf.matmul(W2, hid_layer1_train)+b2)\n    hid_layer3_train = actf(tf.matmul(W3, hid_layer2_train)+b3)\n    output_layer = actf(tf.matmul(W4, hid_layer3_train)+b4)\n\n    hid_layer1_test = actf(tf.matmul(W1, X_test)+b1)\n    hid_layer2_test = actf(tf.matmul(W2, hid_layer1_test)+b2)\n\n    loss = tf.reduce_mean(tf.square(output_layer-X))\n\n    optimizer = tf.train.AdamOptimizer(lr)\n    train = optimizer.minimize(loss)\n\n    init = tf.global_variables_initializer()\n\n    with tf.Session() as sess:\n        sess.run(init)\n        for _ in range(num_epoch):\n            sess.run(train, feed_dict={X:X_input_train})\n\n        y_sae_train = sess.run(hid_layer2_train, feed_dict={X:X_input_train})\n        y_sae_test = sess.run(hid_layer2_test, feed_dict={X_test:X_input_test})\n\n    return y_sae_train, y_sae_test\n\n# Creating LSTM\ndef myLSTM(X, Y, X_test):\n    # Dropout parameter\n    drop = 0.1\n\n    # Inputs arrive as (features, samples); LSTM layers need (samples, timesteps, features)\n    X_seq = X.T.reshape((X.shape[1], 1, X.shape[0]))\n    X_test_seq = X_test.T.reshape((X_test.shape[1], 1, X_test.shape[0]))\n\n    # Initialising the RNN\n    regressor = Sequential()\n\n    # Adding LSTM layers with some Dropout regularisation\n    regressor.add(LSTM(50, return_sequences=True, input_shape=(1, X.shape[0])))\n    regressor.add(Dropout(drop))\n    regressor.add(LSTM(50))\n    regressor.add(Dropout(drop))\n\n    # Adding the output layer\n    regressor.add(Dense(62))\n\n    # Compiling the RNN\n    regressor.compile(optimizer='adam', loss='mean_squared_error')\n\n    # Fitting the RNN to the Training set\n    regressor.fit(X_seq, Y.T, epochs=25, batch_size=200)\n\n    Y_hat = regressor.predict(X_test_seq)\n    Y_hat_train = regressor.predict(X_seq)\n\n    return Y_hat, Y_hat_train\n\n# calculates accuracy of our model\ndef metric(Y_hat, Y):\n    Y_hat_sign = np.sign(Y_hat.T)\n    Y_sign = np.sign(Y)\n    results = np.equal(Y_hat_sign, Y_sign)\n    num_correct = np.sum(results)\n    total = results.shape[0] * results.shape[1]\n    return float(num_correct) / total\n\nif __name__ == \"__main__\":\n    entry()\n", "sub_path": "src/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "keras.layers.LSTM", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, 
{"api_name": "pywt.Wavelet", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "pywt.wavedec", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 110, "usage_type": "call"}, 
{"api_name": "tensorflow.reduce_mean", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 134, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 137, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 138, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 139, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 140, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 141, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "97753791", "text": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2015-2016 MIT Probabilistic Computing Project\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom scipy.stats import norm\nfrom sklearn.neighbors import KDTree\n\nfrom cgpm.cgpm import CGpm\nfrom cgpm.utils import data as du\nfrom cgpm.utils import general as gu\n\nLocalGpm = namedtuple('LocalGpm', ['simulate', 'logpdf'])\n\n\nclass MultivariateKnn(CGpm):\n \"\"\"Multivariate K-Nearest-Neighbors builds local statistical models on a\n per-query basis.\n\n Algorithm for simulate(rowid, targets, constraints) and\n logpdf(rowid, targets, constraints):\n\n - Find K nearest neighbors to `rowid` based only on the `constraints`.\n\n - For each nearest neighbor k = 1,...,K\n\n - Find M nearest neighbors of k (called the locality of k) based\n on both the `constraints` and `targets` dimensions.\n\n - For each target variable q \\in target:\n\n - Learn a primitive univariate CGPM, using the dimension q of\n the M neighbors in the locality of k.\n\n - Return a product CGPM G_k representing locality k.\n\n Overall CGPM G = (1/K)*G_1 + ... + (1/K)*G_K is a simple-weighted\n mixture of the product CGPM learned in each locality.\n\n This \"locality-based\" algorithm is designed to capture the dependence\n between the target variables, rather than assume that all the target\n variables are independent conditioned on the constraints. 
Github ticket #133\n    will support selecting either the independent or locality-based versions of\n    KNN.\n    \"\"\"\n\n    def __init__(self, outputs, inputs, K=None, M=None, distargs=None,\n            params=None, rng=None):\n        # Input validation.\n        self._validate_init(outputs, inputs, K, M, distargs, params, rng)\n        # Default arguments.\n        if params is None:\n            params = {}\n        if rng is None:\n            rng = gu.gen_rng(1)\n        if M is None:\n            M = K\n        # Build the object.\n        self.rng = rng\n        # Variable indexes.\n        self.outputs = outputs\n        self.inputs = []\n        # Distargs.\n        self.stattypes = distargs['outputs']['stattypes']\n        self.statargs = distargs['outputs']['statargs']\n        self.levels = {\n            o: self.statargs[i]['k']\n            for i, o in enumerate(outputs) if self.stattypes[i] != 'numerical'\n        }\n        # Dataset.\n        self.data = OrderedDict()\n        self.N = 0\n        # Ordering of the chain.\n        self.ordering = list(self.rng.permutation(self.outputs))\n        # Number of nearest neighbors.\n        self.K = K\n        self.M = M\n\n    def incorporate(self, rowid, observation, inputs=None):\n        self._validate_incorporate(rowid, observation, inputs)\n        # Incorporate observed variables.\n        x = [observation.get(q, np.nan) for q in self.outputs]\n        # Update dataset and counts.\n        self.data[rowid] = x\n        self.N += 1\n\n    def unincorporate(self, rowid):\n        self._validate_unincorporate(rowid)\n        del self.data[rowid]\n        self.N -= 1\n\n    def logpdf(self, rowid, targets, constraints=None, inputs=None):\n        constraints = self.populate_constraints(rowid, targets, constraints)\n        # XXX Disable logpdf queries without constraints.\n        if inputs:\n            raise ValueError('Prohibited inputs: %s' % (inputs,))\n        if not constraints:\n            raise ValueError('Provide at least one constraint: %s'\n                % (constraints,))\n        self._validate_simulate_logpdf(rowid, targets, constraints)\n        # Retrieve the dataset and neighborhoods.\n        dataset, neighborhoods = self._find_neighborhoods(targets, constraints)\n        models = [self._create_local_model_joint(targets, dataset[n])\n            for n in neighborhoods]\n        # Compute the logpdf in each neighborhood and take a simple average.\n        lp = [m.logpdf(targets) for m in models]\n        return gu.logsumexp(lp) - np.log(len(models))\n\n    def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):\n        if inputs:\n            raise ValueError('Prohibited inputs: %s' % (inputs,))\n        N_sim = 1 if N is None else N\n        constraints = self.populate_constraints(rowid, targets, constraints)\n        self._validate_simulate_logpdf(rowid, targets, constraints, N_sim)\n        if constraints:\n            # Retrieve the dataset and neighborhoods.\n            dataset, neighborhoods = self._find_neighborhoods(\n                targets, constraints)\n            models = [self._create_local_model_joint(targets, dataset[n])\n                for n in neighborhoods]\n            # Sample the models.\n            indices = self.rng.choice(len(models), size=N_sim)\n            # Sample from each model.\n            sampled_models = [models[i] for i in indices]\n            results = [m.simulate(targets) for m in sampled_models]\n        else:\n            results = self._simulate_fallback(rowid, targets, N_sim)\n        assert len(results) == N_sim\n        return results[0] if N is None else results\n\n    def _simulate_fallback(self, rowid, targets, N):\n        # Fallback: if there are no constraints to resample from, then\n        # resample the first variable.\n        merged = len(targets) == len(self.outputs)\n        targets_dummy = [o for o in self.outputs if o not in targets]\n        if merged:\n            assert not targets_dummy\n            targets_dummy = [targets[0]]\n            targets = targets[1:]\n        dataset = self._dataset(targets_dummy)\n        indices = self.rng.choice(len(dataset), size=N)\n        constraints = [zip(targets_dummy, dataset[i]) for i in indices]\n        
results = [self.simulate(rowid, targets, dict(e)) for e in constraints]\n # Make sure to add back the resampled first target variable to results.\n if merged:\n results = [gu.merged(s, e) for s, e in zip(results, constraints)]\n return results\n\n def logpdf_score(self):\n pass\n\n def transition(self, N=None):\n return\n\n # --------------------------------------------------------------------------\n # Internal.\n\n def _find_neighborhoods(self, targets, constraints):\n if not constraints:\n raise ValueError('No constraints in neighbor search.')\n if any(np.isnan(v) for v in constraints.values()):\n raise ValueError('Nan constraints in neighbor search.')\n # Extract the targets, constraints from the dataset.\n lookup = list(targets) + list(constraints)\n D = self._dataset(lookup)\n # Not enough neighbors: crash for now. Workarounds include:\n # (i) reduce K, (ii) randomly drop constraints, (iii) impute dataset.\n if len(D) < self.K:\n raise ValueError('Not enough neighbors: %s'\n % ((targets, constraints),))\n # Code the dataset with Euclidean embedding.\n N = len(targets)\n D_qr_code = self._dummy_code(D[:,:N], lookup[:N])\n D_ev_code = self._dummy_code(D[:,N:], lookup[N:])\n D_code = np.column_stack((D_qr_code, D_ev_code))\n # Run nearest neighbor search on the constraints only.\n constraints_code = self._dummy_code(\n [constraints.values()], constraints.keys())\n dist, neighbors = KDTree(D_ev_code).query(constraints_code, k=len(D))\n # Check for equidistant neighbors and possibly extend the search.\n valid = [i for i, d in enumerate(dist[0]) if d <= dist[0][self.K-1]]\n if self.K < len(valid):\n neighbors = self.rng.choice(neighbors[0][valid],\n replace=False, size=self.K)\n else:\n neighbors = neighbors[0][:self.K]\n # For each neighbor, find its nearest M on the full lookup set.\n _, ex = KDTree(D_code).query(D_code[neighbors], k=min(self.M, self.K))\n # Return the dataset and the list of neighborhoods.\n return D[:,:len(targets)], ex\n\n def _create_local_model_joint(self, targets, dataset):\n assert all(q in self.outputs for q in targets)\n assert dataset.shape[1] == len(targets)\n lookup = {\n 'numerical': self._create_local_model_numerical,\n 'categorical': self._create_local_model_categorical,\n 'nominal': self._create_local_model_categorical,\n }\n models = {\n q: lookup[self.stattypes[self.outputs.index(q)]](q, dataset[:,i])\n for i, q in enumerate(targets)}\n simulate = lambda q, N=None: {c: models[c].simulate(N) for c in q}\n logpdf = lambda q: sum(models[c].logpdf(x) for c,x in q.iteritems())\n return LocalGpm(simulate, logpdf)\n\n def _create_local_model_numerical(self, q, locality):\n assert q not in self.levels\n (mu, std) = (np.mean(locality), max(np.std(locality), .01))\n simulate = lambda N=None: self.rng.normal(mu, std, size=N)\n logpdf = lambda x: norm.logpdf(x, mu, std)\n return LocalGpm(simulate, logpdf)\n\n def _create_local_model_categorical(self, q, locality):\n assert q in self.levels\n assert all(0 <= l < self.levels[q] for l in locality)\n counts = np.bincount(locality.astype(int), minlength=self.levels[q])\n p = counts / np.sum(counts, dtype=float)\n simulate = lambda N: self.rng.choice(self.levels[q], p=p, size=N)\n logpdf = lambda x: np.log(p[x])\n return LocalGpm(simulate, logpdf)\n\n def _dummy_code(self, D, variables):\n levels = {variables.index(l): self.levels[l]\n for l in variables if l in self.levels}\n return D if not levels\\\n else np.asarray([du.dummy_code(r, levels) for r in D])\n\n def _dataset(self, outputs):\n indexes = 
[self.outputs.index(q) for q in outputs]\n        X = np.asarray(self.data.values())[:,indexes]\n        return X[~np.any(np.isnan(X), axis=1)]\n\n    def _stattypes(self, outputs):\n        indexes = [self.outputs.index(q) for q in outputs]\n        return [self.stattypes[i] for i in indexes]\n\n    def populate_constraints(self, rowid, targets, constraints):\n        if constraints is None:\n            constraints = {}\n        if rowid in self.data:\n            values = self.data[rowid]\n            assert len(values) == len(self.outputs)\n            observations = {\n                output : value\n                for output, value in zip(self.outputs, values)\n                if not np.isnan(value)\n                and output not in targets\n                and output not in constraints\n            }\n            constraints = gu.merged(constraints, observations)\n        return constraints\n\n    def get_params(self):\n        return {}\n\n    def get_distargs(self):\n        return {\n            'outputs': {\n                'stattypes': self.stattypes,\n                'statargs': self.statargs,\n            },\n            'K': self.K,\n            'M': self.M,\n        }\n\n    @staticmethod\n    def name():\n        return 'multivariate_knn'\n\n    # --------------------------------------------------------------------------\n    # Validation.\n\n    def _validate_init(self, outputs, inputs, K, M, distargs, params, rng):\n        # No inputs allowed.\n        if inputs:\n            raise ValueError('KNN rejects inputs: %s.' % inputs)\n        # At least two outputs.\n        if len(outputs) < 2:\n            raise ValueError('KNN needs >= 2 outputs: %s.' % outputs)\n        # Unique outputs.\n        if len(set(outputs)) != len(outputs):\n            raise ValueError('Duplicate outputs: %s.' % outputs)\n        # Ensure outputs in distargs.\n        if not distargs or 'outputs' not in distargs:\n            raise ValueError('Missing distargs: %s.' % distargs)\n        # Ensure K is positive.\n        if K is None or K < 1:\n            raise ValueError('Invalid K for nearest neighbors: %s.' % K)\n        # Ensure stattypes and statargs in distargs['outputs'].\n        if 'stattypes' not in distargs['outputs']\\\n                or 'statargs' not in distargs['outputs']:\n            raise ValueError('Missing output stattypes: %s.' % distargs)\n        # Ensure stattypes correct length.\n        if len(distargs['outputs']['stattypes']) != len(outputs):\n            raise ValueError('Wrong number of stattypes: %s.' % distargs)\n        # Ensure statargs correct length.\n        if len(distargs['outputs']['statargs']) != len(outputs):\n            raise ValueError('Wrong number of statargs: %s.' % distargs)\n        # Ensure number of categories provided as k.\n        if any('k' not in distargs['outputs']['statargs'][i]\n                for i in xrange(len(outputs))\n                if distargs['outputs']['stattypes'][i] != 'numerical'):\n            raise ValueError('Missing number of categories k: %s' % distargs)\n\n    def _validate_simulate_logpdf(self, rowid, targets, constraints, N=None):\n        # No invalid number of samples.\n        if N is not None and N <= 0:\n            raise ValueError('Unknown number of samples: %s.' % N)\n        # At least K observations before we can do K nearest neighbors.\n        if self.N < self.K:\n            raise ValueError('MultivariateKnn needs >= %d observations: %d'\n                % (self.K, self.N))\n        # Need targets.\n        if not targets:\n            raise ValueError('No targets specified: %s.' 
% targets)\n        # All targets in outputs.\n        if any(q not in self.outputs for q in targets):\n            raise ValueError('Unknown variables in targets: %s, %s'\n                % (self.outputs, targets))\n        # All constraints in outputs.\n        if any(e not in self.outputs for e in constraints):\n            raise ValueError('Unknown variables in constraints: %s, %s'\n                % (self.outputs, constraints))\n        # No duplicate variables in targets and constraints.\n        if any(q in constraints for q in targets):\n            raise ValueError('Duplicate variable in targets/constraints: %s, %s'\n                % (targets, constraints))\n        # Check for a nan in constraints.\n        if any(np.isnan(v) for v in constraints.itervalues()):\n            raise ValueError('Nan value in constraints: %s.' % constraints)\n        # Check for a nan in targets.\n        if isinstance(targets, dict)\\\n                and any(np.isnan(v) for v in targets.itervalues()):\n            raise ValueError('Nan value in targets: %s.' % targets)\n\n    def _validate_incorporate(self, rowid, observation, inputs):\n        # No duplicate observation.\n        if rowid in self.data:\n            raise ValueError('Already observed: %d.' % rowid)\n        # No inputs.\n        if inputs:\n            raise ValueError('No inputs allowed: %s.' % inputs)\n        # Missing observation.\n        if not observation:\n            raise ValueError('No observation specified: %s.' % observation)\n        # No unknown variables.\n        if any(q not in self.outputs for q in observation):\n            raise ValueError('Unknown variables: (%s,%s).'\n                % (observation, self.outputs))\n\n    def _validate_unincorporate(self, rowid):\n        if rowid not in self.data:\n            raise ValueError('No such observation: %d.' % rowid)\n\n    # --------------------------------------------------------------------------\n    # Serialization.\n\n    def to_metadata(self):\n        metadata = dict()\n        metadata['outputs'] = self.outputs\n        metadata['inputs'] = self.inputs\n        metadata['distargs'] = self.get_distargs()\n        metadata['N'] = self.N\n        metadata['data'] = self.data.items()\n\n        metadata['params'] = dict()\n\n        metadata['factory'] = ('cgpm.knn.mvknn', 'MultivariateKnn')\n        return metadata\n\n    @classmethod\n    def from_metadata(cls, metadata, rng=None):\n        if rng is None:\n            rng = gu.gen_rng(0)\n        knn = cls(\n            outputs=metadata['outputs'],\n            inputs=metadata['inputs'],\n            K=metadata['distargs']['K'], # Pending migration to **kwargs\n            M=metadata['distargs']['M'],\n            distargs=metadata['distargs'],\n            params=metadata['params'],\n            rng=rng)\n        knn.data = OrderedDict(metadata['data'])\n        knn.N = metadata['N']\n        return knn\n", "sub_path": "src/knn/mvknn.py", "file_name": "mvknn.py", "file_ext": "py", "file_size_in_byte": 16692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.namedtuple", "line_number": 29, "usage_type": "call"}, {"api_name": "cgpm.cgpm.CGpm", "line_number": 32, "usage_type": "name"}, {"api_name": "cgpm.utils.general.gen_rng", "line_number": 71, "usage_type": "call"}, {"api_name": "cgpm.utils.general", "line_number": 71, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cgpm.utils.general.logsumexp", "line_number": 123, "usage_type": "call"}, {"api_name": "cgpm.utils.general", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 123, "usage_type": "call"}, {"api_name": "cgpm.utils.general.merged", "line_number": 162, "usage_type": "call"}, {"api_name": "cgpm.utils.general", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 177, 
"usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 191, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KDTree", "line_number": 195, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KDTree", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 225, "usage_type": "call"}, {"api_name": "scipy.stats.norm.logpdf", "line_number": 227, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 227, "usage_type": "name"}, {"api_name": "numpy.bincount", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 243, "usage_type": "call"}, {"api_name": "cgpm.utils.data.dummy_code", "line_number": 243, "usage_type": "call"}, {"api_name": "cgpm.utils.data", "line_number": 243, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 263, "usage_type": "call"}, {"api_name": "cgpm.utils.general.merged", "line_number": 267, "usage_type": "call"}, {"api_name": "cgpm.utils.general", "line_number": 267, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 350, "usage_type": "call"}, {"api_name": "cgpm.utils.general.gen_rng", "line_number": 391, "usage_type": "call"}, {"api_name": "cgpm.utils.general", "line_number": 391, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 400, "usage_type": "call"}]} +{"seq_id": "406594349", "text": "\"\"\"\nUnit tests for Sample class\n\"\"\"\nimport unittest\nimport sys\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\n\nsys.path.append(os.path.abspath('../..'))\n\nfrom flowkit import Sample, transforms\n\ndata1_fcs_path = 'examples/gate_ref/data1.fcs'\ndata1_sample = Sample(data1_fcs_path)\n\nxform_logicle = transforms.LogicleTransform('logicle', param_t=10000, param_w=0.5, param_m=4.5, param_a=0)\nxform_biex1 = transforms.WSPBiexTransform('neg0', width=-100.0, negative=0.0)\nxform_biex2 = transforms.WSPBiexTransform('neg1', width=-100.0, negative=1.0)\n\n\nclass SampleTestCase(unittest.TestCase):\n \"\"\"Tests for loading FCS files as Sample objects\"\"\"\n def test_load_from_fcs_file_path(self):\n \"\"\"Test creating Sample object from an FCS file path\"\"\"\n fcs_file_path = \"examples/test_data_2d_01.fcs\"\n\n sample = Sample(fcs_path_or_data=fcs_file_path)\n\n self.assertIsInstance(sample, Sample)\n\n def test_load_from_pathlib(self):\n \"\"\"Test creating Sample object from a pathlib Path object\"\"\"\n fcs_file_path = \"examples/test_data_2d_01.fcs\"\n path = Path(fcs_file_path)\n sample = Sample(fcs_path_or_data=path)\n\n self.assertIsInstance(sample, Sample)\n\n def test_load_from_numpy_array(self):\n npy_file_path = \"examples/test_comp_example.npy\"\n channels = [\n 'FSC-A', 'FSC-W', 'SSC-A',\n 'Ax488-A', 'PE-A', 'PE-TR-A',\n 'PerCP-Cy55-A', 'PE-Cy7-A', 'Ax647-A',\n 'Ax700-A', 'Ax750-A', 'PacBlu-A',\n 'Qdot525-A', 'PacOrange-A', 'Qdot605-A',\n 'Qdot655-A', 'Qdot705-A', 'Time'\n ]\n\n npy_data = np.fromfile(npy_file_path)\n\n sample = Sample(\n npy_data,\n channel_labels=channels\n )\n\n 
self.assertIsInstance(sample, Sample)\n\n def test_load_from_pandas_multi_index(self):\n sample_orig = Sample(\"examples/100715.fcs\")\n pnn_orig = sample_orig.pnn_labels\n pns_orig = sample_orig.pns_labels\n\n df = sample_orig.as_dataframe(source='orig')\n\n sample_new = Sample(df)\n pnn_new = sample_new.pnn_labels\n pns_new = sample_new.pns_labels\n\n self.assertListEqual(pnn_orig, pnn_new)\n self.assertListEqual(pns_orig, pns_new)\n\n def test_load_from_unsupported_object(self):\n \"\"\"Test Sample constructor raises ValueError loading an unsupported object\"\"\"\n self.assertRaises(ValueError, Sample, object())\n\n def test_comp_matrix_from_csv(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = \"examples/comp_complete_example.csv\"\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n self.assertIsNotNone(sample._comp_events)\n\n def test_clearing_comp_events(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = \"examples/comp_complete_example.csv\"\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n sample.apply_compensation(None)\n\n self.assertIsNone(sample._comp_events)\n\n def test_comp_matrix_from_pathlib_path(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n self.assertIsNotNone(sample._comp_events)\n\n def test_get_metadata(self):\n \"\"\"Test Sample method get_metadata\"\"\"\n fcs_file_path = \"examples/test_data_2d_01.fcs\"\n\n sample = Sample(fcs_path_or_data=fcs_file_path)\n meta = sample.get_metadata()\n\n self.assertEqual(len(meta), 20)\n self.assertEqual(meta['p1n'], 'channel_A')\n\n @staticmethod\n def test_get_channel_index_by_channel_number_int():\n chan_number = data1_sample.get_channel_index(1)\n\n np.testing.assert_equal(0, chan_number)\n\n def test_get_channel_index_fails_by_chan_number_0(self):\n # chan numbers are indexed at 1, not 0\n self.assertRaises(ValueError, data1_sample.get_channel_index, 0)\n\n def test_get_channel_index_fails(self):\n # give an unsupported list as the arg\n self.assertRaises(ValueError, data1_sample.get_channel_index, [0, 1])\n\n @staticmethod\n def test_get_channel_data_raw():\n data_idx_0 = data1_sample.get_channel_data(0, source='raw')\n\n np.testing.assert_equal(data1_sample._raw_events[:, 0], data_idx_0)\n\n @staticmethod\n def test_get_channel_data_comp():\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n data_idx_6 = sample.get_channel_data(6, source='comp')\n\n np.testing.assert_equal(sample._comp_events[:, 6], data_idx_6)\n\n @staticmethod\n def test_get_channel_data_xform():\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n sample.apply_transform(xform_logicle)\n\n data_idx_6 = sample.get_channel_data(6, 
source='xform')\n\n np.testing.assert_equal(sample._transformed_events[:, 6], data_idx_6)\n\n def test_get_channel_data_subsample_fails(self):\n self.assertRaises(\n ValueError,\n data1_sample.get_channel_data,\n 0,\n source='raw',\n subsample=True\n )\n\n def test_get_channel_data_subsample(self):\n sample = Sample(data1_fcs_path)\n sample.subsample_events(500)\n\n data_idx_6 = sample.get_channel_data(6, source='raw', subsample=True)\n\n self.assertEqual(len(data_idx_6), 500)\n\n def test_get_subsampled_orig_events(self):\n sample = Sample(data1_fcs_path)\n sample.subsample_events(500)\n\n events = sample.get_orig_events(subsample=True)\n\n self.assertEqual(events.shape[0], 500)\n\n def test_get_subsampled_raw_events(self):\n sample = Sample(data1_fcs_path)\n sample.subsample_events(500)\n\n events = sample.get_raw_events(subsample=True)\n\n self.assertEqual(events.shape[0], 500)\n\n def test_get_subsampled_comp_events(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n sample.subsample_events(500)\n\n events = sample.get_comp_events(subsample=True)\n\n self.assertEqual(events.shape[0], 500)\n\n def test_get_subsampled_xform_events(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n sample.apply_transform(xform_logicle)\n\n sample.subsample_events(500)\n\n events = sample.get_transformed_events(subsample=True)\n\n self.assertEqual(events.shape[0], 500)\n\n def test_get_comp_events_if_no_comp(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n comp_events = sample.get_comp_events()\n\n self.assertIsNone(comp_events)\n\n def test_get_transformed_events_if_no_xform(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n xform_events = sample.get_transformed_events()\n\n self.assertIsNone(xform_events)\n\n @staticmethod\n def test_get_transformed_events_exclude_scatter():\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n sample.apply_transform(xform_logicle, include_scatter=False)\n\n fsc_a_index = sample.get_channel_index('FSC-A')\n data_fsc_a = sample.get_channel_data(fsc_a_index, source='xform')\n\n np.testing.assert_equal(sample._raw_events[:, fsc_a_index], data_fsc_a)\n\n def test_get_transformed_events_include_scatter(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n sample.apply_transform(xform_logicle, include_scatter=True)\n\n fsc_a_index = sample.get_channel_index('FSC-A')\n data_fsc_a_xform = sample.get_channel_data(fsc_a_index, source='xform')\n 
data_fsc_a_raw = sample.get_channel_data(fsc_a_index, source='raw')\n\n np.testing.assert_equal(sample._transformed_events[:, fsc_a_index], data_fsc_a_xform)\n self.assertEqual(data_fsc_a_raw[0], 118103.25)\n self.assertEqual(round(data_fsc_a_xform[0], 3), 1.238)\n\n def test_get_events_as_data_frame_xform(self):\n data1_sample.apply_transform(xform_logicle)\n df = data1_sample.as_dataframe(source='xform')\n\n self.assertIsInstance(df, pd.DataFrame)\n np.testing.assert_equal(df.values, data1_sample.get_transformed_events())\n\n def test_get_events_as_data_frame_comp(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = \"examples/comp_complete_example.csv\"\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n df = sample.as_dataframe(source='comp')\n\n self.assertIsInstance(df, pd.DataFrame)\n np.testing.assert_equal(df.values, sample.get_comp_events())\n\n def test_get_events_as_data_frame_raw(self):\n df = data1_sample.as_dataframe(source='raw')\n\n self.assertIsInstance(df, pd.DataFrame)\n np.testing.assert_equal(df.values, data1_sample.get_raw_events())\n\n def test_get_events_as_data_frame_orig(self):\n df = data1_sample.as_dataframe(source='orig')\n\n self.assertIsInstance(df, pd.DataFrame)\n np.testing.assert_equal(df.values, data1_sample.get_orig_events())\n\n def test_get_events_as_data_frame_column_order(self):\n orig_col_order = ['FSC-H', 'SSC-H', 'FL1-H', 'FL2-H', 'FL3-H', 'FL2-A', 'FL4-H', 'Time']\n new_col_order = ['FSC-H', 'SSC-H', 'FL1-H', 'FL2-H', 'FL2-A', 'FL3-H', 'FL4-H', 'Time']\n col_to_check = 'FL2-A'\n\n df = data1_sample.as_dataframe(source='raw')\n df_reorder = data1_sample.as_dataframe(source='raw', col_order=new_col_order)\n\n self.assertListEqual(list(df.columns.get_level_values(0)), orig_col_order)\n self.assertListEqual(list(df_reorder.columns.get_level_values(0)), new_col_order)\n\n np.testing.assert_equal(df[col_to_check].values, df_reorder[col_to_check])\n\n def test_get_events_as_data_frame_new_column_names(self):\n new_cols = ['FSC-H', 'SSC-H', 'FLR1-H', 'FLR2-H', 'FLR3-H', 'FLR2-A', 'FLR4-H', 'Time']\n\n df = data1_sample.as_dataframe(source='raw', col_names=new_cols)\n\n self.assertListEqual(list(df.columns), new_cols)\n\n @staticmethod\n def test_fully_custom_transform():\n sample1 = Sample(fcs_path_or_data=data1_fcs_path)\n sample2 = Sample(fcs_path_or_data=data1_fcs_path)\n\n custom_xforms = {\n 'FL1-H': xform_biex1,\n 'FL2-H': xform_biex1,\n 'FL3-H': xform_biex2,\n 'FL2-A': xform_biex1,\n 'FL4-H': xform_biex1\n }\n\n sample1.apply_transform(xform_biex1)\n sample2.apply_transform(custom_xforms)\n\n fl2_idx = sample1.get_channel_index('FL2-H')\n fl3_idx = sample1.get_channel_index('FL3-H')\n\n s1_fl2 = sample1.get_channel_data(fl2_idx, source='xform')\n s2_fl2 = sample2.get_channel_data(fl2_idx, source='xform')\n s1_fl3 = sample1.get_channel_data(fl3_idx, source='xform')\n s2_fl3 = sample2.get_channel_data(fl3_idx, source='xform')\n\n np.testing.assert_equal(s1_fl2, s2_fl2)\n np.testing.assert_raises(AssertionError, np.testing.assert_equal, s1_fl3, s2_fl3)\n\n def test_create_fcs(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n sample.export(\"test_fcs_export.fcs\", source='comp', 
directory=\"examples\")\n\n exported_fcs_file = \"examples/test_fcs_export.fcs\"\n exported_sample = Sample(fcs_path_or_data=exported_fcs_file)\n os.unlink(exported_fcs_file)\n\n self.assertIsInstance(exported_sample, Sample)\n\n # TODO: Excluding time channel here, as the difference was nearly 0.01. Need to investigate why the\n # exported comp data isn't exactly equal\n np.testing.assert_almost_equal(sample._comp_events[:, :-1], exported_sample._raw_events[:, :-1], decimal=3)\n\n def test_create_csv(self):\n fcs_file_path = \"examples/test_comp_example.fcs\"\n comp_file_path = Path(\"examples/comp_complete_example.csv\")\n\n sample = Sample(\n fcs_path_or_data=fcs_file_path,\n compensation=comp_file_path,\n ignore_offset_error=True # sample has off by 1 data offset\n )\n\n sample.export(\"test_fcs_export.csv\", source='comp', directory=\"examples\")\n\n exported_csv_file = \"examples/test_fcs_export.csv\"\n exported_df = pd.read_csv(exported_csv_file)\n exported_sample = Sample(exported_df)\n os.unlink(exported_csv_file)\n\n self.assertIsInstance(exported_sample, Sample)\n\n # TODO: Need to investigate why the exported comp data isn't exactly equal\n np.testing.assert_almost_equal(sample._comp_events[:, :], exported_sample._raw_events[:, :], decimal=3)\n\n def test_filter_negative_scatter(self):\n # there are 2 negative SSC-A events in this file (of 65016 total events)\n fcs_file_path = \"examples/100715.fcs\"\n sample = Sample(fcs_path_or_data=fcs_file_path)\n sample.subsample_events(50000)\n sample.filter_negative_scatter(reapply_subsample=False)\n\n # using the default seed, the 2 negative events are in the subsample\n common_idx = np.intersect1d(sample.subsample_indices, sample.negative_scatter_indices)\n self.assertEqual(len(common_idx), 2)\n\n sample.filter_negative_scatter(reapply_subsample=True)\n common_idx = np.intersect1d(sample.subsample_indices, sample.negative_scatter_indices)\n self.assertEqual(len(common_idx), 0)\n\n self.assertEqual(sample.negative_scatter_indices.shape[0], 2)\n\n def test_filter_anomalous_events(self):\n # there are 2 negative SSC-A events in this file (of 65016 total events)\n fcs_file_path = \"examples/100715.fcs\"\n sample = Sample(fcs_path_or_data=fcs_file_path)\n sample.subsample_events(50000)\n sample.filter_anomalous_events(reapply_subsample=False)\n\n # using the default seed, the 2 negative events are in the subsample\n common_idx = np.intersect1d(sample.subsample_indices, sample.anomalous_indices)\n self.assertGreater(len(common_idx), 0)\n\n sample.filter_anomalous_events(reapply_subsample=True)\n common_idx = np.intersect1d(sample.subsample_indices, sample.anomalous_indices)\n self.assertEqual(len(common_idx), 0)\n\n self.assertGreater(sample.anomalous_indices.shape[0], 0)\n", "sub_path": "flowkit/tests/sample_tests.py", "file_name": "sample_tests.py", "file_ext": "py", "file_size_in_byte": 16804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flowkit.Sample", "line_number": 16, "usage_type": "call"}, {"api_name": "flowkit.transforms.LogicleTransform", "line_number": 18, "usage_type": "call"}, {"api_name": "flowkit.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": 
"flowkit.transforms.WSPBiexTransform", "line_number": 19, "usage_type": "call"}, {"api_name": "flowkit.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "flowkit.transforms.WSPBiexTransform", "line_number": 20, "usage_type": "call"}, {"api_name": "flowkit.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flowkit.Sample", "line_number": 29, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 31, "usage_type": "argument"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 37, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 39, "usage_type": "argument"}, {"api_name": "numpy.fromfile", "line_number": 52, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 54, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 59, "usage_type": "argument"}, {"api_name": "flowkit.Sample", "line_number": 62, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 68, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 77, "usage_type": "argument"}, {"api_name": "flowkit.Sample", "line_number": 83, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 95, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 107, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 109, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_equal", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 165, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flowkit.Sample", "line_number": 188, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 196, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 204, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 213, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 215, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 228, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 230, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 246, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 258, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 270, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 282, "usage_type": "attribute"}, {"api_name": "pathlib.Path", 
"line_number": 286, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 307, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_equal", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 308, "usage_type": "attribute"}, {"api_name": "flowkit.Sample", "line_number": 314, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 322, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_equal", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 323, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 328, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_equal", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 329, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 334, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_equal", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 335, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_equal", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 348, "usage_type": "attribute"}, {"api_name": "flowkit.Sample", "line_number": 359, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 381, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_raises", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 382, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 386, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 388, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 397, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 398, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 400, "usage_type": "argument"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 404, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 408, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 410, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 419, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 420, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 421, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 423, "usage_type": "argument"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 426, "usage_type": "attribute"}, {"api_name": "flowkit.Sample", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 440, "usage_type": "call"}, {"api_name": "flowkit.Sample", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 
457, "usage_type": "call"}]} +{"seq_id": "48534530", "text": "# encoding: utf-8\nfrom pptx.dml.color import RGBColor\nfrom pptx.util import Pt\n\n# Powerpoint output directory\nOUTPUT_DIRECTORY = \"./output\"\n\n# Powerpoint Background\nBACKGROUND_COLOR = RGBColor(0x00, 0x00, 0x00) # black\n\n# Powerpoint Bottom Banner\nBANNER_COLOR = RGBColor(0xff, 0xff, 0xff) # white\nBANNER_FONT_SIZE = Pt(20)\nBANNER_FONT_TYPE = 'Calibri'\nBANNER_HEIGHT = Pt(40)\n\n# Church metadata\nCHURCH_LOGO_PATH = './images/logo.png'\nCHURCH_LOGO_HEIGHT = BANNER_HEIGHT\nCHURCH_LOGO_WIDTH = BANNER_HEIGHT\nCHURCH_NAME = u'高贵林国语教会'\n\n# Lyrics\nSONG_TITLE_PADDING = Pt(20) # padding-left\nLYRICS_FONT_TYPE = \"Calibri\"\nLYRICS_FONT_COLOR = RGBColor(0xff, 0xff, 0xff) # white\nLYRICS_FONT_SIZE = Pt(30)\nLYRICS_EMPTY_LINE_FONT_SIZE = Pt(20)\nLYRICS_FONT_ISBOLD = True\n", "sub_path": "settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pptx.dml.color.RGBColor", "line_number": 9, "usage_type": "call"}, {"api_name": "pptx.dml.color.RGBColor", "line_number": 12, "usage_type": "call"}, {"api_name": "pptx.util.Pt", "line_number": 13, "usage_type": "call"}, {"api_name": "pptx.util.Pt", "line_number": 15, "usage_type": "call"}, {"api_name": "pptx.util.Pt", "line_number": 24, "usage_type": "call"}, {"api_name": "pptx.dml.color.RGBColor", "line_number": 26, "usage_type": "call"}, {"api_name": "pptx.util.Pt", "line_number": 27, "usage_type": "call"}, {"api_name": "pptx.util.Pt", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "345075284", "text": "from django.conf.urls import patterns,url\n\nfrom . import views\n\nurlpatterns = [\n\turl(r'^home/$', views.home, name=\"home\"),\n\turl(r'^product/$', views.product.as_view(), name='product'),\n\turl(r'^deleteproduct/?P[0-9]+/$', views.delete, name='deleteproduct'),\n\turl(r'^addproduct/$', views.addproduct.as_view(), name=\"addproduct\"),\n\turl(r'^search/$', views.search.as_view())]", "sub_path": "warehouse/managerment/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 375, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "178742228", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('beers', '0004_auto_20150131_2234'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='userprofile',\n name='user',\n ),\n migrations.DeleteModel(\n name='UserProfile',\n ),\n migrations.AddField(\n model_name='beer',\n name='alcohol',\n field=models.FloatField(default=0.0),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='brewery',\n name='origin',\n field=models.CharField(default=b'Unknown', max_length=128),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='brewery',\n name='owner',\n field=models.CharField(default=b'Unknown', max_length=128),\n preserve_default=True,\n ),\n 
migrations.AlterField(\n model_name='trial',\n name='rating',\n field=models.PositiveIntegerField(default=0),\n preserve_default=True,\n ),\n ]\n", "sub_path": "beers/migrations/0005_auto_20150202_2326.py", "file_name": "0005_auto_20150202_2326.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.DeleteModel", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "39736511", "text": "import mock\nimport unittest\nfrom pcp_pidstat import ProcessMemoryUtil\n\nclass TestProcessMemoryUtil(unittest.TestCase):\n def setUp(self):\n self.__metric_repository = mock.Mock()\n self.__metric_repository.current_value = mock.Mock(side_effect=self.metric_repo_current_value_side_effect)\n self.__metric_repository.previous_value = mock.Mock(side_effect=self.metric_repo_previous_value_side_effect)\n\n def metric_repo_current_value_side_effect(self, metric_name,instance):\n if metric_name == 'proc.psinfo.vsize' and instance == 1:\n return 120084\n if metric_name == 'proc.psinfo.rss' and instance == 1:\n return 6272\n if metric_name == 'proc.psinfo.cmin_flt' and instance == 1:\n return 573935\n if metric_name == 'proc.psinfo.minflt' and instance == 1:\n return 14509\n if metric_name == 'proc.psinfo.cmaj_flt' and instance == 1:\n return 647\n if metric_name == 'proc.psinfo.maj_flt' and instance == 1:\n return 54\n if metric_name == 'mem.physmem':\n return 3794764\n if metric_name == 'proc.psinfo.cmd' and instance == 1:\n return \"test\"\n if metric_name == 'proc.psinfo.processor' and instance == 1:\n return 0\n if metric_name == 'proc.id.uid' and instance == 1:\n return 1\n if metric_name == 'proc.psinfo.pid' and instance == 1:\n return 1\n\n def 
metric_repo_previous_value_side_effect(self, metric_name,instance):\n if metric_name == 'proc.psinfo.cmin_flt' and instance == 1:\n return 573930\n if metric_name == 'proc.psinfo.minflt' and instance == 1:\n return 14500\n if metric_name == 'proc.psinfo.cmaj_flt' and instance == 1:\n return 645\n if metric_name == 'proc.psinfo.maj_flt' and instance == 1:\n return 50\n\n def test_vsize(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n\n vsize = process_memory_usage.vsize()\n\n self.assertEquals(vsize, 120084)\n\n def test_rss(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n\n rss = process_memory_usage.rss()\n\n self.assertEquals(rss, 6272)\n\n def test_mem(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n test_mem = float(\"%.2f\"%(100*float(6272)/3794764))\n\n mem = process_memory_usage.mem()\n\n self.assertEquals(mem, test_mem)\n\n def test_min_flt(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n test_min_flt = float(\"%.2f\"%(((573935 + 14509) - (573930 + 14500))/1.34))\n\n min_flt = process_memory_usage.minflt()\n\n self.assertEquals(min_flt, test_min_flt)\n\n def test_maj_flt(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n test_maj_flt = float(\"%.2f\"%(((647 + 54) - (645 + 50))/1.34))\n\n maj_flt = process_memory_usage.majflt()\n\n self.assertEquals(maj_flt, test_maj_flt)\n\n def test_pid(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n\n pid = process_memory_usage.pid()\n\n self.assertEqual(pid,1)\n\n def test_process_name(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n\n name = process_memory_usage.process_name()\n\n self.assertEqual(name,'test')\n\n\n def test_user_id(self):\n process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)\n\n user_id = process_memory_usage.user_id()\n\n self.assertEqual(user_id,1)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "test/process_memoryutil_test.py", "file_name": "process_memoryutil_test.py", "file_ext": "py", "file_size_in_byte": 3734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "unittest.TestCase", "line_number": 5, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 7, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 8, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 9, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 46, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 53, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 60, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 68, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 76, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 84, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 91, "usage_type": "call"}, {"api_name": "pcp_pidstat.ProcessMemoryUtil", "line_number": 99, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "247220873", "text": "# Copyright 2022, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom compressed_communication.aggregators.comparison_methods import three_lc\n\n\n_test_value_type_integer_tensor = (tf.int32, (3,))\n_test_value_type_float_tensor = (tf.float32, (3,))\n_test_value_type_list_integer_tensors = [(tf.int32, (2,)),\n (tf.int32, (3,))]\n\n\nclass ThreeLCComputationTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('float_tensor', _test_value_type_float_tensor))\n def test_three_lc_properties(self, value_type):\n factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)\n value_type = tff.to_type(value_type)\n process = factory.create(value_type)\n self.assertIsInstance(process, tff.templates.AggregationProcess)\n\n server_state_type = tff.type_at_server(())\n expected_initialize_type = tff.FunctionType(\n parameter=None, result=server_state_type)\n tff.test.assert_types_equivalent(process.initialize.type_signature,\n expected_initialize_type)\n\n expected_measurements_type = tff.StructType([\n ('avg_bitrate', tf.float32),\n ('avg_distortion', tf.float32)\n ])\n expected_measurements_type = tff.type_at_server(expected_measurements_type)\n expected_next_type = tff.FunctionType(\n parameter=collections.OrderedDict(\n state=server_state_type, value=tff.type_at_clients(value_type)),\n result=tff.templates.MeasuredProcessOutput(\n state=server_state_type,\n result=tff.type_at_server(value_type),\n measurements=expected_measurements_type))\n tff.test.assert_types_equivalent(process.next.type_signature,\n expected_next_type)\n\n @parameterized.named_parameters(\n ('integer_tensor', _test_value_type_integer_tensor),\n ('list_integer_tensors', _test_value_type_list_integer_tensors))\n def test_three_lc_create_raises(self, value_type):\n factory = three_lc.ThreeLCFactory()\n value_type = tff.to_type(value_type)\n self.assertRaises(ValueError, factory.create, value_type)\n\n\nclass ThreeLCExecutionTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('float_tensor', _test_value_type_float_tensor))\n def test_correctness_one_client(self, value_type):\n factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)\n value_type = tff.to_type(value_type)\n process = factory.create(value_type)\n state = process.initialize()\n\n client_values = [tf.ones(value_type.shape)]\n expected_result = tf.ones(value_type.shape)\n expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,\n avg_distortion=0.)\n\n measurements = process.next(state, client_values).measurements\n self.assertAllClose(measurements, expected_measurements)\n result = process.next(state, client_values).result\n self.assertAllClose(result, expected_result)\n\n @parameterized.named_parameters(\n ('float_tensor', _test_value_type_float_tensor))\n def test_correctness_one_client_high_sparsity(self, value_type):\n factory = three_lc.ThreeLCFactory(sparsity_factor=10000.0)\n value_type = tff.to_type(value_type)\n process = factory.create(value_type)\n state = process.initialize()\n\n 
client_values = [tf.ones(value_type.shape)]\n expected_result = tf.zeros(value_type.shape)\n expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,\n avg_distortion=1.)\n\n measurements = process.next(state, client_values).measurements\n self.assertAllClose(measurements, expected_measurements)\n result = process.next(state, client_values).result\n self.assertAllClose(result, expected_result)\n\n @parameterized.named_parameters(\n ('float_tensor', _test_value_type_float_tensor))\n def test_correctness_identical_clients(self, value_type):\n factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)\n value_type = tff.to_type(value_type)\n process = factory.create(value_type)\n state = process.initialize()\n\n client_values = [[-1.0, 0.0, 100000.0] for _ in range(2)]\n expected_result = [0.0, 0.0, 200000.0]\n expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,\n avg_distortion=1./3.)\n\n measurements = process.next(state, client_values).measurements\n self.assertAllClose(measurements, expected_measurements)\n result = process.next(state, client_values).result\n self.assertAllClose(result, expected_result)\n\n @parameterized.named_parameters(\n ('float_tensor', _test_value_type_float_tensor))\n def test_correctness_different_clients(self, value_type):\n factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)\n value_type = tff.to_type(value_type)\n process = factory.create(value_type)\n state = process.initialize()\n\n client_values = [[0.0, 0.0, 2.0], [1.0, 1.0, 1.0]]\n expected_result = [1.0, 1.0, 3.0]\n expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,\n avg_distortion=0.)\n\n measurements = process.next(state, client_values).measurements\n self.assertAllClose(measurements, expected_measurements)\n result = process.next(state, client_values).result\n self.assertAllClose(result, expected_result)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "sub_path": "compressed_communication/aggregators/comparison_methods/three_lc_test.py", "file_name": "three_lc_test.py", "file_ext": "py", "file_size_in_byte": 6083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tensorflow.int32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.test", "line_number": 30, "usage_type": "attribute"}, {"api_name": "absl.testing.parameterized.TestCase", "line_number": 30, "usage_type": "attribute"}, {"api_name": "absl.testing.parameterized", "line_number": 30, "usage_type": "name"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc.ThreeLCFactory", "line_number": 35, "usage_type": "call"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc", "line_number": 35, "usage_type": "name"}, {"api_name": "tensorflow_federated.to_type", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow_federated.templates", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.type_at_server", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow_federated.FunctionType", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow_federated.test.assert_types_equivalent", "line_number": 43, "usage_type": "call"}, {"api_name": 
"tensorflow_federated.test", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.StructType", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.type_at_server", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow_federated.FunctionType", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow_federated.type_at_clients", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow_federated.templates.MeasuredProcessOutput", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow_federated.templates", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.type_at_server", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow_federated.test.assert_types_equivalent", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow_federated.test", "line_number": 58, "usage_type": "attribute"}, {"api_name": "absl.testing.parameterized.named_parameters", "line_number": 32, "usage_type": "call"}, {"api_name": "absl.testing.parameterized", "line_number": 32, "usage_type": "name"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc.ThreeLCFactory", "line_number": 65, "usage_type": "call"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc", "line_number": 65, "usage_type": "name"}, {"api_name": "tensorflow_federated.to_type", "line_number": 66, "usage_type": "call"}, {"api_name": "absl.testing.parameterized.named_parameters", "line_number": 61, "usage_type": "call"}, {"api_name": "absl.testing.parameterized", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.test", "line_number": 70, "usage_type": "attribute"}, {"api_name": "absl.testing.parameterized.TestCase", "line_number": 70, "usage_type": "attribute"}, {"api_name": "absl.testing.parameterized", "line_number": 70, "usage_type": "name"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc.ThreeLCFactory", "line_number": 75, "usage_type": "call"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc", "line_number": 75, "usage_type": "name"}, {"api_name": "tensorflow_federated.to_type", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 82, "usage_type": "call"}, {"api_name": "absl.testing.parameterized.named_parameters", "line_number": 72, "usage_type": "call"}, {"api_name": "absl.testing.parameterized", "line_number": 72, "usage_type": "name"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc.ThreeLCFactory", "line_number": 93, "usage_type": "call"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc", "line_number": 93, "usage_type": "name"}, {"api_name": "tensorflow_federated.to_type", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 100, "usage_type": "call"}, {"api_name": 
"absl.testing.parameterized.named_parameters", "line_number": 90, "usage_type": "call"}, {"api_name": "absl.testing.parameterized", "line_number": 90, "usage_type": "name"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc.ThreeLCFactory", "line_number": 111, "usage_type": "call"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc", "line_number": 111, "usage_type": "name"}, {"api_name": "tensorflow_federated.to_type", "line_number": 112, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 118, "usage_type": "call"}, {"api_name": "absl.testing.parameterized.named_parameters", "line_number": 108, "usage_type": "call"}, {"api_name": "absl.testing.parameterized", "line_number": 108, "usage_type": "name"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc.ThreeLCFactory", "line_number": 129, "usage_type": "call"}, {"api_name": "compressed_communication.aggregators.comparison_methods.three_lc", "line_number": 129, "usage_type": "name"}, {"api_name": "tensorflow_federated.to_type", "line_number": 130, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 136, "usage_type": "call"}, {"api_name": "absl.testing.parameterized.named_parameters", "line_number": 126, "usage_type": "call"}, {"api_name": "absl.testing.parameterized", "line_number": 126, "usage_type": "name"}, {"api_name": "tensorflow.test.main", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 146, "usage_type": "attribute"}]} +{"seq_id": "608104916", "text": "import os\nfrom telethon import TelegramClient, events\n\n\napi_id = int(os.environ.get('api_id', 5000))\napi_hash = str(os.environ.get('api_hash', 5000))\n\nclient = TelegramClient('anon', api_id, api_hash)\n\n\n@client.on(events.NewMessage)\nasync def my_event_handler(event):\n if 'привет' in event.raw_text:\n await client.send_message('+79992007908', event.raw_text)\n\nclient.start()\nclient.run_until_disconnected()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ.get", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "telethon.TelegramClient", "line_number": 8, "usage_type": "call"}, {"api_name": "telethon.events.NewMessage", "line_number": 11, "usage_type": "attribute"}, {"api_name": "telethon.events", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "234746856", "text": "# -*- coding: utf-8 -*-\nimport os\nimport warnings\nimport shutil\nimport pathlib\nimport sphinx_rtd_theme\n\nPROJECT_ROOT=pathlib.Path(__file__).parent.parent\n\npandoc_installed = False if os.system(\"pandoc --help > /dev/null 2>&1\") else True\n\nif not pandoc_installed:\n warnings.warn(\"pandoc not installed - install brew then brew install pandoc\")\n\n\n\ndef setup(app):\n \"\"\"Forces the auto generation of the documentation at build time.\"\"\"\n os.system(\"sphinx-apidoc -f -T -o docs/autogen src/testspace_colab\")\n shutil.copytree(src=PROJECT_ROOT / 'notebooks', dst='docs/autogen/notebook', dirs_exist_ok=True)\n\n\n# ------------------------------------------------------------------------------\n# General information 
about the project.\n# ------------------------------------------------------------------------------\n\nproject = u\"testspace-colab\"\ncopyright = u\"2021, S2 Technologies, Inc\"\nauthor = \"Laurent Brack\"\n\n# ------------------------------------------------------------------------------\n# General Configuration\n# ------------------------------------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"1.8\"\n\n# Add any Sphinx extension module names here, as strings. They can\n# be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\n# See http://www.sphinx-doc.org/en/stable/extensions.html\nextensions = [\n \"sphinx_autorun\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.inheritance_diagram\",\n \"sphinx.ext.graphviz\",\n]\n\n\n# ----------------------------------------------------------------------------\n# To do extension configuration\n# https://www.sphinx-doc.org/en/master/usage/extensions/todo.html\n# ----------------------------------------------------------------------------\ntodo_include_todos=True\ntodo_link_only=True\n\nif pandoc_installed:\n extensions.append(\"nbsphinx\")\n\n# -----------------------------------------------------------------------------\n# sphinx.ext.intersphinx\n# -----------------------------------------------------------------------------\nintersphinx_mapping = {\n \"python\" : (\" https://doc.python.org/3/\", None),\n 'docker' : (\"https://docker-py.readthedocs.io/en/stable/\", None),\n 'elastic' : (\"https://elasticsearch-py.readthedocs.io/en/latest/\", None),\n\n}\n\n# -----------------------------------------------------------------------------\n# sphinx.ext.autodoc\n# http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_default_options\n# -----------------------------------------------------------------------------\nautodoc_member_order = \"alphabetical\"\nautodoc_default_options = {\"members\": None, \"show-inheritance\": None}\nautoclass_content = \"class\"\nautodoc_warningiserror = True\n\n\n# -----------------------------------------------------------------------------\n# 'sphinx.ext.inheritance_diagram',\n# -----------------------------------------------------------------------------\ninheritance_graph_attrs = dict(rankdir=\"LR\", size='\"\"', fontsize=12, ratio=\"compress\")\ninheritance_node_attrs = dict(fontsize=12, style=\"filled\")\n\n# -----------------------------------------------------------------------------\n# General information about the project.\n# -----------------------------------------------------------------------------\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n\ntoday_fmt = \"%Y-%m-%dT%H:%M %Z\"\nsource_suffix = \".rst\"\n\n# The extlinks extension simplifies referencing multiple links to a given URL,\n# for example links to bug trackers, version control web interfaces, etc.\n# For example, to link to a JIRA issue in the doc, use :issue:`123`, which\n# would create a link to ISSUE-123\n# See http://www.sphinx-doc.org/en/stable/ext/extlinks.html\nextlinks = {\n \"issue\": (\n \"https://github.com/lbrack/testspace-colab/issues/%s\",\n \"ISSUE-\"\n )\n}\n\n# A list of ignored prefixes for module index sorting.\nmodindex_common_prefix = 
[project + \".\"]\n\n# ----------------------------------------------------------------------------\n# Custom Theme Options\n# ----------------------------------------------------------------------------\n# The frontpage document.\nindex_doc = \"index\"\n# The master toctree document.\nmaster_doc = \"index\"\n# Manages todo section\ntodo_include_todos = True\ninclude_todos = True\n\n# warning will be inserted in the final documentation\nkeep_warnings = True\n\n\n# -- Options for HTML output --------------------------------------------------\n\nhtml_theme_options = {\n \"canonical_url\": \"\",\n \"logo_only\": True,\n \"display_version\": True,\n \"prev_next_buttons_location\": \"bottom\",\n \"style_external_links\": False,\n \"style_nav_header_background\": \"#2980B9\",\n # Toc options\n \"collapse_navigation\": True,\n \"sticky_navigation\": True,\n \"navigation_depth\": 4,\n \"includehidden\": True,\n \"titles_only\": False,\n}\n\n# sphinxcontrib.napoleon extension configuration\n# see https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html\n# for details\nnapoleon_google_docstring = True\nnapoleon_numpy_docstring = True\nnapoleon_include_init_with_doc = False\n\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_favicon = os.path.join(\"_static\", \"icon.ico\")\nhtml_logo = os.path.join(\"_static\", \"logo.png\")\nhtml_title = project\nhtml_last_updated_fmt = today_fmt\nhtml_show_sphinx = False\nhtml_show_copyright = True\nhtml_last_updated_fmt = today_fmt\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = project + \"-doc\"\n", "sub_path": "docs/conf.py", "file_name": "conf.py", "file_ext": "py", "file_size_in_byte": 6113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pathlib.Path", "line_number": 8, "usage_type": "call"}, {"api_name": "os.system", "line_number": 10, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 13, "usage_type": "call"}, {"api_name": "os.system", "line_number": 19, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 20, "usage_type": "call"}, {"api_name": "sphinx_rtd_theme.get_html_theme_path", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}]} +{"seq_id": "225592973", "text": "splash = '''\n\n888 888 888 .d888 888888b. 888\n888 o 888 888 d88P\" 888 \"88b 888\n888 d8b 888 888 888 888 .88P 888\n888 d888b 888 .d88b. 888d888 .d88b. 888 888 888 .d88b. 888 888888 8888888K. .d88b. 888888\n888d88888b888 d8P Y8b 888P\" d8P Y8b 888 888 888 d88\"\"88b 888 888 888 \"Y88b d88\"\"88b 888\n88888P Y88888 88888888 888 88888888 888 888 888 888 888 888 888 888 888 888 888 888\n8888P Y8888 Y8b. 888 Y8b. 
Y88b 888 d88P Y88..88P 888 888 888 d88P Y88..88P Y88b.\n888P Y888 \"Y8888 888 \"Y8888 \"Y8888888P\" \"Y88P\" 888 888 8888888P\" \"Y88P\" \"Y888\n\n - = https://github.com/werewolves-devs/werewolf_bot = -\n\n'''\n\nsplashes = [\n'Now with 100% less JavaScript',\n'I made it, we *HAVE* to use it',\n'Standards? What are they?',\n'Nah, we don\\'t use libraries here.',\n'The mailbox system is a \\'good idea\\'',\n'Leaking tokens is fun!',\n'Let\\'s just shove everything into main.py, who still does organization in 2018',\n'Works on my machine',\n'Always use a database. What\\'s a JSON?',\n'Powered by Electricity',\n'Who still writes docs in 2018?',\n\"First normal form? What does that mean?\",\n\"By using a relational database but with nonrelational practices we get the worst of both worlds!\",\n\"I haven\\'t paid attention or read any comments, therefor it\\'s impossible to understand!\",\n\"Don\\'t use that! Oh, you\\'re asking why? Well... just don\\'t it.\",\n\"I don\\'t wanna explain, just Google it.\",\n\"What are cogs?\",\n\"This is MY project. You\\'re just freeloaders.\",\n\"You've got three weeks to fix EVERYTHING.\",\n\"No-one agrees? Too bad! My idea it is.\",\n\"The next version will be written in Java only!\"\n]\n\nimport discord\nimport random\nimport asyncio\n\n# Import config data\nimport story_time.cc_creation as creation_messages\nfrom config import welcome_channel, game_master, dead_participant, frozen_participant, administrator\nfrom config import ww_prefix as prefix\nfrom management.db import db_set, db_get\nfrom interpretation.ww_head import process\nfrom interpretation.polls import count_votes\nimport config\nimport management.db as db\n\n\nclient = discord.Client()\n\ndef get_role(server_roles, target_id):\n for each in server_roles:\n if each.id == target_id:\n return each\n return None\n\nasync def remove_all_game_roles(member):\n for role in member.roles:\n if role.id == config.frozen_participant:\n await member.remove_roles(role, reason=\"Updating CC permissions\")\n if role.id == config.dead_participant:\n await member.remove_roles(role, reason=\"Updating CC permissions\")\n if role.id == config.suspended:\n await member.remove_roles(role, reason=\"Updating CC permissions\")\n\n# Whenever a message is sent.\n@client.event\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n gamelog_channel = client.get_channel(int(config.game_log))\n botspam_channel = client.get_channel(int(config.bot_spam))\n storytime_channel = client.get_channel(int(config.story_time))\n\n # Check if the message author has the Game Master role\n isGameMaster = False\n if message.guild == gamelog_channel.guild:\n if game_master in [y.id for y in message.guild.get_member(message.author.id).roles]:\n isGameMaster = True\n\n isAdmin = False\n if message.guild == gamelog_channel.guild:\n if administrator in [y.id for y in message.guild.get_member(message.author.id).roles]:\n isAdmin = True\n\n result = process(message,isGameMaster,isAdmin)\n\n temp_msg = []\n\n for mailbox in result:\n\n if mailbox.evaluate_polls == True:\n for poll in db.get_all_polls():\n # poll.msg_table -> list of message ids\n # poll.blamed -> name of killer\n # poll.purpose -> the reason of the kill\n\n poll_channel = client.get_channel(int(poll.channel))\n if poll_channel == None:\n await botspam_channel.send(\"We got a problem! 
Could you send these results to the appropriate channel, please?\")\n poll_channel = botspam_channel\n\n user_table = []\n for msg in poll.msg_table:\n poll_msg = await poll_channel.get_message(msg)\n for emoji in poll_msg.reactions:\n users = await emoji.users().flatten()\n\n for person in users:\n if db.isParticipant(person.id):\n user_table.append([person.id,emoji.emoji])\n\n log, result, chosen_emoji = count_votes(user_table,poll.purpose)\n\n await gamelog_channel.send(log)\n await poll_channel.send(result)\n\n chosen_one = db.emoji_to_player(chosen_emoji)\n\n if chosen_emoji != '' and chosen_one != None:\n if poll.purpose == 'lynch':\n db.add_kill(chosen_one,'Innocent')\n elif poll.purpose == 'Mayor':\n # TODO: give Mayor role and add data to dynamic.json\n pass\n elif poll.purpose == 'Reporter':\n # TODO: give Reporter role and add data to dynamic.json\n pass\n elif poll.purpose == 'wolf':\n db.add_kill(chosen_one,'Werewolf',db.random_wolf())\n elif poll.purpose == 'cult':\n db.add_kill(chosen_one,'Cult Leader',db.random_cult())\n elif poll.purpose == 'thing':\n # TODO: kill poor victim\n pass\n\n\n for element in mailbox.gamelog:\n msg = await gamelog_channel.send(element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n\n for element in mailbox.botspam:\n msg = await botspam_channel.send(element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n\n for element in mailbox.storytime:\n msg = await storytime_channel.send(element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n\n for element in mailbox.answer:\n msg = await message.channel.send(element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n\n for element in mailbox.channel:\n if element.embed:\n if element.destination == \"spam\":\n msg = await botspam_channel.send(embed=element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n else:\n msg = await client.get_channel(int(element.destination)).send(embed=element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n else:\n msg = await client.get_channel(int(element.destination)).send(element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n\n for element in mailbox.player:\n member = client.get_user(element.destination)\n if member == None:\n await message.channel.send(\"Couldn't send a DM to <@{}>!\".format(element.destination))\n await botspam_channel.send(\"<@{}> has attempted to send a DM to <@{}>, but failed, because we couldn't find the specified user via `get_user`.\".format(message.author.id,element.destination))\n else:\n msg = await member.send(element.content)\n for emoji in element.reactions:\n await msg.add_reaction(emoji)\n if element.temporary == True:\n temp_msg.append(msg)\n\n for element in mailbox.oldchannels:\n # element.channel - channel to be edited;\n # element.victim - person's permission to be changed;\n # element.number - type of setting to set to:\n # 0 - no access (no view, no type)\n # 1 - access (view + type)\n # 2 - frozen (view, no type)\n # 3 - abducted (no view, no type)\n # 4 - dead 
(dead role?)\n\n # 0 -> read = False\n # 1 -> read = True\n # 2 -> give frozen (if they don't have it yet)\n # 3 -> read = False\n # 4 -> give dead role + remove participant role\n # 5 -> mute\n # 6 -> also mute, no read\n\n channel = client.get_channel(element.channel)\n user = client.get_user(element.victim)\n main_guild = botspam_channel.guild\n member = main_guild.get_member(element.victim)\n await remove_all_game_roles(member)\n if element.number == 0:\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.participant), reason=\"Updating CC Permissions\")\n elif element.number == 1:\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await member.add_roles(get_role(main_guild.roles, config.participant), reason=\"Updating CC Permissions\")\n elif element.number == 2:\n await channel.set_permissions(user, read_messages=True, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.frozen_participant), reason=\"Updating CC Permissions\")\n elif element.number == 3:\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.participant), reason=\"Updating CC Permissions\")\n elif element.number == 4:\n await channel.set_permissions(user, read_messages=True, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.dead_participant), reason=\"Updating CC Permissions\")\n elif element.number == 5:\n await channel.set_permissions(user, read_messages=True, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.participant), reason=\"Updating CC Permissions\")\n elif element.number == 6:\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.participant), reason=\"Updating CC Permissions\")\n elif element.number == 7:\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.dead_participant), reason=\"Updating CC Permissions\")\n elif element.number == 8:\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await member.add_roles(get_role(main_guild.roles, config.suspended), reason=\"Updating CC Permissions\")\n else:\n await msg.channel.send('Something went wrong! 
Please contact a Game Master.')\n return\n if db.isParticipant(element.victim,True,True):\n db.set_user_in_channel(element.channel,element.victim,element.number)\n\n\n for element in mailbox.newchannels:\n # element.name - name of the channel;\n # element.owner - owner of the channel;\n # element.members - members of the channel\n # element.settlers - members for whom this shall become their home channel\n #\n # @Participant - no view + type\n # @dead Participant - view + no type\n # @everyone - no view + no type\n\n # All you need to do is create a channel where only the channel owner has access.\n # The other members are given access through another Mailbox.\n # You could make the work easier if you also posted a cc channel message already over here.\n\n if ' ' not in element.name:\n\n main_guild = botspam_channel.guild # Find the guild we're in\n\n if element.owner not in element.members:\n element.members.append(element.owner)\n for buddy in element.settlers:\n if buddy not in element.members:\n msg = \"\"\"**Warning:** I'm adding settlers to a channel!\\nThis is should not be a problem, \\\n but it does at least indicate a flaw in the bot's code. Please, report this to the Game Masters!\"\"\"\n await client.get_channel(message.channel).send(msg)\n element.members.append(buddy)\n\n viewers = []\n frozones = []\n abductees = []\n deadies = []\n for user in element.members:\n member = main_guild.get_member(user)\n\n if member == None:\n await message.author.send(\"It doesn't seem like <@{}> is part of the server! I am sorry, I can't add them to your **conspiracy channel**.\".format(user))\n elif db.isParticipant(user,False,True) == True:\n if int(db_get(user,'abducted')) == 1:\n abductees.append(member)\n elif int(db_get(user,'frozen')) == 1:\n frozones.append(member)\n elif db.isParticipant(user,False,False) == False:\n deadies.append(member)\n else:\n viewers.append(member)\n else:\n deadies.append(member)\n\n intro_msg = creation_messages.cc_intro([v.id for v in viewers])\n\n # Role objects (based on ID)\n roles = main_guild.roles # Roles from the guild\n game_master_role = discord.utils.find(lambda r: r.id == game_master, roles)\n default_permissions = {\n main_guild.default_role: discord.PermissionOverwrite(read_messages=False,send_messages=False),\n game_master_role: discord.PermissionOverwrite(read_messages=True,send_messages=True),\n client.user: discord.PermissionOverwrite(read_messages=True,send_messages=True),\n **{\n member: discord.PermissionOverwrite(read_messages=True,send_messages=True) for member in viewers\n },\n **{\n member: discord.PermissionOverwrite(read_messages=True,send_messages=False) for member in frozones\n },\n **{\n member: discord.PermissionOverwrite(read_messages=True,send_messages=False) for member in deadies\n }\n }\n\n # Create a new category if needed\n if db.get_category() == None:\n category = await main_guild.create_category('CC part {}'.format(db.count_categories()), reason='It seems like we couldn\\'t use our previous category! 
Don\\'t worry, I just created a new one.')\n db.add_category(category.id)\n else:\n category = main_guild.get_channel(db.get_category())\n\n try:\n # Create the text channel\n reason_msg = 'CC requested by ' + message.author.name\n channel = await main_guild.create_text_channel(\n name=\"s{}_{}\".format(config.season,element.name),\n category=category,\n overwrites=default_permissions,\n reason=reason_msg)\n db.add_channel(channel.id,element.owner)\n await channel.send(intro_msg)\n\n # Set all access rules in the database\n for member in viewers:\n db.set_user_in_channel(channel.id,member.id,1)\n for member in frozones:\n db.set_user_in_channel(channel.id,member.id,2)\n for member in abductees:\n db.set_user_in_channel(channel.id,member.id,3)\n for member in deadies:\n if db.isParticipant(member.id,True,True) == True:\n db.set_user_in_channel(channel.id,member.id,4)\n\n\n except Exception as e: # Catch any thrown exceptions and send an error to the user.\n await message.channel.send('It seems like I\\'ve encountered an error! Please let the Game Masters know about this!')\n await botspam_channel.send(\"Oi, Game Masters! I got a problem concerning channel creation for ya to fix.\")\n await botspam_channel.send(e)\n raise e # Send the full log to Buddy1913 and his sketchy VM.\n\n # Give the settlers their own happy little residence\n for buddy in element.settlers:\n db_set(buddy,\"channel\",channel.id)\n\n else:\n \"\"\"This should not happen, but we'll use it, to prevent the bot from purposely causing an error\n everytime someone attempts to create a channel that contains spaces. 'cause believe me,\n that happens ALL the time.\"\"\"\n msg = await message.channel.send(\"I\\'m terribly sorry, but you can\\'t use spaces in your channel name. Try again!\")\n temp_msg.append(msg)\n\n for element in mailbox.polls:\n # element.channel\n # element.purpose\n # element.user_id\n # element.description\n\n msg = element.description + '\\n'\n emoji_table = []\n msg_table = []\n i = 0\n\n for user in db.poll_list():\n if db.isParticipant(int(user[0])):\n i += 1\n msg += user[1] + \" - <@\" + str(user[0]) + \"> \"\n\n if int(user[2]) + int(user[3]) > 0:\n if int(user[2]) == 1:\n msg += \"**[FROZEN]** \"\n if int(user[3]) == 1:\n msg += \"**[ABDUCTED] **\"\n else:\n emoji_table.append(user[1])\n\n if i % 20 == 19:\n msg = await client.get_channel(element.channel).send(msg)\n for emoji in emoji_table:\n await msg.add_reaction(emoji)\n msg_table.append(msg)\n msg = ''\n else:\n msg += '\\n'\n\n if msg != '':\n msg = await client.get_channel(element.channel).send(msg)\n for emoji in emoji_table:\n await msg.add_reaction(emoji)\n msg_table.append(msg)\n db.add_poll(msg_table,element.purpose,element.channel,element.user_id)\n await botspam_channel.send(\"A poll has been created in <#{}>!\".format(element.channel))\n\n for element in mailbox.deletecategories:\n id = element.channel\n category = client.get_channel(id)\n if category != None:\n bot_message = await message.channel.send('Please react with 👍 to confirm deletion of category `' + category.name + '`.\\n\\nNote: This action will irrevirsibly delete all channels contained within the specified category. 
Please use with discretion.')\n await bot_message.add_reaction('👍')\n def check(reaction, user):\n return user == message.author and str(reaction.emoji) == '👍'\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=30.0, check=check)\n except asyncio.TimeoutError:\n await message.channel.send('Confirmation timed out.')\n else:\n await message.channel.send('Ok, I\\'ll get right on that.\\n\\n*This might take some time.*')\n for channel in category.channels:\n await channel.delete()\n await category.delete()\n await message.channel.send('\\n:thumbsup: Channels and category deleted')\n else:\n await message.channel.send('Sorry, I couldn\\'t find that category.')\n\n # Delete all temporary messages after \"five\" seconds.\n await asyncio.sleep(120)\n for msg in temp_msg:\n await msg.delete()\n\n\n# Whenever the bot regains his connection with the Discord API.\n@client.event\nasync def on_ready():\n print(' --> Logged in as')\n print(' | > ' + client.user.name)\n print(' | > ' + str(client.user.id))\n\n await client.get_channel(welcome_channel).send('Beep boop! I just went online!')\n\nprint(splash)\nprint(' --> \"' + random.choice(splashes) + '\"')\nprint(' --> Please wait whilst we connect to the Discord API...')\ntry:\n client.run(config.TOKEN)\nexcept:\n print(' | > Error logging in. Check your token is valid and you are connected to the Internet.')\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 22318, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "discord.Client", "line_number": 55, "usage_type": "call"}, {"api_name": "config.frozen_participant", "line_number": 65, "usage_type": "attribute"}, {"api_name": "config.dead_participant", "line_number": 67, "usage_type": "attribute"}, {"api_name": "config.suspended", "line_number": 69, "usage_type": "attribute"}, {"api_name": "config.game_log", "line_number": 79, "usage_type": "attribute"}, {"api_name": "config.bot_spam", "line_number": 80, "usage_type": "attribute"}, {"api_name": "config.story_time", "line_number": 81, "usage_type": "attribute"}, {"api_name": "config.game_master", "line_number": 86, "usage_type": "name"}, {"api_name": "config.administrator", "line_number": 91, "usage_type": "name"}, {"api_name": "interpretation.ww_head.process", "line_number": 94, "usage_type": "call"}, {"api_name": "management.db.get_all_polls", "line_number": 101, "usage_type": "call"}, {"api_name": "management.db", "line_number": 101, "usage_type": "name"}, {"api_name": "management.db.isParticipant", "line_number": 118, "usage_type": "call"}, {"api_name": "management.db", "line_number": 118, "usage_type": "name"}, {"api_name": "interpretation.polls.count_votes", "line_number": 121, "usage_type": "call"}, {"api_name": "management.db.emoji_to_player", "line_number": 126, "usage_type": "call"}, {"api_name": "management.db", "line_number": 126, "usage_type": "name"}, {"api_name": "management.db.add_kill", "line_number": 130, "usage_type": "call"}, {"api_name": "management.db", "line_number": 130, "usage_type": "name"}, {"api_name": "management.db.add_kill", "line_number": 138, "usage_type": "call"}, {"api_name": "management.db", "line_number": 138, "usage_type": "name"}, {"api_name": "management.db.random_wolf", "line_number": 138, "usage_type": "call"}, {"api_name": "management.db.add_kill", "line_number": 140, "usage_type": "call"}, {"api_name": "management.db", "line_number": 140, "usage_type": "name"}, {"api_name": 
"management.db.random_cult", "line_number": 140, "usage_type": "call"}, {"api_name": "config.participant", "line_number": 232, "usage_type": "attribute"}, {"api_name": "config.participant", "line_number": 235, "usage_type": "attribute"}, {"api_name": "config.frozen_participant", "line_number": 238, "usage_type": "attribute"}, {"api_name": "config.participant", "line_number": 241, "usage_type": "attribute"}, {"api_name": "config.dead_participant", "line_number": 244, "usage_type": "attribute"}, {"api_name": "config.participant", "line_number": 247, "usage_type": "attribute"}, {"api_name": "config.participant", "line_number": 250, "usage_type": "attribute"}, {"api_name": "config.dead_participant", "line_number": 253, "usage_type": "attribute"}, {"api_name": "config.suspended", "line_number": 256, "usage_type": "attribute"}, {"api_name": "management.db.isParticipant", "line_number": 260, "usage_type": "call"}, {"api_name": "management.db", "line_number": 260, "usage_type": "name"}, {"api_name": "management.db.set_user_in_channel", "line_number": 261, "usage_type": "call"}, {"api_name": "management.db", "line_number": 261, "usage_type": "name"}, {"api_name": "management.db.isParticipant", "line_number": 300, "usage_type": "call"}, {"api_name": "management.db", "line_number": 300, "usage_type": "name"}, {"api_name": "management.db.db_get", "line_number": 301, "usage_type": "call"}, {"api_name": "management.db.db_get", "line_number": 303, "usage_type": "call"}, {"api_name": "management.db.isParticipant", "line_number": 305, "usage_type": "call"}, {"api_name": "management.db", "line_number": 305, "usage_type": "name"}, {"api_name": "story_time.cc_creation.cc_intro", "line_number": 312, "usage_type": "call"}, {"api_name": "story_time.cc_creation", "line_number": 312, "usage_type": "name"}, {"api_name": "discord.utils.find", "line_number": 316, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 316, "usage_type": "attribute"}, {"api_name": "config.game_master", "line_number": 316, "usage_type": "name"}, {"api_name": "discord.PermissionOverwrite", "line_number": 318, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 319, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 320, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 322, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 325, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 328, "usage_type": "call"}, {"api_name": "management.db.get_category", "line_number": 333, "usage_type": "call"}, {"api_name": "management.db", "line_number": 333, "usage_type": "name"}, {"api_name": "management.db.count_categories", "line_number": 334, "usage_type": "call"}, {"api_name": "management.db", "line_number": 334, "usage_type": "name"}, {"api_name": "management.db.add_category", "line_number": 335, "usage_type": "call"}, {"api_name": "management.db", "line_number": 335, "usage_type": "name"}, {"api_name": "management.db.get_category", "line_number": 337, "usage_type": "call"}, {"api_name": "management.db", "line_number": 337, "usage_type": "name"}, {"api_name": "config.season", "line_number": 343, "usage_type": "attribute"}, {"api_name": "management.db.add_channel", "line_number": 347, "usage_type": "call"}, {"api_name": "management.db", "line_number": 347, "usage_type": "name"}, {"api_name": "management.db.set_user_in_channel", "line_number": 352, "usage_type": "call"}, {"api_name": 
"management.db", "line_number": 352, "usage_type": "name"}, {"api_name": "management.db.set_user_in_channel", "line_number": 354, "usage_type": "call"}, {"api_name": "management.db", "line_number": 354, "usage_type": "name"}, {"api_name": "management.db.set_user_in_channel", "line_number": 356, "usage_type": "call"}, {"api_name": "management.db", "line_number": 356, "usage_type": "name"}, {"api_name": "management.db.isParticipant", "line_number": 358, "usage_type": "call"}, {"api_name": "management.db", "line_number": 358, "usage_type": "name"}, {"api_name": "management.db.set_user_in_channel", "line_number": 359, "usage_type": "call"}, {"api_name": "management.db", "line_number": 359, "usage_type": "name"}, {"api_name": "management.db.db_set", "line_number": 370, "usage_type": "call"}, {"api_name": "management.db.poll_list", "line_number": 390, "usage_type": "call"}, {"api_name": "management.db", "line_number": 390, "usage_type": "name"}, {"api_name": "management.db.isParticipant", "line_number": 391, "usage_type": "call"}, {"api_name": "management.db", "line_number": 391, "usage_type": "name"}, {"api_name": "management.db.add_poll", "line_number": 417, "usage_type": "call"}, {"api_name": "management.db", "line_number": 417, "usage_type": "name"}, {"api_name": "asyncio.TimeoutError", "line_number": 430, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 442, "usage_type": "call"}, {"api_name": "config.welcome_channel", "line_number": 454, "usage_type": "argument"}, {"api_name": "random.choice", "line_number": 457, "usage_type": "call"}, {"api_name": "config.TOKEN", "line_number": 460, "usage_type": "attribute"}]} +{"seq_id": "226888031", "text": "import os\nimport textwrap\nimport json\nimport io\n\nimport yaml\nimport py\nimport pytest\nimport pkg_resources\nimport jinja2\nimport bs4\n\nfrom sphinx.application import Sphinx\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n@pytest.fixture(scope='function')\ndef run_sphinx(tmpdir):\n src = tmpdir.mkdir('src')\n out = tmpdir.mkdir('out')\n\n spec = py.path.local(here).join('..', 'docs', '_specs', 'github.yml')\n spec.copy(src.mkdir('_specs').join('github.yml'))\n\n def run(redoc_overwrite=None, redoc_uri=None):\n conf = {'name': 'Github API (v3)',\n 'page': 'api/github/index',\n 'spec': '_specs/github.yml'}\n conf.update(redoc_overwrite or {})\n\n confpy = jinja2.Template(textwrap.dedent('''\n import os\n\n project = 'sphinxcontrib-redoc'\n copyright = '2017, Ihor Kalnytskyi'\n\n extensions = ['sphinxcontrib.redoc']\n source_suffix = '.rst'\n master_doc = 'index'\n redoc = {{ redoc }}\n redoc_uri = {{ redoc_uri }}\n ''')).render(redoc=[conf], redoc_uri=repr(redoc_uri))\n\n src.join('conf.py').write_text(confpy, encoding='utf-8')\n src.join('index.rst').ensure()\n\n Sphinx(\n srcdir=src.strpath,\n confdir=src.strpath,\n outdir=out.strpath,\n doctreedir=out.join('.doctrees').strpath,\n buildername='html'\n ).build()\n\n yield run\n\n\ndef test_redocjs_lib_is_copied(run_sphinx, tmpdir):\n outdir = tmpdir.join('out')\n extdir = py.path.local(\n pkg_resources.get_provider('sphinxcontrib.redoc').module_path)\n\n run_sphinx()\n\n assert outdir.join('_static', 'redoc.js').check()\n assert outdir.join('_static', 'redoc.js').computehash() \\\n == extdir.join('redoc.js').computehash()\n\n\ndef test_redocjs_lib_is_downloaded(run_sphinx, tmpdir):\n outdir = tmpdir.join('out')\n extdir = py.path.local(\n pkg_resources.get_provider('sphinxcontrib.redoc').module_path)\n\n run_sphinx(redoc_uri=(\n 
'https://cdn.jsdelivr.net/npm/redoc@2.0.0-alpha.32/bundles'\n '/redoc.standalone.js'))\n\n assert outdir.join('_static', 'redoc.js').check()\n assert outdir.join('_static', 'redoc.js').computehash() \\\n != extdir.join('redoc.js').computehash()\n assert outdir.join('_static', 'redoc.js').computehash() \\\n == '6978103258cab653263b5b75c008b474'\n\n\ndef test_openapi_spec_is_copied(run_sphinx, tmpdir):\n srcdir, outdir = tmpdir.join('src'), tmpdir.join('out')\n\n run_sphinx()\n\n assert outdir.join('_specs', 'github.yml').check()\n assert outdir.join('_specs', 'github.yml').computehash() \\\n == srcdir.join('_specs', 'github.yml').computehash()\n\n\n@pytest.mark.parametrize('options, attributes', [\n (None,\n {}),\n\n ({},\n {}),\n\n ({'lazy-rendering': False,\n 'suppress-warnings': False,\n 'hide-hostname': False,\n 'required-props-first': False,\n 'no-auto-auth': False,\n 'path-in-middle-panel': False,\n 'hide-loading': False,\n 'native-scrollbars': False,\n 'untrusted-spec': False,\n 'expand-responses': []},\n {}),\n\n ({'lazy-rendering': True},\n {'lazy-rendering': ''}),\n\n ({'suppress-warnings': True},\n {'suppress-warnings': ''}),\n\n ({'hide-hostname': True},\n {'hide-hostname': ''}),\n\n ({'required-props-first': True},\n {'required-props-first': ''}),\n\n ({'no-auto-auth': True},\n {'no-auto-auth': ''}),\n\n ({'path-in-middle-panel': True},\n {'path-in-middle-panel': ''}),\n\n ({'hide-loading': True},\n {'hide-loading': ''}),\n\n ({'native-scrollbars': True},\n {'native-scrollbars': ''}),\n\n ({'untrusted-spec': True},\n {'untrusted-spec': ''}),\n\n ({'expand-responses': ['200', '404']},\n {'expand-responses': '200,404'}),\n])\ndef test_redocjs_page_is_generated(run_sphinx, tmpdir, options, attributes):\n run_sphinx(redoc_overwrite={'opts': options})\n\n html = tmpdir.join('out').join('api', 'github', 'index.html').read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n assert soup.title.string == 'Github API (v3)'\n assert soup.redoc.attrs == attributes\n assert soup.script.attrs['src'] == os.path.join(\n '..', '..', '_static', 'redoc.js')\n\n assert os.path.join('..', '..', '_specs', 'github.yml') \\\n in soup.find_all('script')[-1].text\n\n\ndef test_embedded_spec(run_sphinx, tmpdir):\n run_sphinx(redoc_overwrite={'embed': True})\n\n html = tmpdir.join('out').join('api', 'github', 'index.html').read()\n spec = tmpdir.join('src', '_specs', 'github.yml').strpath\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n with io.open(spec, encoding='utf-8') as f:\n spec = yaml.safe_load(f)\n\n embedded_spec = soup.find(id='spec').get_text()\n assert json.loads(embedded_spec) == spec\n", "sub_path": "tests/test_integration.py", "file_name": "test_integration.py", "file_ext": "py", "file_size_in_byte": 4833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "py.path.local", "line_number": 24, "usage_type": "call"}, {"api_name": "py.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "jinja2.Template", "line_number": 33, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 33, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "call"}, {"api_name": 
"py.path.local", "line_number": 62, "usage_type": "call"}, {"api_name": "py.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pkg_resources.get_provider", "line_number": 63, "usage_type": "call"}, {"api_name": "py.path.local", "line_number": 74, "usage_type": "call"}, {"api_name": "py.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pkg_resources.get_provider", "line_number": 75, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 98, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 98, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 167, "usage_type": "call"}, {"api_name": "io.open", "line_number": 169, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 170, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "293335948", "text": "\nimport threading\nimport redpitaya_scpi as scpi\nimport matplotlib.pyplot as plot\nimport csv\nfrom peaks import calculate_peak\nimport numpy as np\n\nrp_s = scpi.scpi('192.168.128.1')\ndef getData():\n try:\n threading.Timer(3, getData).start()\n wave_form = 'sine'\n freq = 10000\n ampl = 2\n\n rp_s.tx_txt('GEN:RST')\n rp_s.tx_txt('SOUR1:FUNC ' + str(wave_form).upper())\n rp_s.tx_txt('SOUR1:FREQ:FIX ' + str(freq))\n rp_s.tx_txt('SOUR1:VOLT ' + str(ampl))\n rp_s.tx_txt('SOUR1:BURS:NCYC 2')\n rp_s.tx_txt('OUTPUT1:STATE ON')\n rp_s.tx_txt('SOUR1:BURS:STAT ON')\n rp_s.tx_txt('SOUR1:TRIG:SOUR EXT_PE')\n\n rp_s.tx_txt('ACQ:DEC 64')\n rp_s.tx_txt('ACQ:TRIG:LEVEL 100')\n rp_s.tx_txt('ACQ:START')\n rp_s.tx_txt('ACQ:TRIG EXT_PE')\n rp_s.tx_txt('ACQ:TRIG:DLY 9000')\n\n while 1:\n rp_s.tx_txt('ACQ:TRIG:STAT?')\n if rp_s.rx_txt() == 'TD':\n break\n\n rp_s.tx_txt('ACQ:SOUR1:DATA?')\n buff_string = rp_s.rx_txt()\n buff_string = buff_string.strip('{}\\n\\r').replace(\" \", \"\").split(',')\n buff = list(map(float, buff_string))\n\n #peaks = calculate_peak(buff)\n #test = peaks.get('peak_heights')\n #row = {test.min(), test.max()}\n\n writer = csv.writer(open(\"ml/car.csv\", 'a'))\n writer.writerow(buff)\n plot.plot(buff)\n plot.ylabel('Voltage')\n plot.show()\n except:\n print(\"An exception occurred\")\n\n\ngetData()", "sub_path": "Resources/SL2_1276493_1277599/SL2_1276493_1277599/data_get_common_code.py", "file_name": "data_get_common_code.py", "file_ext": "py", "file_size_in_byte": 1523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "redpitaya_scpi.scpi", "line_number": 9, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 12, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 50, "usage_type": "name"}]} +{"seq_id": "261913526", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport itertools\nimport json\nfrom datetime import datetime\nfrom random import choice, randint, shuffle\n\nfrom django.contrib.auth.models import User\n\nfrom contenido.models import *\n\nDesafio.objects.all().delete()\nJugador_equipo.objects.all().delete()\nEquipo.objects.all().delete()\nLocalidad.objects.all().delete()\nDeporte.objects.all().delete()\nInstitucion.objects.all().delete()\n\n\nequipos_por_deporte = 15\nnum_desafios_por_deporte = 10\n\n\nadmin = [{'nombre': ['admin', 'admin'], 'fechanac': '1998-03-21 00:00:00', }]\n\n\nf = open('jugadores.json', 'r').read()\nusers = json.loads(f)\n\nfor u in admin+users:\n aux_username = u['nombre'][0][0].lower()+u['nombre'][1].lower()\n if User.objects.filter(username=aux_username).count() == 0:\n us = User()\n us.username = aux_username\n us.password = 'pbkdf2_sha256$24000$WuVONA6Xzl7V$N7a1GHNhLV9cKNeHkK4JBBXeTeOL99jkmElfN4AkEPg=' # 'adidas10'\n us.save()\n ju = Jugador()\n ju.Nombre = u['nombre'][0]\n ju.Apellido = u['nombre'][1]\n ju.FechaNac = u['fechanac']\n ju.idUser = us\n ju.save()\nprint('[*] Cargados %d usuarios' % len(users))\n\ndeportes = [('Futbol', 5, 0), ('Tenis', 1, 0), ('Basquet', 5, 0), ('Paddle', 1, 0), ('Ajedrez', 1, 2), ('Counter Strike 1.6', 5, 1), ('Dota 2', 5, 1), ('League of Legends', 5, 1),]\nfor d in deportes:\n aux = Deporte()\n aux.Deporte = d[0]\n aux.MinJugadores = d[1]\n aux.Tipo = d[2]\n aux.save()\n\nprint('[*] Cargados deportes')\n\nlocs = ['Online', 'Rafaela', 'Sunchales', 'Hawai', 'Bahamas', 'Santa Fe', 'CABA',\n 'San Juan', 'Cordoba', 'San Luis', 'San Justo', 'Salta', 'La Plata',\n 'Parque Patricios', 'Nuñez', 'Angelica', 'Reconquista', 'El Trebol',\n 'Puerto Madrin', 'Bahia Blanca', 'San Martin', 'Mendoza', 'Salto',\n 'Yapeyu', 'Totoras', 'Lincoln', 'Belgrano',\n ]\n\nfor l in locs:\n aux = Localidad()\n aux.Nombre = l\n aux.save()\nprint('[*] Cargadas localidades')\n\n\nnombres_instituciones = [\n 'La redo', 'Playfutbol', 'Soccer', 'Paddle friends', 'La redondita',\n 'Boca jrs', 'C.A.R.P', 'Jockey Club', 'Los nogales', 'La rural',\n 'Divertite', 'Pinocho', 'La mula', 'Club Platense', 'Los 3 hermanos',\n 'La abadia', 'Picachu', 'Dreamers', 'DeportesYa', 'Tu partido',\n ]\n\n\nlocalidades = list(Localidad.objects.all())\nfor i in nombres_instituciones:\n iux = Institucion()\n iux.Nombre = i\n iux.Localidad = choice(localidades)\n iux.save()\n\niux = Institucion()\niux.Nombre = \"Server 1\"\niux.Localidad = Localidad.objects.filter(Nombre='Online')[0]\niux.save()\n\nprint('[*] Cargadas instituciones')\n\n\npalabras = [\n ['capos', 'mejores', 'guerreros', 'luchadores', 'gnomos', 'pasteles', 'chicos', 'amigos', 'samurai', 'monjes', 'monos', 'chicos'],\n ['del pais', 'de la ciudad', 'de la pradera', 'del colegio', 'del mundo', 'del juego', 'de la victoria', 'del triunfo', 'del python', 'de oriente',\n 'de la montaña', 'del universo', 'de la galaxia', 'del deporte']\n ]\n\nnombre_equipo = list(itertools.product(palabras[0], palabras[1]))\nfor d in Deporte.objects.all():\n for _ in range(equipos_por_deporte):\n r = choice(nombre_equipo)\n e = Equipo()\n e.Nombre = \" \".join(r)\n if d.Tipo == 1:\n e.Localidad = Localidad.objects.filter(Nombre='Online')[0]\n else:\n e.Localidad = choice(Localidad.objects.all())\n\n e.Deporte = d\n e.save()\n\n jugadores = list(Jugador.objects.all())\n shuffle(jugadores)\n for _ in range(d.MinJugadores):\n je = Jugador_equipo()\n je.idEquipo = e\n 
je.idJugador = jugadores.pop()\n        je.Encargado = choice([True, False])\n        je.Aceptado = True\n        je.save()\n\nprint('[*] Cargados equipos y jugadores a los equipos')\n\ninstituciones = list(Institucion.objects.all())\nfor d in Deporte.objects.all():\n    for _ in range(num_desafios_por_deporte):\n        equipos = list(Equipo.objects.filter(Deporte=d))\n        if len(equipos) > 1:\n            shuffle(equipos)\n\n            desafio = Desafio()\n            desafio.Deporte = d\n            desafio.ParticipanteA = equipos.pop()\n            desafio.ParticipanteB = equipos.pop()\n            desafio.Lugar = choice(instituciones)\n            desafio.AceptadoA = True\n            desafio.AceptadoB = True\n            desafio.FechaHora = datetime(randint(1990, 2016), randint(1, 12), randint(1, 28), randint(1, 23), randint(1, 59))\n            desafio.save()\n\nprint('[*] Cargados desafios')\n\n\n# Extras\nfor c in ['Fair play', 'Puntualidad', 'Buena onda', 'Respeto', 'Habilidad', 'Respeto', 'Habilidad']:\n    cux = Criterio()\n    cux.Criterio = c\n    cux.save()\nprint('[*] Cargados criterios de calificacion')\n\n\n", "sub_path": "jugatealgo/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 4816, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 33, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 34, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 82, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 99, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 102, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 108, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 114, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 119, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 130, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "563489826", "text": "# 计算多天的检测率 表示检测的完备度\n# 需要进行计算的日期 单独列为一个程序\n\nimport datetime\nfrom detectStatistics import detect_result\n\ndef days(startdate,enddate):\n    datestart = datetime.datetime.strptime(startdate, '%Y/%m/%d')\n    dateend = datetime.datetime.strptime(enddate, '%Y/%m/%d')\n    dayslist = []\n    while datestart < dateend:\n        datestart += datetime.timedelta(days=1)\n        dayslist.append(datestart.strftime('%Y/%m/%d'))\n    # 不包括startdate 在结果中直接添加\n    dayslist.insert(0,startdate)\n    return dayslist\n\n\n# 计算多次\nstartdate = '2020/12/21'\nenddate = '2020/12/27'\n\n# 总数 180970\nnum = 18097\n\nday = days(startdate,enddate)\n# 用字典保存 每日的检测率 {'2020/12/21': 0.0021550533237553187, '2020/12/22': 0.0022213626567939436,\ndetect_completion = {}\nfor i in range(len(day)):\n    # date 表示计算第i天的检测率 统计第一天到第i天的检测结果\n    dates = days(startdate,day[i])\n    result = detect_result(dates)\n    deteceted_rate = len(result)/num\n    detect_completion[day[i]] = deteceted_rate\n\n# for key,value in detect_completion.items():\n#     print(type(key),value)\n\nprint(detect_completion)\n\n# # 绘制完备度分布图\n# # 横坐标为日期 纵坐标为检测率(百分比\n# date = [key for key,value in detect_completion.items()]\n# rate = [value for key,value in detect_completion.items()]\n#\n# import matplotlib.pyplot as plt\n# from matplotlib.ticker import FuncFormatter\n#\n# plt.xlabel('date')\n# plt.ylabel('completion')\n# # 设置纵坐标为百分比\n# # def to_percent(temp, position):\n# #     return '%1.0f'%(10*temp) + '%'\n# # plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))\n#\n# plt.plot(date,rate)\n# plt.show()", "sub_path": "detectedCompletion.py", "file_name": "detectedCompletion.py", "file_ext": "py", "file_size_in_byte": 1751, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 12, "usage_type": "call"}, {"api_name": "detectStatistics.detect_result", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "111280072", "text": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom collections import OrderedDict\nimport functools\nimport re\nfrom typing import Dict, Sequence, Tuple, Type, Union\nimport pkg_resources\n\nimport google.api_core.client_options as ClientOptions # type: ignore\nfrom google.api_core import exceptions as core_exceptions # type: ignore\nfrom google.api_core import gapic_v1 # type: ignore\nfrom google.api_core import retry as retries # type: ignore\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\nfrom google.cloud.devtools.containeranalysis_v1.types import containeranalysis\nfrom google.iam.v1 import iam_policy_pb2 # type: ignore\nfrom google.iam.v1 import policy_pb2 # type: ignore\nfrom .transports.base import ContainerAnalysisTransport, DEFAULT_CLIENT_INFO\nfrom .transports.grpc_asyncio import ContainerAnalysisGrpcAsyncIOTransport\nfrom .client import ContainerAnalysisClient\n\n\nclass ContainerAnalysisAsyncClient:\n    \"\"\"Retrieves analysis results of Cloud components such as Docker\n    container images. The Container Analysis API is an implementation of\n    the `Grafeas `__ API.\n\n    Analysis results are stored as a series of occurrences. An\n    ``Occurrence`` contains information about a specific analysis\n    instance on a resource. An occurrence refers to a ``Note``. A note\n    contains details describing the analysis and is generally stored in\n    a separate project, called a ``Provider``. Multiple occurrences can\n    refer to the same note.\n\n    For example, an SSL vulnerability could affect multiple images. 
In\n this case, there would be one note for the vulnerability and an\n occurrence for each image with the vulnerability referring to that\n note.\n \"\"\"\n\n _client: ContainerAnalysisClient\n\n DEFAULT_ENDPOINT = ContainerAnalysisClient.DEFAULT_ENDPOINT\n DEFAULT_MTLS_ENDPOINT = ContainerAnalysisClient.DEFAULT_MTLS_ENDPOINT\n\n common_billing_account_path = staticmethod(ContainerAnalysisClient.common_billing_account_path)\n parse_common_billing_account_path = staticmethod(ContainerAnalysisClient.parse_common_billing_account_path)\n common_folder_path = staticmethod(ContainerAnalysisClient.common_folder_path)\n parse_common_folder_path = staticmethod(ContainerAnalysisClient.parse_common_folder_path)\n common_organization_path = staticmethod(ContainerAnalysisClient.common_organization_path)\n parse_common_organization_path = staticmethod(ContainerAnalysisClient.parse_common_organization_path)\n common_project_path = staticmethod(ContainerAnalysisClient.common_project_path)\n parse_common_project_path = staticmethod(ContainerAnalysisClient.parse_common_project_path)\n common_location_path = staticmethod(ContainerAnalysisClient.common_location_path)\n parse_common_location_path = staticmethod(ContainerAnalysisClient.parse_common_location_path)\n\n @classmethod\n def from_service_account_info(cls, info: dict, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n info.\n\n Args:\n info (dict): The service account private key info.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n ContainerAnalysisAsyncClient: The constructed client.\n \"\"\"\n return ContainerAnalysisClient.from_service_account_info.__func__(ContainerAnalysisAsyncClient, info, *args, **kwargs) # type: ignore\n\n @classmethod\n def from_service_account_file(cls, filename: str, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n file.\n\n Args:\n filename (str): The path to the service account private key json\n file.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n ContainerAnalysisAsyncClient: The constructed client.\n \"\"\"\n return ContainerAnalysisClient.from_service_account_file.__func__(ContainerAnalysisAsyncClient, filename, *args, **kwargs) # type: ignore\n\n from_service_account_json = from_service_account_file\n\n @property\n def transport(self) -> ContainerAnalysisTransport:\n \"\"\"Returns the transport used by the client instance.\n\n Returns:\n ContainerAnalysisTransport: The transport used by the client instance.\n \"\"\"\n return self._client.transport\n\n get_transport_class = functools.partial(type(ContainerAnalysisClient).get_transport_class, type(ContainerAnalysisClient))\n\n def __init__(self, *,\n credentials: ga_credentials.Credentials = None,\n transport: Union[str, ContainerAnalysisTransport] = \"grpc_asyncio\",\n client_options: ClientOptions = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n \"\"\"Instantiates the container analysis client.\n\n Args:\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n transport (Union[str, ~.ContainerAnalysisTransport]): The\n transport to use. 
If set to None, a transport is chosen\n automatically.\n client_options (ClientOptions): Custom options for the client. It\n won't take effect if a ``transport`` instance is provided.\n (1) The ``api_endpoint`` property can be used to override the\n default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT\n environment variable can also be used to override the endpoint:\n \"always\" (always use the default mTLS endpoint), \"never\" (always\n use the default regular endpoint) and \"auto\" (auto switch to the\n default mTLS endpoint if client certificate is present, this is\n the default value). However, the ``api_endpoint`` property takes\n precedence if provided.\n (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable\n is \"true\", then the ``client_cert_source`` property can be used\n to provide client certificate for mutual TLS transport. If\n not provided, the default SSL client certificate will be used if\n present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is \"false\" or not\n set, no client certificate will be used.\n\n Raises:\n google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport\n creation failed for any reason.\n \"\"\"\n self._client = ContainerAnalysisClient(\n credentials=credentials,\n transport=transport,\n client_options=client_options,\n client_info=client_info,\n\n )\n\n async def set_iam_policy(self,\n request: iam_policy_pb2.SetIamPolicyRequest = None,\n *,\n resource: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy_pb2.Policy:\n r\"\"\"Sets the access control policy on the specified note or\n occurrence. Requires ``containeranalysis.notes.setIamPolicy`` or\n ``containeranalysis.occurrences.setIamPolicy`` permission if the\n resource is a note or an occurrence, respectively.\n\n The resource takes the format\n ``projects/[PROJECT_ID]/notes/[NOTE_ID]`` for notes and\n ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for\n occurrences.\n\n Args:\n request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):\n The request object. Request message for `SetIamPolicy`\n method.\n resource (:class:`str`):\n REQUIRED: The resource for which the\n policy is being specified. See the\n operation documentation for the\n appropriate value for this field.\n\n This corresponds to the ``resource`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.iam.v1.policy_pb2.Policy:\n Defines an Identity and Access Management (IAM) policy. It is used to\n specify access control policies for Cloud Platform\n resources.\n\n A Policy is a collection of bindings. A binding binds\n one or more members to a single role. Members can be\n user accounts, service accounts, Google groups, and\n domains (such as G Suite). 
A role is a named list of\n permissions (defined by IAM or configured by users).\n A binding can optionally specify a condition, which\n is a logic expression that further constrains the\n role binding based on attributes about the request\n and/or target resource.\n\n **JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\":\n \"roles/resourcemanager.organizationAdmin\",\n \"members\": [ \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n\n }, { \"role\":\n \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": { \"title\": \"expirable access\",\n \"description\": \"Does not grant access after\n Sep 2020\", \"expression\": \"request.time <\n timestamp('2020-10-01T00:00:00.000Z')\", } }\n\n ]\n\n }\n\n **YAML Example**\n\n bindings: - members: - user:\\ mike@example.com -\n group:\\ admins@example.com - domain:google.com -\n serviceAccount:\\ my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin -\n members: - user:\\ eve@example.com role:\n roles/resourcemanager.organizationViewer\n condition: title: expirable access description:\n Does not grant access after Sep 2020 expression:\n request.time <\n timestamp('2020-10-01T00:00:00.000Z')\n\n For a description of IAM and its features, see the\n [IAM developer's\n guide](\\ https://cloud.google.com/iam/docs).\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([resource])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy_pb2.SetIamPolicyRequest(**request)\n elif not request:\n request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, )\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.set_iam_policy,\n default_timeout=30.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n (\"resource\", request.resource),\n )),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def get_iam_policy(self,\n request: iam_policy_pb2.GetIamPolicyRequest = None,\n *,\n resource: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy_pb2.Policy:\n r\"\"\"Gets the access control policy for a note or an occurrence\n resource. 
Requires ``containeranalysis.notes.setIamPolicy`` or\n ``containeranalysis.occurrences.setIamPolicy`` permission if the\n resource is a note or occurrence, respectively.\n\n The resource takes the format\n ``projects/[PROJECT_ID]/notes/[NOTE_ID]`` for notes and\n ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for\n occurrences.\n\n Args:\n request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`):\n The request object. Request message for `GetIamPolicy`\n method.\n resource (:class:`str`):\n REQUIRED: The resource for which the\n policy is being requested. See the\n operation documentation for the\n appropriate value for this field.\n\n This corresponds to the ``resource`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.iam.v1.policy_pb2.Policy:\n Defines an Identity and Access Management (IAM) policy. It is used to\n specify access control policies for Cloud Platform\n resources.\n\n A Policy is a collection of bindings. A binding binds\n one or more members to a single role. Members can be\n user accounts, service accounts, Google groups, and\n domains (such as G Suite). A role is a named list of\n permissions (defined by IAM or configured by users).\n A binding can optionally specify a condition, which\n is a logic expression that further constrains the\n role binding based on attributes about the request\n and/or target resource.\n\n **JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\":\n \"roles/resourcemanager.organizationAdmin\",\n \"members\": [ \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n\n }, { \"role\":\n \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": { \"title\": \"expirable access\",\n \"description\": \"Does not grant access after\n Sep 2020\", \"expression\": \"request.time <\n timestamp('2020-10-01T00:00:00.000Z')\", } }\n\n ]\n\n }\n\n **YAML Example**\n\n bindings: - members: - user:\\ mike@example.com -\n group:\\ admins@example.com - domain:google.com -\n serviceAccount:\\ my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin -\n members: - user:\\ eve@example.com role:\n roles/resourcemanager.organizationViewer\n condition: title: expirable access description:\n Does not grant access after Sep 2020 expression:\n request.time <\n timestamp('2020-10-01T00:00:00.000Z')\n\n For a description of IAM and its features, see the\n [IAM developer's\n guide](\\ https://cloud.google.com/iam/docs).\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([resource])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy_pb2.GetIamPolicyRequest(**request)\n elif not request:\n request = 
iam_policy_pb2.GetIamPolicyRequest(resource=resource, )\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.get_iam_policy,\n default_timeout=30.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n (\"resource\", request.resource),\n )),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def test_iam_permissions(self,\n request: iam_policy_pb2.TestIamPermissionsRequest = None,\n *,\n resource: str = None,\n permissions: Sequence[str] = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> iam_policy_pb2.TestIamPermissionsResponse:\n r\"\"\"Returns the permissions that a caller has on the specified note\n or occurrence. Requires list permission on the project (for\n example, ``containeranalysis.notes.list``).\n\n The resource takes the format\n ``projects/[PROJECT_ID]/notes/[NOTE_ID]`` for notes and\n ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for\n occurrences.\n\n Args:\n request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):\n The request object. Request message for\n `TestIamPermissions` method.\n resource (:class:`str`):\n REQUIRED: The resource for which the\n policy detail is being requested. See\n the operation documentation for the\n appropriate value for this field.\n\n This corresponds to the ``resource`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n permissions (:class:`Sequence[str]`):\n The set of permissions to check for the ``resource``.\n Permissions with wildcards (such as '*' or 'storage.*')\n are not allowed. 
For more information see `IAM\n Overview `__.\n\n This corresponds to the ``permissions`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:\n Response message for TestIamPermissions method.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([resource, permissions])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy_pb2.TestIamPermissionsRequest(**request)\n elif not request:\n request = iam_policy_pb2.TestIamPermissionsRequest(resource=resource, permissions=permissions, )\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.test_iam_permissions,\n default_timeout=30.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n (\"resource\", request.resource),\n )),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def get_vulnerability_occurrences_summary(self,\n request: containeranalysis.GetVulnerabilityOccurrencesSummaryRequest = None,\n *,\n parent: str = None,\n filter: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> containeranalysis.VulnerabilityOccurrencesSummary:\n r\"\"\"Gets a summary of the number and severity of\n occurrences.\n\n Args:\n request (:class:`google.cloud.devtools.containeranalysis_v1.types.GetVulnerabilityOccurrencesSummaryRequest`):\n The request object. 
Request to get a vulnerability\n summary for some set of occurrences.\n parent (:class:`str`):\n The name of the project to get a vulnerability summary\n for in the form of ``projects/[PROJECT_ID]``.\n\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n filter (:class:`str`):\n The filter expression.\n This corresponds to the ``filter`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.devtools.containeranalysis_v1.types.VulnerabilityOccurrencesSummary:\n A summary of how many vulnerability\n occurrences there are per resource and\n severity type.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent, filter])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n request = containeranalysis.GetVulnerabilityOccurrencesSummaryRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if parent is not None:\n request.parent = parent\n if filter is not None:\n request.filter = filter\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.get_vulnerability_occurrences_summary,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n (\"parent\", request.parent),\n )),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n\n\n\n\ntry:\n DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(\n gapic_version=pkg_resources.get_distribution(\n \"google-cloud-devtools-containeranalysis\",\n ).version,\n )\nexcept pkg_resources.DistributionNotFound:\n DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()\n\n\n__all__ = (\n \"ContainerAnalysisAsyncClient\",\n)\n", "sub_path": "google/devtools/containeranalysis/v1/devtools-containeranalysis-v1-py/google/cloud/devtools/containeranalysis_v1/services/container_analysis/async_client.py", "file_name": "async_client.py", "file_ext": "py", "file_size_in_byte": 27932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "client.ContainerAnalysisClient", "line_number": 55, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.DEFAULT_ENDPOINT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 57, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.DEFAULT_MTLS_ENDPOINT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 58, "usage_type": "name"}, {"api_name": 
"client.ContainerAnalysisClient.common_billing_account_path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 60, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.parse_common_billing_account_path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 61, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.common_folder_path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 62, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.parse_common_folder_path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 63, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.common_organization_path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 64, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.parse_common_organization_path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 65, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.common_project_path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 66, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.parse_common_project_path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 67, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.common_location_path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 68, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.parse_common_location_path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 69, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.from_service_account_info.__func__", "line_number": 84, "usage_type": "call"}, {"api_name": "client.ContainerAnalysisClient.from_service_account_info", "line_number": 84, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 84, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient.from_service_account_file.__func__", "line_number": 100, "usage_type": "call"}, {"api_name": "client.ContainerAnalysisClient.from_service_account_file", "line_number": 100, "usage_type": "attribute"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 100, "usage_type": "name"}, {"api_name": "transports.base.ContainerAnalysisTransport", "line_number": 105, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 113, "usage_type": "call"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 113, "usage_type": "argument"}, {"api_name": "google.auth.credentials.Credentials", "line_number": 116, "usage_type": "attribute"}, {"api_name": "google.auth.credentials", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 117, "usage_type": "name"}, {"api_name": "transports.base.ContainerAnalysisTransport", "line_number": 117, "usage_type": "name"}, {"api_name": "google.api_core.client_options", "line_number": 118, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.client_info", "line_number": 119, "usage_type": 
"attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 119, "usage_type": "name"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 119, "usage_type": "name"}, {"api_name": "client.ContainerAnalysisClient", "line_number": 153, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest", "line_number": 162, "usage_type": "attribute"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 162, "usage_type": "name"}, {"api_name": "google.api_core.retry.Retry", "line_number": 165, "usage_type": "attribute"}, {"api_name": "google.api_core.retry", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 167, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 167, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method", "line_number": 165, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 165, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest", "line_number": 268, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 268, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest", "line_number": 270, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 270, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method_async.wrap_method", "line_number": 274, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.method_async", "line_number": 274, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 274, "usage_type": "name"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 277, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.routing_header.to_grpc_metadata", "line_number": 283, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.routing_header", "line_number": 283, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 283, "usage_type": "name"}, {"api_name": "google.iam.v1.policy_pb2.Policy", "line_number": 168, "usage_type": "attribute"}, {"api_name": "google.iam.v1.policy_pb2", "line_number": 168, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest", "line_number": 300, "usage_type": "attribute"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 300, "usage_type": "name"}, {"api_name": "google.api_core.retry.Retry", "line_number": 303, "usage_type": "attribute"}, {"api_name": "google.api_core.retry", "line_number": 303, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 305, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 305, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method", "line_number": 303, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 303, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest", "line_number": 406, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 406, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest", "line_number": 408, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 408, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method_async.wrap_method", "line_number": 412, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.method_async", "line_number": 412, "usage_type": "attribute"}, 
{"api_name": "google.api_core.gapic_v1", "line_number": 412, "usage_type": "name"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 415, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.routing_header.to_grpc_metadata", "line_number": 421, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.routing_header", "line_number": 421, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 421, "usage_type": "name"}, {"api_name": "google.iam.v1.policy_pb2.Policy", "line_number": 306, "usage_type": "attribute"}, {"api_name": "google.iam.v1.policy_pb2", "line_number": 306, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest", "line_number": 438, "usage_type": "attribute"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 438, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 441, "usage_type": "name"}, {"api_name": "google.api_core.retry.Retry", "line_number": 442, "usage_type": "attribute"}, {"api_name": "google.api_core.retry", "line_number": 442, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 444, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 444, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method", "line_number": 442, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 442, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest", "line_number": 498, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 498, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest", "line_number": 500, "usage_type": "call"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 500, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method_async.wrap_method", "line_number": 504, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.method_async", "line_number": 504, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 504, "usage_type": "name"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 507, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.routing_header.to_grpc_metadata", "line_number": 513, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.routing_header", "line_number": 513, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 513, "usage_type": "name"}, {"api_name": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", "line_number": 445, "usage_type": "attribute"}, {"api_name": "google.iam.v1.iam_policy_pb2", "line_number": 445, "usage_type": "name"}, {"api_name": "google.cloud.devtools.containeranalysis_v1.types.containeranalysis.GetVulnerabilityOccurrencesSummaryRequest", "line_number": 530, "usage_type": "attribute"}, {"api_name": "google.cloud.devtools.containeranalysis_v1.types.containeranalysis", "line_number": 530, "usage_type": "name"}, {"api_name": "google.api_core.retry.Retry", "line_number": 534, "usage_type": "attribute"}, {"api_name": "google.api_core.retry", "line_number": 534, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 536, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 536, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method", "line_number": 534, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 534, "usage_type": "name"}, 
{"api_name": "google.cloud.devtools.containeranalysis_v1.types.containeranalysis.GetVulnerabilityOccurrencesSummaryRequest", "line_number": 578, "usage_type": "call"}, {"api_name": "google.cloud.devtools.containeranalysis_v1.types.containeranalysis", "line_number": 578, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.method_async.wrap_method", "line_number": 589, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.method_async", "line_number": 589, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 589, "usage_type": "name"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 592, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.routing_header.to_grpc_metadata", "line_number": 598, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.routing_header", "line_number": 598, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 598, "usage_type": "name"}, {"api_name": "google.cloud.devtools.containeranalysis_v1.types.containeranalysis.VulnerabilityOccurrencesSummary", "line_number": 537, "usage_type": "attribute"}, {"api_name": "google.cloud.devtools.containeranalysis_v1.types.containeranalysis", "line_number": 537, "usage_type": "name"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 619, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.client_info.ClientInfo", "line_number": 619, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.client_info", "line_number": 619, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 619, "usage_type": "name"}, {"api_name": "pkg_resources.get_distribution", "line_number": 620, "usage_type": "call"}, {"api_name": "pkg_resources.DistributionNotFound", "line_number": 624, "usage_type": "attribute"}, {"api_name": "transports.base.DEFAULT_CLIENT_INFO", "line_number": 625, "usage_type": "name"}, {"api_name": "google.api_core.gapic_v1.client_info.ClientInfo", "line_number": 625, "usage_type": "call"}, {"api_name": "google.api_core.gapic_v1.client_info", "line_number": 625, "usage_type": "attribute"}, {"api_name": "google.api_core.gapic_v1", "line_number": 625, "usage_type": "name"}]} +{"seq_id": "356051032", "text": "import time\r\nimport traceback, sys\r\nimport random\r\n\r\nfrom statistics import mean\r\nfrom statistics import median\r\n\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\n\r\nQApplication.setAttribute(Qt.AA_Use96Dpi) # This fixes the scaling issue in Windows\r\n\r\n# Sets (0 = OR set, 1 = AND set, 2 = XOR set, 3 = IF->THEN set, 4 = IFF set)\r\nsets = [\r\n [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]],\r\n [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]],\r\n [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]],\r\n [[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 1]],\r\n [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]],\r\n]\r\n\r\n\r\nclass Environment:\r\n def __init__(self):\r\n self.ta = [] # The Tsetlin Machine\r\n self.X = [] # X input\r\n self.y = 0 # y input\r\n self.s = 3.9 # s - used for rewards\r\n self.num_states = 10 # number of states (leave at 10 since the GUI has been hardcoded for that value)\r\n self.num_rounds = 100000 # Maximum number of rounds to play before giving up\r\n self.verbose = 0 # Determines whether the environment will print\r\n self.operation = 1 # The current set being worked on\r\n self.turbo = 0 # Deactivates time delay from reward/punishments\r\n self.noise = 0 # Toggles noise on/off\r\n 
self.running = 0 # Whether the environment is running\r\n self.round = -1 # The current round number, set to -1 if inactive (for GUI purposes)\r\n self.message = '' # Message displayed in lower right corner of the GUI\r\n self.s_stats = '' # String for the statistics shown in the GUI\r\n self.passed_test = [0] * 4 # Used in the truth table displayed in the lower left corner of the GUI\r\n self.stats = [] # Bookkeeping about the current operation\r\n self.game = 0 # How many games have been played with this operation\r\n\r\n # Initialize the Tsetlin Automata here (for the GUI)\r\n self.ta = [[Tsetlin(self.num_states) for x in range(4)] for y in range(2)]\r\n\r\n def set_X(self, X):\r\n self.X = X\r\n\r\n def set_y(self, y):\r\n self.y = y\r\n\r\n def pause(self):\r\n if not self.turbo:\r\n time.sleep(0.025)\r\n\r\n def setup_tsetlin(self): # Load current example into the Tsetlin Automata\r\n for y in range(2):\r\n for x in range(4):\r\n self.ta[y][x].value = env.X[x % 2]\r\n if x >= 2:\r\n self.ta[y][x].value = int(not (self.ta[y][x].value))\r\n\r\n def get_tsetlin_state(self, x):\r\n return self.ta[int(x/4)][x % 4].state\r\n\r\n def eval_clause(self, disjunctive): # Returns the sum of our conjunctive clauses, or the disjunction\r\n\r\n clauses = [1, 1] # An empty conjunctive clause is always true\r\n\r\n for y in range(2):\r\n for x in range(4):\r\n if self.ta[y][x].included():\r\n clauses[y] &= self.ta[y][x].value\r\n\r\n if disjunctive: # If disjunctive is set, we OR the two clauses together\r\n return clauses[0] | clauses[1]\r\n else: # Otherwise, we add the two clauses together\r\n return clauses[0] + clauses[1]\r\n\r\n def feedback(self, type): # Gives Type I or II Feedback to our literals\r\n for y in range(2):\r\n for x in range(4):\r\n\r\n r = random.random()\r\n\r\n if type == 1: # ** Type I Feedback **\r\n if self.eval_clause(1): # Target clause evaluates to 1\r\n if self.ta[y][x].included():\r\n if self.ta[y][x].value: # > included, literal 1\r\n if r < (1 / self.s):\r\n pass\r\n if r < ((self.s - 1) / self.s):\r\n self.ta[y][x].reward() # >> reward\r\n else:\r\n if self.ta[y][x].value: # > not included, literal 1\r\n if r < (1 / self.s):\r\n pass\r\n elif r < ((self.s - 1) / self.s):\r\n self.ta[y][x].penalize() # >> penalty\r\n else: # > not included, literal 0\r\n if r < (1 / self.s):\r\n self.ta[y][x].reward() # >> reward\r\n else: # Target clause evaluates to 0\r\n if self.ta[y][x].included():\r\n if self.ta[y][x].value: # > included, literal 1\r\n if r < (1 / self.s):\r\n self.ta[y][x].penalize() # >> penalty\r\n else: # > included, literal 0\r\n if r < (1 / self.s):\r\n self.ta[y][x].penalize() # >> penalty\r\n else:\r\n if self.ta[y][x].value: # > not included, literal 1\r\n if r < (1 / self.s):\r\n self.ta[y][x].reward() # >> reward\r\n else: # > not included, literal 0\r\n if r < (1 / self.s):\r\n self.ta[y][x].reward() # >> reward\r\n else: # ** Type II Feedback **\r\n if self.eval_clause(1): # > target clause evaluates to 1\r\n if not self.ta[y][x].included() \\\r\n and not self.ta[y][x].value:\r\n self.ta[y][x].penalize() # >> penalty\r\n\r\n def get_literal(self, counter, type):\r\n s_literal = [\" X₁ \", \" X₂ \", \"¬X₁ \", \"¬X₂ \"]\r\n s_literal_alt = [\" X1 \", \" X2 \", \"!X1 \", \"!X2 \"]\r\n\r\n if type:\r\n return s_literal_alt[counter % 4]\r\n\r\n return s_literal[counter % 4]\r\n\r\n def is_included(self, counter):\r\n if self.ta[int(counter / 4)][counter % 4].included():\r\n return 1\r\n else:\r\n return 0\r\n\r\n def get_conjunction(self, counter):\r\n 
y = int(counter / 4)\r\n _x = counter % 4\r\n\r\n if not self.ta[y][_x].included():\r\n return 0\r\n\r\n num_literals = []\r\n y = int(counter / 4)\r\n\r\n for x in range(4):\r\n if self.ta[y][x].included():\r\n num_literals.append(x)\r\n\r\n for x in range(0, counter % 4 + 1):\r\n if x in num_literals:\r\n num_literals.remove(x)\r\n\r\n if len(num_literals):\r\n return 1\r\n else:\r\n return 0\r\n\r\n def playGame(self):\r\n\r\n self.game += 1\r\n\r\n # Reset the Tsetlin Automata, and give them their states\r\n self.ta = [[Tsetlin(self.num_states) for x in range(4)] for y in range(2)]\r\n\r\n # Used for GUI truth table\r\n self.passed_test = [0, 0, 0, 0]\r\n\r\n for round in range(self.num_rounds):\r\n\r\n self.round = round\r\n self.message = ''\r\n\r\n if not self.running: # Escape if user decided to stop\r\n self.message = 'Cancelled'\r\n break\r\n\r\n example = sets[self.operation][random.randint(0, 3)] # Get a random example each round\r\n # example = sets[self.operation][round % 4] # Go through the examples in order\r\n\r\n self.set_X([example[0], example[1]])\r\n self.set_y(example[2])\r\n\r\n if self.noise and random.random() < 0.4: # Add 40% noise to the dataset\r\n r = random.randint(0, 2)\r\n\r\n if r == 2:\r\n self.y = int(not self.y) # Cast the value as int to avoid it showing as True/False in the GUI\r\n else:\r\n self.X[r] = int(not self.X[r])\r\n\r\n\r\n # Give the Tsetlin Automata their respective values for this example\r\n self.setup_tsetlin()\r\n\r\n while self.eval_clause(1) != self.y: # If this formula is different from y\r\n if self.y and not (self.eval_clause(0)): # and y = 1 and the sum of the conjunctive clauses is 0\r\n self.feedback(1) # then give Type I Feedback\r\n self.message = \"Type I Feedback\"\r\n elif not self.y and self.eval_clause(0): # Otherwise, if y = 0 and sum of the conjunctive clauses > 0\r\n self.feedback(2) # then give Type II Feedback\r\n self.message = \"Type II Feedback\"\r\n\r\n valid = True # Now, let us check if the current formula passed the entire truth table, if so, we can stop\r\n\r\n for i in range(4):\r\n\r\n example = sets[self.operation][i]\r\n self.set_X([example[0], example[1]])\r\n self.set_y(example[2])\r\n self.setup_tsetlin()\r\n\r\n if self.eval_clause(1) == self.y:\r\n self.passed_test[i] = 1\r\n else:\r\n self.passed_test[i] = 0\r\n valid = False\r\n\r\n if valid:\r\n break\r\n\r\n # End round\r\n\r\n if not self.message == 'Cancelled':\r\n self.message = \"Solved in \" + f\"{env.round + 1:,}\" + \" rounds\"\r\n\r\n self.stats.append(self.round + 1)\r\n\r\nclass Tsetlin:\r\n def __init__(self, n):\r\n self.n = n # n is the number of states per action\r\n self.state = random.choice([self.n, self.n + 1]) # Initial state is selected randomly\r\n self.value = 0\r\n\r\n def included(self):\r\n if self.state > self.n:\r\n return True\r\n else:\r\n return False\r\n\r\n def reward(self):\r\n if self.n >= self.state > 1: # Reward: Move towards the left if 1 < state <= n\r\n self.state -= 1\r\n elif self.n < self.state < 2 * self.n: # Reward: Move towards the right if n < state < 2n\r\n self.state += 1\r\n\r\n env.pause()\r\n\r\n\r\n def penalize(self):\r\n if self.state <= self.n: # Penalty: Move right towards the center if state <= n\r\n self.state += 1\r\n elif self.state > self.n: # Penalty: Move left towards the center if state > n\r\n self.state -= 1\r\n\r\n env.pause()\r\n\r\n\r\n# ==========================================================================\r\n# GUI\r\n# 
==========================================================================\r\n\r\nclass myCanvas(QWidget):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setGeometry(0, 0, 800, 600)\r\n\r\n def paintEvent(self, e):\r\n painter = QPainter()\r\n painter.begin(self)\r\n self.drawCanvas(painter)\r\n painter.end()\r\n\r\n def drawCanvas(self, painter):\r\n\r\n painter.setRenderHint(QPainter.Antialiasing)\r\n\r\n pen = QPen()\r\n pen.setColor(QColor('white'))\r\n painter.setPen(pen)\r\n\r\n myWidth = 2 # The width of the lines\r\n y_shift = 40\r\n\r\n font = QFont('Helvetica Lt Std', 16)\r\n font.setPixelSize(21)\r\n fm = QFontMetrics(font)\r\n painter.setFont(font)\r\n\r\n painter.setBrush(QColor(255, 255, 255, 255))\r\n painter.drawRect(0, 0, 800, 600)\r\n\r\n painter.setBrush(QColor(255, 0, 0, 255))\r\n pen.setWidth(myWidth)\r\n pen.setColor(QColor('black'))\r\n painter.setPen(pen)\r\n\r\n painter.drawLine(QPoint(391, 40 + y_shift), QPoint(391, 375 + y_shift)) # Vertical Line\r\n painter.setPen(QPen(Qt.black, myWidth, Qt.DashLine))\r\n painter.drawLine(QPoint(0, 205 + y_shift), QPoint(777, 205 + y_shift)) # Dashed Horizontal Line\r\n painter.setPen(QPen(Qt.black, myWidth, Qt.SolidLine))\r\n\r\n if env.round > -1:\r\n s_round = f\"{env.round + 1:,}\"\r\n painter.drawText(0, 25, 'Game ' + str(env.game) + ', Round ' + s_round)\r\n\r\n counter = 0\r\n text_y = 400 + y_shift\r\n\r\n for x in range(17):\r\n\r\n if x % 2:\r\n state = env.get_tsetlin_state(counter)\r\n\r\n if state <= env.num_states:\r\n painter.setBrush(QColor(255, 0, 0, 255))\r\n else:\r\n painter.setBrush(QColor(0, 0, 255, 255))\r\n\r\n painter.drawRoundedRect(x * 46, (20 - state) * 15 + y_shift + 40, 46, 46, 12, 12)\r\n\r\n pen.setColor(QColor('white'))\r\n painter.setPen(pen)\r\n\r\n text_width = fm.width(str(state))\r\n\r\n painter.drawText(x * 46 + (23 - text_width/2), (20 - state) * 15 + y_shift + 70, str(state))\r\n\r\n pen.setColor(QColor('black'))\r\n painter.setPen(pen)\r\n\r\n # Draw/Print the conjunctive clauses\r\n\r\n if not env.is_included(counter):\r\n pen.setColor(QColor('lightGray'))\r\n painter.setPen(pen)\r\n\r\n text_width = fm.width(env.get_literal(counter, 0))\r\n painter.drawText(x * 46 + (23 - text_width/2), text_y, env.get_literal(counter, 0))\r\n\r\n pen.setColor(QColor('black'))\r\n painter.setPen(pen)\r\n\r\n if counter % 4 < 3:\r\n\r\n if not env.get_conjunction(counter):\r\n pen.setColor(QColor('lightGray'))\r\n painter.setPen(pen)\r\n\r\n text_width = fm.width(\"^\")\r\n painter.drawText(x * 46 + 46 + (23 - text_width/2), text_y, \"^\")\r\n\r\n pen.setColor(QColor('black'))\r\n painter.setPen(pen)\r\n\r\n counter += 1\r\n\r\n # End drawing states\r\n\r\n painter.drawText(40, text_y, \"(\")\r\n painter.drawText(777 - 40, text_y, \")\")\r\n painter.drawText(373, text_y, \") v (\")\r\n\r\n if env.round > -1:\r\n\r\n painter.drawText(0, 50, env.s_stats)\r\n\r\n ops = [\"OR\", \"AND\", \"XOR\", \"IF→THEN\", \"IFF\"]\r\n text = \"Running \" + ops[env.operation] + \" set\"\r\n\r\n text_width = fm.width(text)\r\n painter.drawText(775 - text_width, 25, text)\r\n\r\n text = \"X = [\" + str(env.X[0]) + \", \" + str(env.X[1]) + \"], y = \" + str(env.y)\r\n text_width = fm.width(text)\r\n painter.drawText(775 - text_width, 50, text)\r\n\r\n pen.setColor(QColor('lightGray'))\r\n painter.setPen(pen)\r\n\r\n text = env.message\r\n text_width = fm.width(text)\r\n painter.drawText(775 - text_width, 540, text)\r\n\r\n for i in range(4):\r\n check = \"✗\"\r\n\r\n 
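# Truth-table readout (lower left corner): one row per input pair of the\r\n                # selected operation; a row is ticked once the learned formula\r\n                # reproduces its target y.\r\n                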
if env.passed_test[i]:\r\n check = \"✓\"\r\n\r\n example = sets[env.operation][i]\r\n text = str(example[0]) + \" \" + str(example[1]) + \" | \" + str(example[2]) + \" \" + check\r\n\r\n painter.drawText(0, 480 + 20 * i, text)\r\n\r\n painter.drawLine(QPoint(38, 465), QPoint(38, 540))\r\n\r\n pen.setColor(QColor('black'))\r\n painter.setPen(pen)\r\n\r\n def _trigger_refresh(self):\r\n self.update()\r\n\r\n\r\ndef reset_environment():\r\n env.round = -1\r\n env.stats = [] # Reset the statistics\r\n env.s_stats = ''\r\n env.game = 0\r\n\r\n\r\nclass Window(QWidget):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n\r\n self.threadpool = QThreadPool()\r\n print(\"Multithreading with maximum %d threads\" % self.threadpool.maxThreadCount())\r\n\r\n self.timer = QTimer()\r\n self.timer.setInterval(1000 / 60)\r\n\r\n self.timer.timeout.connect(self.recurring_timer)\r\n self.timer.start()\r\n\r\n p = self.palette()\r\n p.setColor(self.backgroundRole(), Qt.white)\r\n self.setPalette(p)\r\n\r\n grid_layout = QGridLayout()\r\n self.setLayout(grid_layout)\r\n\r\n # OR Button\r\n self.or_button = QPushButton(\"OR\")\r\n self.or_button.setCheckable(True)\r\n self.or_button.clicked[bool].connect(self.setOperation)\r\n grid_layout.addWidget(self.or_button, 0, 0) # row, column (y, x)\r\n\r\n # AND Button\r\n self.and_button = QPushButton(\"AND\")\r\n self.and_button.setCheckable(True)\r\n self.and_button.toggle()\r\n self.and_button.clicked[bool].connect(self.setOperation)\r\n grid_layout.addWidget(self.and_button, 0, 1)\r\n\r\n # XOR Button\r\n self.xor_button = QPushButton(\"XOR\")\r\n self.xor_button.setCheckable(True)\r\n grid_layout.addWidget(self.xor_button, 0, 2)\r\n self.xor_button.clicked[bool].connect(self.setOperation)\r\n\r\n # IF->THEN Button\r\n self.ifthen_button = QPushButton(\"IF→THEN\")\r\n self.ifthen_button.setCheckable(True)\r\n self.ifthen_button.clicked[bool].connect(self.setOperation)\r\n grid_layout.addWidget(self.ifthen_button, 0, 3)\r\n\r\n # IFF Button\r\n self.iff_button = QPushButton(\"IFF\")\r\n self.iff_button.setCheckable(True)\r\n grid_layout.addWidget(self.iff_button, 0, 4)\r\n self.iff_button.clicked[bool].connect(self.setOperation)\r\n\r\n # Noise Button\r\n self.noise_button = QPushButton(\"Noise\")\r\n self.noise_button.setCheckable(True)\r\n grid_layout.addWidget(self.noise_button, 0, 6)\r\n self.noise_button.clicked[bool].connect(self.toggleNoise)\r\n\r\n # Fast Forward Button\r\n self.ff_button = QPushButton(\"⯈⯈\")\r\n self.ff_button.setCheckable(True)\r\n grid_layout.addWidget(self.ff_button, 0, 7)\r\n self.ff_button.clicked[bool].connect(self.toggleSpeed)\r\n\r\n # Run Button\r\n self.run_button = QPushButton(\"Run\")\r\n self.run_button.setCheckable(True)\r\n grid_layout.addWidget(self.run_button, 0, 8)\r\n self.run_button.clicked[bool].connect(self.toggleRunning)\r\n\r\n self.canvas = myCanvas()\r\n grid_layout.addWidget(self.canvas, 1, 0, 5, 9)\r\n\r\n self.setGeometry(600, 400, 800, 600)\r\n self.setWindowTitle('IKT440 | Assignment 2')\r\n\r\n def recurring_timer(self):\r\n self.canvas._trigger_refresh() # This is our time-step, and updates the window every 1/60 second\r\n\r\n def progress_fn(self, n):\r\n print(\"Done.\")\r\n\r\n def execute_this_fn(self, progress_callback):\r\n env.playGame()\r\n return \"Done.\"\r\n\r\n def print_output(self, s):\r\n print(s)\r\n\r\n def thread_complete(self):\r\n print(\"Thread complete.\")\r\n\r\n if self.run_button.isChecked():\r\n self.run_button.toggle()\r\n env.running = not 
env.running\r\n\r\n self.or_button.setEnabled(True)\r\n self.and_button.setEnabled(True)\r\n self.xor_button.setEnabled(True)\r\n self.ifthen_button.setEnabled(True)\r\n self.iff_button.setEnabled(True)\r\n\r\n env.s_stats = f\"min = {min(env.stats):,} | max = {max(env.stats):,} | r̄ = {mean(env.stats):,.0f}\" \\\r\n f\" | median = {median(env.stats):,.0f}\"\r\n\r\n def toggleNoise(self, pressed):\r\n env.noise = not env.noise\r\n reset_environment()\r\n\r\n def toggleSpeed(self, pressed):\r\n env.turbo = not env.turbo\r\n\r\n def toggleRunning(self, pressed):\r\n env.running = not env.running\r\n\r\n if env.running:\r\n\r\n self.or_button.setEnabled(False)\r\n self.and_button.setEnabled(False)\r\n self.xor_button.setEnabled(False)\r\n self.ifthen_button.setEnabled(False)\r\n self.iff_button.setEnabled(False)\r\n\r\n worker = Worker(self.execute_this_fn) # Any other args, kwargs are passed to the run function\r\n worker.signals.result.connect(self.print_output)\r\n worker.signals.finished.connect(self.thread_complete)\r\n worker.signals.progress.connect(self.progress_fn)\r\n self.threadpool.start(worker) # Execute\r\n\r\n else:\r\n self.or_button.setEnabled(True)\r\n self.and_button.setEnabled(True)\r\n self.xor_button.setEnabled(True)\r\n self.ifthen_button.setEnabled(True)\r\n self.iff_button.setEnabled(True)\r\n\r\n\r\n def setOperation(self):\r\n\r\n source = self.sender()\r\n\r\n if not self.or_button.isChecked() and source.text() == \"OR\":\r\n self.or_button.toggle()\r\n\r\n if not self.and_button.isChecked() and source.text() == \"AND\":\r\n self.and_button.toggle()\r\n\r\n if not self.xor_button.isChecked() and source.text() == \"XOR\":\r\n self.xor_button.toggle()\r\n\r\n if not self.ifthen_button.isChecked() and source.text() == \"IF→THEN\":\r\n self.ifthen_button.toggle()\r\n\r\n if not self.iff_button.isChecked() and source.text() == \"IFF\":\r\n self.iff_button.toggle()\r\n\r\n if self.or_button.isChecked() and source.text() != \"OR\":\r\n self.or_button.toggle()\r\n elif self.and_button.isChecked() and source.text() != \"AND\":\r\n self.and_button.toggle()\r\n elif self.xor_button.isChecked() and source.text() != \"XOR\":\r\n self.xor_button.toggle()\r\n elif self.ifthen_button.isChecked() and source.text() != \"IF→THEN\":\r\n self.ifthen_button.toggle()\r\n elif self.iff_button.isChecked() and source.text() != \"IFF\":\r\n self.iff_button.toggle()\r\n\r\n if source.text() == \"OR\":\r\n env.operation = 0\r\n elif source.text() == \"AND\":\r\n env.operation = 1\r\n elif source.text() == \"XOR\":\r\n env.operation = 2\r\n elif source.text() == \"IF→THEN\":\r\n env.operation = 3\r\n else:\r\n env.operation = 4\r\n\r\n reset_environment()\r\n\r\n\r\n# ==========================================================================\r\n# Multithreading\r\n# ==========================================================================\r\n\r\nclass WorkerSignals(QObject):\r\n finished = pyqtSignal()\r\n error = pyqtSignal(tuple)\r\n result = pyqtSignal(object)\r\n progress = pyqtSignal(int)\r\n\r\n\r\nclass Worker(QRunnable):\r\n def __init__(self, fn, *args, **kwargs):\r\n super(Worker, self).__init__()\r\n\r\n # Store constructor arguments (re-used for processing)\r\n self.fn = fn\r\n self.args = args\r\n self.kwargs = kwargs\r\n self.signals = WorkerSignals()\r\n\r\n # Add the callback to our kwargs\r\n self.kwargs['progress_callback'] = self.signals.progress\r\n\r\n @pyqtSlot()\r\n def run(self):\r\n # Retrieve args/kwargs here; and fire processing using them\r\n try:\r\n result = 
self.fn(*self.args, **self.kwargs)\r\n except:\r\n traceback.print_exc()\r\n exctype, value = sys.exc_info()[:2]\r\n self.signals.error.emit((exctype, value, traceback.format_exc()))\r\n else:\r\n self.signals.result.emit(result) # Return the result of the processing\r\n finally:\r\n self.signals.finished.emit() # Done\r\n\r\n\r\nenv = Environment()\r\n\r\napp = QApplication([])\r\nWindow = Window()\r\nWindow.setGeometry(0, 0, 800, 600)\r\nWindow.show()\r\n\r\napp.exec_()\r\n", "sub_path": "Tsetlin_GUI.py", "file_name": "Tsetlin_GUI.py", "file_ext": "py", "file_size_in_byte": 23671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "random.random", "line_number": 85, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 182, "usage_type": "call"}, {"api_name": "random.random", "line_number": 188, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 189, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 236, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 521, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 522, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 630, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 631, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 632, "usage_type": "call"}]} +{"seq_id": "512131870", "text": "\"\"\"wvpoi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Import the include() function: from django.conf.urls import url, include\n 3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='home'),\n    url(r'^tool/$', views.tool, name='tool'),\n    url(r'^listings/$', views.listings, name='listings'),\n    url(r'^map/$', views.map_view, name='map'),\n    url(r'^api/$', views.api, name='api'),\n    url(r'^api/get-listings/$', views.get_listings, name='get_listings'),\n]\n", "sub_path": "django-site/wvpoi/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "271220805", "text": "from PIL import Image, ImageEnhance\nimport cv2\nimport os\nimport random as r\nimport numpy as np\n\n\ndef read_files(data_dir, file_name={}):\n\n    image_name = os.path.join(data_dir, 'image', file_name['image'])\n    trimap_name = os.path.join(data_dir, 'trimap', file_name['trimap'])\n\n    image = cv2.imread(image_name)\n    trimap = cv2.imread(trimap_name)\n\n    return image, trimap\n\n\ndef random_scale_and_creat_patch(image):\n    # random color jitter\n    if r.random() < 0.5:\n        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n        random_factor = np.random.randint(0, 31) / 10. # random factor\n        color_image = ImageEnhance.Color(\n            image).enhance(random_factor) # adjust the image saturation\n        random_factor = np.random.randint(10, 21) / 10. # random factor\n        brightness_image = ImageEnhance.Brightness(\n            color_image).enhance(random_factor) # adjust the image brightness\n        random_factor = np.random.randint(10, 21) / 10. # random factor\n        contrast_image = ImageEnhance.Contrast(\n            brightness_image).enhance(random_factor) # adjust the image contrast\n        random_factor = np.random.randint(0, 31) / 10. # random factor
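\n        # A factor of 1.0 leaves the image unchanged: the Color and Sharpness\n        # factors are drawn from [0.0, 3.0], while Brightness and Contrast stay\n        # in [1.0, 2.0] and so never drop below the original.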
\n        image = ImageEnhance.Sharpness(contrast_image).enhance(random_factor) # adjust the image sharpness (contrast_image only exists in this branch)\n        image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n\n    return image\n\n\ndef rotate_bound(image, trimap):\n    # get the image size\n    # and the rotation center\n    angle = np.random.randint(-8, 8)\n    (h, w) = image.shape[:2]\n    (cx, cy) = (w/2, h/2)\n\n    # set up the rotation matrix\n    M = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)\n    cos = np.abs(M[0, 0])\n    sin = np.abs(M[0, 1])\n\n    # compute the new bounds of the image after rotation\n    nW = int((h*sin)+(w*cos)) # width of the box enclosing the rotated image\n    nH = int((h*cos)+(w*sin)) # height of the enclosing box\n\n    # adjust the translation components of the rotation matrix (t_{x}, t_{y})\n    M[0, 2] += (nW/2) - cx\n    M[1, 2] += (nH/2) - cy\n\n    image = cv2.warpAffine(image, M, (nW, nH))\n    trimap = cv2.warpAffine(trimap, M, (nW, nH))\n    return image, trimap\n\n    # image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n    # random_angle = np.random.randint(-10, 10)\n    # image = image.rotate(random_angle, Image.BICUBIC)\n    # image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n    # return image\n\n\ndef main():\n    num = 1\n    while num < 9:\n        imagepath = \"D:/m/data/new-data-single/image/h (\"+str(num)+\").jpg\"\n        trimapath = \"D:/m/data/new-data-single/alpha/h (\"+str(num)+\").png\"\n        image = cv2.imread(imagepath)\n        trimap = cv2.imread(trimapath)\n        i = 0\n        while i < 20:\n            image_c = random_scale_and_creat_patch(image)\n            image_q, trimap_q = rotate_bound(image_c, trimap)\n            (h, w) = image_q.shape[:2]\n            image_r = cv2.resize(image_q, (600, int(h*600/w)),\n                                 interpolation=cv2.INTER_CUBIC)\n            (nh, nw) = image_r.shape[:2]\n            alpha = cv2.resize(\n                trimap_q, (nw, nh), interpolation=cv2.INTER_CUBIC)\n            cv2.imwrite(\n                \"D:/m/data/new-data-single/image1/h (\"+str(num)+\")_\"+str(i)+\".jpg\", image_r)\n            cv2.imwrite(\n                \"D:/m/data/new-data-single/alpha1/h (\"+str(num)+\")_\"+str(i)+\".png\", alpha)\n            i += 1\n        num += 1\n\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "ImageEnhance.py", "file_name": "ImageEnhance.py", "file_ext": "py", "file_size_in_byte": 3306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "random.random", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PIL.ImageEnhance.Color", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PIL.ImageEnhance.Brightness", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PIL.ImageEnhance.Contrast", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PIL.ImageEnhance.Sharpness", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 33, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 86, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "596994432", "text": "import numpy as np\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import f1_score\n\nfrom keras.models import Sequential\nfrom keras.models import Model\nfrom keras import layers\nfrom keras import backend as K\nimport tensorflow_hub as hub\n# nltk.download(\"tokenize\")\nfrom os import listdir\nfrom keras.layers import Flatten,Activation,GlobalMaxPooling1D\nfrom keras.layers.merge import add\nfrom keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda,Conv1D,MaxPooling1D\nimport tensorflow\nimport keras\nsession_conf = tensorflow.ConfigProto(intra_op_parallelism_threads=4, inter_op_parallelism_threads=4)\ntensorflow.set_random_seed(1)\nsess = tensorflow.Session(graph=tensorflow.get_default_graph(), config=session_conf)\nkeras.backend.set_session(sess)\nimport tensorflow as tf\n\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\nK.set_session(sess)\nsess.run(tf.global_variables_initializer())\nsess.run(tf.tables_initializer())\n\ndf = pd.read_pickle(\"frame_no_stem.pkl\")\nimages = set(np.load('asin.npy')) # valid products\nprint(\"Finished reading images\")\n\nx_desc = []\ny_category = []\ni = 0\n\nfor asin in df.index.values:\n if asin in images:\n item = df.loc[asin]\n x_desc.append(item.description)\n cate = item.categories\n y_category.append(cate)\n if i % 1000 == 0:\n print(i)\n i += 
1\n\nprint(\"Finished reading dataframe\")\nmlb = MultiLabelBinarizer()\ny_total = mlb.fit_transform(y_category)\nx_desc = np.array(x_desc)\n\nnp.random.seed(0)\nstate = np.random.get_state()\nnp.random.shuffle(x_desc)\nnp.random.set_state(state)\nnp.random.shuffle(y_total)\n\nX_train = x_desc[:90000]\ny_train = y_total[:90000]\ny_test = y_total[90000:]\nx_test = x_desc[90000:]\n\ntokenizer = Tokenizer(num_words=10000)\ntokenizer.fit_on_texts(X_train)\ntraining_dataX = tokenizer.texts_to_sequences(X_train)\ntest_dataX = tokenizer.texts_to_sequences(x_test)\nvocab_size = len(tokenizer.word_index) + 1\n\ntraining_dataX = np.array(training_dataX)\ntest_dataX = np.array(test_dataX)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\n\n# padding\nmaxlen = 300\ntraining_dataX = pad_sequences(training_dataX, padding='post', maxlen=maxlen)\ntest_dataX = pad_sequences(test_dataX, padding='post', maxlen=maxlen)\n\n# create embedding matrix\ndef create_embedding_matrix(filepath, word_index, embedding_dim):\n    vocab_size = len(word_index) + 1  # Adding again 1 because of reserved 0 index\n    embedding_matrix = np.zeros((vocab_size, embedding_dim))\n\n    with open(filepath) as f:\n        for line in f:\n            try:\n                word, *vector = line.split()\n                if word in word_index:\n                    idx = word_index[word] \n                    embedding_matrix[idx] = np.array(\n                        vector, dtype=np.float32)[:embedding_dim]\n            except:\n                continue\n\n    return embedding_matrix\n\nembedding_dim = 120\nembedding_matrix = create_embedding_matrix(\"glove.840B.300d.txt\", tokenizer.word_index, embedding_dim)\n\n# Fraction of the vocabulary that the GloVe vectors cover\nnonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))\nprint(nonzero_elements / vocab_size)\n\n# create embedding layer\nembedding_layer = Embedding(vocab_size, embedding_dim, weights=[embedding_matrix], input_length=maxlen, trainable=True)\n\nsequence_input = keras.layers.Input(shape=(maxlen,), dtype='int32')\nembedded_sequences = embedding_layer(sequence_input)\nlayer = Conv1D(200, 5, activation='relu')(embedded_sequences)\nlayer = GlobalMaxPooling1D()(layer) # pool the Conv1D features, not the raw embeddings\nlayer = Dense(170, activation='relu')(layer)\nlayer = Dense(122,name='out_layer',activation = \"sigmoid\")(layer)\nmodel = Model(sequence_input, layer)\n\n\nmodel.compile(loss='categorical_crossentropy',\n              optimizer='adam',\n              metrics=['acc'])\n\nmodel.summary()\n\nmodel.fit(np.array(training_dataX), y_train,epochs = 30, batch_size=256,verbose = 1)\n\noutcome = model.predict(np.array(training_dataX))\n\n# make it binary\noutcome[outcome >= 0.5] = 1\noutcome[outcome < 0.5] = 0\noutcome = outcome.astype(int)\n\nprint(f1_score(y_train,np.array(outcome),average = 'micro') * 100)\n", "sub_path": "description_classifier.py", "file_name": "description_classifier.py", "file_ext": "py", "file_size_in_byte": 4418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tensorflow.ConfigProto", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.backend.set_session", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 
30, "usage_type": "call"}, {"api_name": "keras.backend.set_session", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.tables_initializer", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MultiLabelBinarizer", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.random.get_state", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.random.set_state", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.count_nonzero", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 113, "usage_type": "attribute"}, {"api_name": "keras.layers.Conv1D", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "362551611", "text": "import boto3\nimport csv\nimport logging\nimport os\nimport re\nimport time\n\nfrom datetime import datetime, timedelta, timezone\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db import 
connections, transaction, DEFAULT_DB_ALIAS\n\nfrom usaspending_api.awards.models import TransactionFPDS, TransactionNormalized, Award\nfrom usaspending_api.broker.helpers.award_category_helper import award_types\nfrom usaspending_api.broker.helpers.find_related_awards import find_related_awards\nfrom usaspending_api.broker.helpers.get_business_categories import get_business_categories\nfrom usaspending_api.broker.helpers.last_load_date import get_last_load_date, update_last_load_date\nfrom usaspending_api.broker.helpers.set_legal_entity_boolean_fields import set_legal_entity_boolean_fields\nfrom usaspending_api.common.helpers.dict_helpers import upper_case_dict_values\nfrom usaspending_api.common.helpers.etl_helpers import update_c_to_d_linkages\nfrom usaspending_api.common.helpers.date_helper import fy\nfrom usaspending_api.common.helpers.timing_helpers import timer\nfrom usaspending_api.etl.award_helpers import update_awards, update_contract_awards\nfrom usaspending_api.etl.broker_etl_helpers import dictfetchall\nfrom usaspending_api.etl.management.load_base import load_data_into_model, format_date, create_location\nfrom usaspending_api.references.models import LegalEntity, Agency\nfrom usaspending_api.common.retrieve_file_from_uri import RetrieveFileFromUri\n\nlogger = logging.getLogger(\"console\")\n\nAWARD_UPDATE_ID_LIST = []\nBATCH_FETCH_SIZE = 25000\n\n\ndef read_afa_ids_from_file(afa_id_file_path):\n with RetrieveFileFromUri(afa_id_file_path).get_file_object() as f:\n return set(tuple(l.decode(\"utf-8\").rstrip() for l in f if l))\n\n\nclass Command(BaseCommand):\n help = \"Sync USAspending DB FPDS data using Broker for new or modified records and S3 for deleted IDs\"\n\n @staticmethod\n def get_deleted_fpds_data_from_s3(date):\n ids_to_delete = []\n regex_str = \".*_delete_records_(IDV|award).*\"\n\n if settings.IS_LOCAL:\n for file in os.listdir(settings.CSV_LOCAL_PATH):\n if re.search(regex_str, file) and datetime.strptime(file[: file.find(\"_\")], \"%m-%d-%Y\").date() >= date:\n with open(settings.CSV_LOCAL_PATH + file, \"r\") as current_file:\n # open file, split string to array, skip the header\n reader = csv.reader(current_file.read().splitlines())\n next(reader)\n unique_key_list = [rows[0] for rows in reader]\n\n ids_to_delete += unique_key_list\n else:\n # Connect to AWS\n aws_region = settings.USASPENDING_AWS_REGION\n fpds_bucket_name = settings.FPDS_BUCKET_NAME\n\n if not (aws_region and fpds_bucket_name):\n raise Exception(\"Missing required environment variables: USASPENDING_AWS_REGION, FPDS_BUCKET_NAME\")\n\n s3client = boto3.client(\"s3\", region_name=aws_region)\n s3resource = boto3.resource(\"s3\", region_name=aws_region)\n s3_bucket = s3resource.Bucket(fpds_bucket_name)\n\n # make an array of all the keys in the bucket\n file_list = [item.key for item in s3_bucket.objects.all()]\n\n # Only use files that match the date we're currently checking\n for item in file_list:\n # if the date on the file is the same day as we're checking\n if (\n re.search(regex_str, item)\n and \"/\" not in item\n and datetime.strptime(item[: item.find(\"_\")], \"%m-%d-%Y\").date() >= date\n ):\n s3_item = s3client.get_object(Bucket=fpds_bucket_name, Key=item)\n reader = csv.reader(s3_item[\"Body\"].read().decode(\"utf-8\").splitlines())\n\n # skip the header, the reader doesn't ignore it for some reason\n next(reader)\n # make an array of all the detached_award_procurement_ids\n unique_key_list = [rows[0] for rows in reader]\n\n ids_to_delete += unique_key_list\n\n logger.info(\"Number 
of records to delete: %s\" % str(len(ids_to_delete)))\n return ids_to_delete\n\n @staticmethod\n def get_fpds_transaction_ids(date):\n db_cursor = connections[\"data_broker\"].cursor()\n db_query = \"SELECT detached_award_procurement_id FROM detached_award_procurement WHERE updated_at >= %s;\"\n db_args = [date]\n\n db_cursor.execute(db_query, db_args)\n db_rows = [id[0] for id in db_cursor.fetchall()]\n\n logger.info(\"Number of records to insert/update: %s\" % str(len(db_rows)))\n return db_rows\n\n @staticmethod\n def fetch_fpds_data_generator(dap_uid_list):\n start_time = datetime.now()\n\n db_cursor = connections[\"data_broker\"].cursor()\n\n db_query = \"SELECT * FROM detached_award_procurement WHERE detached_award_procurement_id IN ({});\"\n\n total_uid_count = len(dap_uid_list)\n\n for i in range(0, total_uid_count, BATCH_FETCH_SIZE):\n max_index = i + BATCH_FETCH_SIZE if i + BATCH_FETCH_SIZE < total_uid_count else total_uid_count\n fpds_ids_batch = dap_uid_list[i:max_index]\n\n log_msg = \"[{}] Fetching {}-{} out of {} records from broker\"\n logger.info(log_msg.format(datetime.now() - start_time, i + 1, max_index, total_uid_count))\n\n db_cursor.execute(db_query.format(\",\".join(str(id) for id in fpds_ids_batch)))\n yield dictfetchall(db_cursor) # this returns an OrderedDict\n\n @staticmethod\n def delete_stale_fpds(ids_to_delete):\n logger.info(\"Starting deletion of stale FPDS data\")\n\n transactions = TransactionNormalized.objects.filter(\n contract_data__detached_award_procurement_id__in=ids_to_delete\n )\n update_award_ids, delete_award_ids = find_related_awards(transactions)\n\n delete_transaction_ids = [delete_result[0] for delete_result in transactions.values_list(\"id\")]\n delete_transaction_str_ids = \",\".join([str(deleted_result) for deleted_result in delete_transaction_ids])\n update_award_str_ids = \",\".join([str(update_result) for update_result in update_award_ids])\n delete_award_str_ids = \",\".join([str(deleted_result) for deleted_result in delete_award_ids])\n\n db_cursor = connections[DEFAULT_DB_ALIAS].cursor()\n queries = []\n\n if delete_transaction_ids:\n fpds = \"DELETE FROM transaction_fpds tf WHERE tf.transaction_id IN ({});\".format(delete_transaction_str_ids)\n tn = \"DELETE FROM transaction_normalized tn WHERE tn.id IN ({});\".format(delete_transaction_str_ids)\n td = \"DELETE FROM transaction_delta td WHERE td.transaction_id in ({});\".format(delete_transaction_str_ids)\n queries.extend([fpds, tn, td])\n # Update Awards\n if update_award_ids:\n # Removing FK values from awards so constraints don't cause script to fail\n # Adding to AWARD_UPDATE_ID_LIST so the transaction FKs will be recalculated\n AWARD_UPDATE_ID_LIST.extend(update_award_ids)\n query_str = (\n \"UPDATE awards SET latest_transaction_id = null, earliest_transaction_id = null WHERE id IN ({});\"\n )\n update_awards_query = query_str.format(update_award_str_ids)\n queries.append(update_awards_query)\n if delete_award_ids:\n # Financial Accounts by Awards\n query_str = \"UPDATE financial_accounts_by_awards SET award_id = null WHERE award_id IN ({});\"\n fa = query_str.format(delete_award_str_ids)\n # Subawards\n sub = \"UPDATE subaward SET award_id = null WHERE award_id IN ({});\".format(delete_award_str_ids)\n # Parent Awards\n pa_updates = \"UPDATE parent_award SET parent_award_id = null WHERE parent_award_id IN ({});\".format(\n delete_award_str_ids\n )\n pa_deletes = \"DELETE FROM parent_award WHERE award_id IN ({});\".format(delete_award_str_ids)\n # Delete Subawards\n 
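# The UPDATE statements above detach dependent rows first, so the DELETE\r\n            # of the award rows below cannot trip foreign key constraints.\r\n            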
delete_awards_query = \"DELETE FROM awards a WHERE a.id IN ({});\".format(delete_award_str_ids)\n queries.extend([fa, sub, pa_updates, pa_deletes, delete_awards_query])\n if queries:\n db_query = \"\".join(queries)\n db_cursor.execute(db_query, [])\n\n def insert_all_new_fpds(self, total_insert):\n for to_insert in self.fetch_fpds_data_generator(total_insert):\n start = time.perf_counter()\n self.insert_new_fpds(to_insert=to_insert, total_rows=len(to_insert))\n logger.info(\"Insertion took {:.2f}s\".format(time.perf_counter() - start))\n\n def insert_new_fpds(self, to_insert, total_rows):\n place_of_performance_field_map = {\n \"location_country_code\": \"place_of_perform_country_c\",\n \"country_name\": \"place_of_perf_country_desc\",\n \"state_code\": \"place_of_performance_state\",\n \"state_name\": \"place_of_perfor_state_desc\",\n \"city_name\": \"place_of_perform_city_name\",\n \"county_name\": \"place_of_perform_county_na\",\n \"county_code\": \"place_of_perform_county_co\",\n \"zip_4a\": \"place_of_performance_zip4a\",\n \"congressional_code\": \"place_of_performance_congr\",\n \"zip_last4\": \"place_of_perform_zip_last4\",\n \"zip5\": \"place_of_performance_zip5\",\n }\n\n legal_entity_location_field_map = {\n \"location_country_code\": \"legal_entity_country_code\",\n \"country_name\": \"legal_entity_country_name\",\n \"state_code\": \"legal_entity_state_code\",\n \"state_name\": \"legal_entity_state_descrip\",\n \"city_name\": \"legal_entity_city_name\",\n \"county_name\": \"legal_entity_county_name\",\n \"county_code\": \"legal_entity_county_code\",\n \"address_line1\": \"legal_entity_address_line1\",\n \"address_line2\": \"legal_entity_address_line2\",\n \"address_line3\": \"legal_entity_address_line3\",\n \"zip4\": \"legal_entity_zip4\",\n \"congressional_code\": \"legal_entity_congressional\",\n \"zip_last4\": \"legal_entity_zip_last4\",\n \"zip5\": \"legal_entity_zip5\",\n }\n\n fpds_normalized_field_map = {\"type\": \"contract_award_type\", \"description\": \"award_description\"}\n\n fpds_field_map = {\n \"officer_1_name\": \"high_comp_officer1_full_na\",\n \"officer_1_amount\": \"high_comp_officer1_amount\",\n \"officer_2_name\": \"high_comp_officer2_full_na\",\n \"officer_2_amount\": \"high_comp_officer2_amount\",\n \"officer_3_name\": \"high_comp_officer3_full_na\",\n \"officer_3_amount\": \"high_comp_officer3_amount\",\n \"officer_4_name\": \"high_comp_officer4_full_na\",\n \"officer_4_amount\": \"high_comp_officer4_amount\",\n \"officer_5_name\": \"high_comp_officer5_full_na\",\n \"officer_5_amount\": \"high_comp_officer5_amount\",\n }\n\n for index, row in enumerate(to_insert, 1):\n upper_case_dict_values(row)\n\n # Create new LegalEntityLocation and LegalEntity from the row data\n legal_entity_location = create_location(\n legal_entity_location_field_map, row, {\"recipient_flag\": True, \"is_fpds\": True}\n )\n recipient_name = row[\"awardee_or_recipient_legal\"]\n legal_entity = LegalEntity.objects.create(\n recipient_unique_id=row[\"awardee_or_recipient_uniqu\"],\n recipient_name=recipient_name if recipient_name is not None else \"\",\n )\n legal_entity_value_map = {\"location\": legal_entity_location, \"is_fpds\": True}\n set_legal_entity_boolean_fields(row)\n legal_entity = load_data_into_model(legal_entity, row, value_map=legal_entity_value_map, save=True)\n\n # Create the place of performance location\n pop_location = create_location(place_of_performance_field_map, row, {\"place_of_performance_flag\": True})\n\n # Find the toptier awards from the subtier 
awards\n awarding_agency = Agency.get_by_subtier_only(row[\"awarding_sub_tier_agency_c\"])\n funding_agency = Agency.get_by_subtier_only(row[\"funding_sub_tier_agency_co\"])\n\n # Create the summary Award\n (created, award) = Award.get_or_create_summary_award(\n generated_unique_award_id=row[\"unique_award_key\"], piid=row[\"piid\"]\n )\n award.parent_award_piid = row.get(\"parent_award_id\")\n award.save()\n\n # Append row to list of Awards updated\n AWARD_UPDATE_ID_LIST.append(award.id)\n\n if row[\"last_modified\"] and len(str(row[\"last_modified\"])) == len(\"YYYY-MM-DD HH:MM:SS\"): # 19 characters\n dt_fmt = \"%Y-%m-%d %H:%M:%S\"\n else:\n dt_fmt = \"%Y-%m-%d %H:%M:%S.%f\" # try using this even if last_modified isn't a valid string\n\n try:\n last_mod_date = datetime.strptime(str(row[\"last_modified\"]), dt_fmt).date()\n except ValueError: # handle odd-string formats and NULLs from the upstream FPDS-NG system\n info_message = \"Invalid value '{}' does not match: '{}'\".format(row[\"last_modified\"], dt_fmt)\n logger.info(info_message)\n last_mod_date = None\n\n award_type, award_type_desc = award_types(row)\n\n parent_txn_value_map = {\n \"award\": award,\n \"awarding_agency\": awarding_agency,\n \"funding_agency\": funding_agency,\n \"recipient\": legal_entity,\n \"place_of_performance\": pop_location,\n \"period_of_performance_start_date\": format_date(row[\"period_of_performance_star\"]),\n \"period_of_performance_current_end_date\": format_date(row[\"period_of_performance_curr\"]),\n \"action_date\": format_date(row[\"action_date\"]),\n \"last_modified_date\": last_mod_date,\n \"transaction_unique_id\": row[\"detached_award_proc_unique\"],\n \"is_fpds\": True,\n \"type\": award_type,\n \"type_description\": award_type_desc,\n \"business_categories\": get_business_categories(row=row, data_type=\"fpds\"),\n }\n\n transaction_normalized_dict = load_data_into_model(\n TransactionNormalized(), # thrown away\n row,\n field_map=fpds_normalized_field_map,\n value_map=parent_txn_value_map,\n as_dict=True,\n )\n\n contract_instance = load_data_into_model(\n # TransactionFPDS() is \"thrown\" away\n TransactionFPDS(),\n row,\n field_map=fpds_field_map,\n as_dict=True,\n )\n\n detached_award_proc_unique = contract_instance[\"detached_award_proc_unique\"]\n unique_fpds = TransactionFPDS.objects.filter(detached_award_proc_unique=detached_award_proc_unique)\n\n if unique_fpds.first():\n transaction_normalized_dict[\"update_date\"] = datetime.now(timezone.utc)\n transaction_normalized_dict[\"fiscal_year\"] = fy(transaction_normalized_dict[\"action_date\"])\n\n # update TransactionNormalized\n TransactionNormalized.objects.filter(id=unique_fpds.first().transaction.id).update(\n **transaction_normalized_dict\n )\n\n # update TransactionFPDS\n unique_fpds.update(**contract_instance)\n else:\n # create TransactionNormalized\n transaction = TransactionNormalized(**transaction_normalized_dict)\n transaction.save()\n\n # create TransactionFPDS\n transaction_fpds = TransactionFPDS(transaction=transaction, **contract_instance)\n transaction_fpds.save()\n\n # Update legal entity to map back to transaction\n legal_entity.transaction_unique_id = detached_award_proc_unique\n legal_entity.save()\n\n def perform_load(self, ids_to_delete, ids_to_insert):\n\n if len(ids_to_delete) > 0:\n with timer(\"deletion of all stale FPDS data\", logger.info):\n self.delete_stale_fpds(ids_to_delete=ids_to_delete)\n else:\n logger.info(\"No FPDS records to delete at this juncture\")\n\n if len(ids_to_insert) > 0:\n # Add FPDS 
records\n with timer(\"insertion of new FPDS data in batches\", logger.info):\n self.insert_all_new_fpds(ids_to_insert)\n\n # Update Awards based on changed FPDS records\n with timer(\"updating awards to reflect their latest associated transaction info\", logger.info):\n award_record_count = update_awards(tuple(AWARD_UPDATE_ID_LIST))\n logger.info(\"{} awards updated from their transactional data\".format(award_record_count))\n\n # Update FPDS-specific Awards based on the info in child transactions\n with timer(\"updating contract-specific awards to reflect their latest transaction info\", logger.info):\n award_record_count = update_contract_awards(tuple(AWARD_UPDATE_ID_LIST))\n logger.info(\"{} awards updated FPDS-specific and exec comp data\".format(award_record_count))\n\n # Check the linkages from file C to FPDS records and update any that are missing\n with timer(\"updating C->D linkages\", logger.info):\n update_c_to_d_linkages(\"contract\")\n else:\n logger.info(\"No FPDS records to insert or modify at this juncture\")\n\n def nightly_loader(self, start_date):\n\n logger.info(\"==== Starting FPDS nightly data load ====\")\n\n if start_date:\n date = start_date\n date = datetime.strptime(date, \"%Y-%m-%d\").date()\n else:\n default_last_load_date = datetime.now(timezone.utc) - timedelta(days=1)\n date = get_last_load_date(\"fpds\", default=default_last_load_date).date()\n processing_start_datetime = datetime.now(timezone.utc)\n\n logger.info(\"Processing data for FPDS starting from %s\" % date)\n\n with timer(\"retrieval of new/modified FPDS data ID list\", logger.info):\n ids_to_insert = self.get_fpds_transaction_ids(date=date)\n\n with timer(\"retrieval of deleted FPDS IDs\", logger.info):\n ids_to_delete = self.get_deleted_fpds_data_from_s3(date=date)\n\n self.perform_load(ids_to_delete, ids_to_insert)\n\n # Update the date for the last time the data load was run\n update_last_load_date(\"fpds\", processing_start_datetime)\n\n logger.info(\"FPDS NIGHTLY UPDATE COMPLETE\")\n\n def load_specific_transactions(self, detached_award_procurement_ids):\n logger.info(\"==== Starting FPDS (re)load of specific transactions ====\")\n\n self.perform_load(detached_award_procurement_ids, detached_award_procurement_ids)\n\n logger.info(\"FPDS SPECIFIC (RE)LOAD COMPLETE\")\n\n def add_arguments(self, parser):\n mutually_exclusive_group = parser.add_mutually_exclusive_group()\n\n mutually_exclusive_group.add_argument(\n \"--date\",\n dest=\"date\",\n type=str,\n help=\"(OPTIONAL) Date from which to start the nightly loader. Expected format: YYYY-MM-DD\",\n )\n\n mutually_exclusive_group.add_argument(\n \"--detached-award-procurement-ids\",\n nargs=\"+\",\n type=int,\n help=\"(OPTIONAL) detached_award_procurement_ids of FPDS transactions to load/reload from Broker\",\n )\n\n parser.add_argument(\n \"--id-file\",\n metavar=\"FILEPATH\",\n type=str,\n help=\"A file containing only transaction IDs (detached_award_procurement_id) \"\n \"to reload, one ID per line. 
Nonexistent IDs will be ignored.\",\n )\n\n @transaction.atomic\n def handle(self, *args, **options):\n if any([options[\"detached_award_procurement_ids\"], options[\"id_file\"]]):\n ids_from_file = read_afa_ids_from_file(options[\"id_file\"]) if options[\"id_file\"] else set()\n explicit_ids = (\n set(options[\"detached_award_procurement_ids\"]) if options[\"detached_award_procurement_ids\"] else set()\n )\n detached_award_procurement_ids = list(explicit_ids | ids_from_file)\n\n self.load_specific_transactions(detached_award_procurement_ids)\n else:\n self.nightly_loader(options[\"date\"])\n", "sub_path": "usaspending_api/broker/management/commands/fpds_nightly_loader.py", "file_name": "fpds_nightly_loader.py", "file_ext": "py", "file_size_in_byte": 20780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "usaspending_api.common.retrieve_file_from_uri.RetrieveFileFromUri", "line_number": 36, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.IS_LOCAL", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "django.conf.settings.CSV_LOCAL_PATH", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 49, "usage_type": "name"}, {"api_name": "re.search", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "name"}, {"api_name": "django.conf.settings.CSV_LOCAL_PATH", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 51, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 53, "usage_type": "call"}, {"api_name": "django.conf.settings.USASPENDING_AWS_REGION", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "name"}, {"api_name": "django.conf.settings.FPDS_BUCKET_NAME", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 61, "usage_type": "name"}, {"api_name": "boto3.client", "line_number": 66, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 67, "usage_type": "call"}, {"api_name": "re.search", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 96, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.connections", "line_number": 110, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 121, "usage_type": "name"}, {"api_name": "usaspending_api.etl.broker_etl_helpers.dictfetchall", "line_number": 124, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized.objects.filter", 
"line_number": 130, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized", "line_number": 130, "usage_type": "name"}, {"api_name": "usaspending_api.broker.helpers.find_related_awards.find_related_awards", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 140, "usage_type": "name"}, {"api_name": "django.db.DEFAULT_DB_ALIAS", "line_number": 140, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 178, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 180, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.dict_helpers.upper_case_dict_values", "line_number": 230, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.create_location", "line_number": 233, "usage_type": "call"}, {"api_name": "usaspending_api.references.models.LegalEntity.objects.create", "line_number": 237, "usage_type": "call"}, {"api_name": "usaspending_api.references.models.LegalEntity.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "usaspending_api.references.models.LegalEntity", "line_number": 237, "usage_type": "name"}, {"api_name": "usaspending_api.broker.helpers.set_legal_entity_boolean_fields.set_legal_entity_boolean_fields", "line_number": 242, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.load_data_into_model", "line_number": 243, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.create_location", "line_number": 246, "usage_type": "call"}, {"api_name": "usaspending_api.references.models.Agency.get_by_subtier_only", "line_number": 249, "usage_type": "call"}, {"api_name": "usaspending_api.references.models.Agency", "line_number": 249, "usage_type": "name"}, {"api_name": "usaspending_api.references.models.Agency.get_by_subtier_only", "line_number": 250, "usage_type": "call"}, {"api_name": "usaspending_api.references.models.Agency", "line_number": 250, "usage_type": "name"}, {"api_name": "usaspending_api.awards.models.Award.get_or_create_summary_award", "line_number": 253, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.Award", "line_number": 253, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 268, "usage_type": "name"}, {"api_name": "usaspending_api.broker.helpers.award_category_helper.award_types", "line_number": 274, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.format_date", "line_number": 282, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.format_date", "line_number": 283, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.format_date", "line_number": 284, "usage_type": "call"}, {"api_name": "usaspending_api.broker.helpers.get_business_categories.get_business_categories", "line_number": 290, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.load_data_into_model", "line_number": 293, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized", "line_number": 294, "usage_type": "call"}, {"api_name": "usaspending_api.etl.management.load_base.load_data_into_model", "line_number": 301, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionFPDS", "line_number": 
303, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionFPDS.objects.filter", "line_number": 310, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionFPDS.objects", "line_number": 310, "usage_type": "attribute"}, {"api_name": "usaspending_api.awards.models.TransactionFPDS", "line_number": 310, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 313, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 313, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 313, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 313, "usage_type": "name"}, {"api_name": "usaspending_api.common.helpers.date_helper.fy", "line_number": 314, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized.objects.filter", "line_number": 317, "usage_type": "call"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized.objects", "line_number": 317, "usage_type": "attribute"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized", "line_number": 317, "usage_type": "name"}, {"api_name": "django.db.transaction", "line_number": 325, "usage_type": "name"}, {"api_name": "usaspending_api.awards.models.TransactionNormalized", "line_number": 325, "usage_type": "call"}, {"api_name": "django.db.transaction.save", "line_number": 326, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 326, "usage_type": "name"}, {"api_name": "usaspending_api.awards.models.TransactionFPDS", "line_number": 329, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 329, "usage_type": "name"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 339, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 346, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 350, "usage_type": "call"}, {"api_name": "usaspending_api.etl.award_helpers.update_awards", "line_number": 351, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 355, "usage_type": "call"}, {"api_name": "usaspending_api.etl.award_helpers.update_contract_awards", "line_number": 356, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 360, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.etl_helpers.update_c_to_d_linkages", "line_number": 361, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 371, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 371, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 373, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 373, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 373, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 373, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 373, "usage_type": "call"}, {"api_name": "usaspending_api.broker.helpers.last_load_date.get_last_load_date", "line_number": 374, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 375, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 375, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 375, "usage_type": "attribute"}, {"api_name": 
"datetime.timezone", "line_number": 375, "usage_type": "name"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 379, "usage_type": "call"}, {"api_name": "usaspending_api.common.helpers.timing_helpers.timer", "line_number": 382, "usage_type": "call"}, {"api_name": "usaspending_api.broker.helpers.last_load_date.update_last_load_date", "line_number": 388, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 424, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 424, "usage_type": "name"}]} +{"seq_id": "66620603", "text": "import requests\nimport multiprocessing as mp\n\ndef getproxies():\n url = \"https://api.proxyscrape.com/?request=getproxies&proxytype=socks5&timeout=10000&country=all&uptime=0\"\n r = requests.get(url)\n with open(\"unchecked.txt\", \"w\") as f :\n f.write(r.text)\n\ndef checkproxies():\n with open(\"unchecked.txt\", \"r\") as f:\n print(\"Checking Proxies...\")\n proxies = f.readlines()\n for i in proxies:\n proxy = i[ : len(i) - 1]\n try:\n requests.get(\"https://google.com\", proxies={ \"https\": \"socks5h://\" + proxy }, timeout=1000)\n print(proxy + \" OK\")\n except:\n print(proxy + \" BAD\")\n\ndef multiTasks():\n with mp.Pool(processes=5) as pool:\n pool.apply(checkproxies)\n\n\nif __name__ == \"__main__\":\n multiTasks()\n", "sub_path": "unfinished_python/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "213636722", "text": "import preprocessing as pre\nimport numpy as np\nimport pandas as pd\n\n#temporizador\nimport time\nfrom functools import wraps\n\ndef computeDecisionTreeRegressionModel(X, y):\n from sklearn.tree import DecisionTreeRegressor\n\n regressor = DecisionTreeRegressor()\n regressor.fit(X, y)\n\n return regressor\n\ndef showPlot(XPoints, yPoints, XLine, yLine):\n import matplotlib.pyplot as plt\n\n plt.scatter(XPoints, yPoints, color= 'red')\n plt.plot(XLine, yLine, color = 'blue')\n plt.title(\"Comparando pontos reais com a reta produzida pela regressão de árvore de decisão.\")\n plt.xlabel(\"Experiência em anos\")\n plt.ylabel(\"Salário\")\n plt.show()\n\ndef runDecisionTreeRegressionExample(filename):\n start_time = time.time()\n X, y, csv = pre.loadDataset(filename)\n elapsed_time = time.time() - start_time\n #print(\"Load Dataset: %.2f\" % elapsed_time, \"segundos.\")\n\n start_time = time.time()\n regressor = computeDecisionTreeRegressionModel(X, y)\n elapsed_time = time.time() - start_time\n print(\"Compute Decision Tree Regression: %.2f\" % elapsed_time, \"segundos.\")\n\n from sklearn.metrics import r2_score\n return r2_score(y, regressor.predict(X))\n\nif __name__ == \"__main__\":\n print(runDecisionTreeRegressionExample(\"salary.csv\"))\n", "sub_path": "Ep 11/regressiondecisiontree.py", "file_name": "regressiondecisiontree.py", "file_ext": "py", "file_size_in_byte": 1274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 20, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "preprocessing.loadDataset", "line_number": 29, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "326277856", "text": "from flask import Flask, render_template, request, redirect, session, url_for\nfrom flask import send_file, make_response, send_from_directory\n\napp = Flask(__name__, template_folder=\"templates\", static_url_path='/static')\napp.config['TEMPLATES_AUTO_RELOAD'] = True\napp.config['DEBUG'] = True\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "237125913", "text": "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n\t搜狐新闻标题爬虫\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n# 获取页面内容并存提取保存\nres = requests.get('http://news.sina.com.cn/china/')\nres.encoding = 'utf-8'\n# print(res.text.txt)\nsoup = BeautifulSoup(res.text, 'lxml')\n# print(soup)\n\n# 获取网页 时间,标题,网页\nfor news in soup.select('.news-item'):\n if len(news.select('h2')) > 0:\n # print(news.select('h2')) # 有空的怎么办? 
handled by the if above\n h2 = news.select('h2')[0].text\n time = news.select('.time')[0].text\n a = news.select('a')[0]['href']\n print(time, h2, a)\n", "sub_path": "A_库的分类/BeautifulSoup_yhz/实例2 - 搜狐网页提取.py", "file_name": "实例2 - 搜狐网页提取.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "478189839", "text": "import helpers, testly\n\nfrom collections import OrderedDict\n\nfrom pyppl import Proc\nfrom pyppl.proctree import ProcTree, ProcNode\nfrom pyppl.exception import ProcTreeProcExists, ProcTreeParseError\n\nclass TestProcNode(testly.TestCase):\n\n\tdef testInit(self):\n\t\tproc = Proc()\n\t\tself.maxDiff = None\n\t\tpn = ProcNode(proc)\n\t\tself.assertIs(pn.proc, proc)\n\t\tself.assertListEqual(pn.prev, [])\n\t\tself.assertListEqual(pn.next, [])\n\t\tself.assertEqual(pn.ran, False)\n\t\tself.assertEqual(pn.start, False)\n\t\tself.assertIn('File ', pn.defs[0])\n\n\tdef dataProvider_testSameIdTag(self):\n\t\tproc1 = Proc()\n\t\tpn1 = ProcNode(proc1)\n\t\tyield pn1, proc1, True\n\n\t\tproc2 = Proc()\n\t\tyield pn1, proc2, False\n\n\tdef testSameIdTag(self, pn, proc, out):\n\t\tself.assertEqual(pn.sameIdTag(proc), out)\n\n\tdef testRepr(self):\n\t\tproc = Proc()\n\t\tpn = ProcNode(proc)\n\t\tself.assertEqual(repr(pn), '<ProcNode(<Proc(id=%s,tag=%s) @ %s>) @ %s>' % (proc.id, proc.tag, hex(id(proc)), hex(id(pn))))\n\nclass TestProcTree(testly.TestCase):\n\n\tdef setUp(self):\n\t\t# procs registered by Proc.__init__() are also removed!\n\t\tif self.isFirst() or not self.isOfSet():\n\t\t\tProcTree.NODES = OrderedDict()\n\t\t\n\tdef dataProvider_testRegister(self):\n\t\tproc_testRegister1 = Proc()\n\t\tyield proc_testRegister1, 1\n\t\tyield proc_testRegister1, 1\n\t\tproc_testRegister2 = Proc()\n\t\tyield proc_testRegister2, 2\n\n\tdef testRegister(self, proc, l):\n\t\tProcTree.register(proc)\n\t\tself.assertIs(ProcTree.NODES[proc].proc, proc)\n\t\tself.assertEqual(len(ProcTree.NODES), l)\n\n\tdef dataProvider_testCheck(self):\n\t\tproc_testCheck1 = Proc()\n\t\tproc_testCheck2 = Proc()\n\t\tproc_testCheck3 = Proc(id = 'proc_testCheck1')\n\t\tyield proc_testCheck1, False\n\t\tyield proc_testCheck2, False\n\t\tyield proc_testCheck3, True\n\n\tdef testCheck(self, proc, r):\n\t\tProcTree.register(proc)\n\t\tif r:\n\t\t\tself.assertRaises(ProcTreeProcExists, ProcTree.check, proc)\n\t\telse:\n\t\t\tProcTree.check(proc)\n\n\tdef dataProvider_testGetPrevNextStr(self):\n\t\tproc_testGetPrevNextStr1 = Proc()\n\t\tproc_testGetPrevNextStr2 = Proc()\n\t\tproc_testGetPrevNextStr3 = Proc()\n\t\tproc_testGetPrevNextStr2.depends = proc_testGetPrevNextStr1\n\t\tproc_testGetPrevNextStr3.depends = proc_testGetPrevNextStr2\n\t\tps = [proc_testGetPrevNextStr1, proc_testGetPrevNextStr2, proc_testGetPrevNextStr3]\n\t\tyield ps, proc_testGetPrevNextStr1, 'prev', 'START'\n\t\tyield ps, proc_testGetPrevNextStr2, 'prev', '[proc_testGetPrevNextStr1]'\n\t\tyield ps, proc_testGetPrevNextStr3, 'prev', '[proc_testGetPrevNextStr2]'\n\t\tyield ps, proc_testGetPrevNextStr1, 'next', '[proc_testGetPrevNextStr2]'\n\t\tyield ps, proc_testGetPrevNextStr2, 'next', '[proc_testGetPrevNextStr3]'\n\t\tyield ps, proc_testGetPrevNextStr3, 'next', 'END'\n\n\tdef testGetPrevNextStr(self, procs, proc, which, out):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tProcTree()\n\t\tif which == 
'prev':\n\t\t\tself.assertEqual(ProcTree.getPrevStr(proc), out)\n\t\telse:\n\t\t\tself.assertEqual(ProcTree.getNextStr(proc), out)\n\n\tdef dataProvider_testGetNext(self):\n\t\tproc_testGetNext1 = Proc()\n\t\tproc_testGetNext2 = Proc()\n\t\tproc_testGetNext3 = Proc()\n\t\tproc_testGetNext4 = Proc()\n\t\tproc_testGetNext2.depends = proc_testGetNext1\n\t\tproc_testGetNext3.depends = proc_testGetNext2\n\t\tproc_testGetNext4.depends = proc_testGetNext2\n\t\tps = [proc_testGetNext1, proc_testGetNext2, proc_testGetNext3, proc_testGetNext4]\n\t\tyield ps, proc_testGetNext1, [proc_testGetNext2]\n\t\tyield ps, proc_testGetNext2, [proc_testGetNext3, proc_testGetNext4]\n\t\tyield ps, proc_testGetNext3, []\n\t\tyield ps, proc_testGetNext4, []\n\n\tdef testGetNext(self, procs, proc, outs):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tProcTree()\n\t\tnexts = ProcTree.getNext(proc)\n\t\tself.assertCountEqual(nexts, outs)\n\n\tdef dataProvider_testReset(self):\n\t\tproc_testReset1 = Proc()\n\t\tproc_testReset2 = Proc()\n\t\tproc_testReset3 = Proc()\n\t\tproc_testReset4 = Proc()\n\t\tproc_testReset2.depends = proc_testReset1\n\t\tproc_testReset3.depends = proc_testReset2\n\t\tproc_testReset4.depends = proc_testReset2\n\t\tyield [proc_testReset1, proc_testReset2, proc_testReset3, proc_testReset4], \n\n\tdef testReset(self, procs):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tProcTree()\n\t\tProcTree.reset()\n\t\tfor node in ProcTree.NODES.values():\n\t\t\tself.assertListEqual(node.prev, [])\n\t\t\tself.assertListEqual(node.next, [])\n\t\t\tself.assertFalse(node.ran)\n\t\t\tself.assertFalse(node.start)\n\n\tdef dataProvider_testInit(self):\n\t\tproc_testInit1 = Proc()\n\t\tproc_testInit2 = Proc()\n\t\tproc_testInit3 = Proc()\n\t\tproc_testInit4 = Proc()\n\t\tproc_testInit2.depends = proc_testInit1\n\t\tproc_testInit3.depends = proc_testInit2\n\t\tproc_testInit4.depends = proc_testInit2\n\t\tyield [proc_testInit1, proc_testInit2, proc_testInit3, proc_testInit4], \n\t\tyield [proc_testInit1, proc_testInit3], \n\n\tdef testInit(self, procs):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tself.assertEqual(pt.starts, [])\n\t\tself.assertEqual(pt.ends, [])\n\t\tfor proc in procs:\n\t\t\tdepends = proc.depends\n\t\t\tfor depend in depends:\n\t\t\t\tnproc = ProcTree.NODES[proc]\n\t\t\t\tndepend = ProcTree.NODES[depend]\n\t\t\t\tself.assertIn(nproc, ndepend.next)\n\t\t\t\tself.assertIn(ndepend, nproc.prev)\n\n\tdef dataProvider_testSetGetStarts(self):\n\t\tproc_testSetGetStarts1 = Proc()\n\t\tproc_testSetGetStarts2 = Proc()\n\t\tproc_testSetGetStarts3 = Proc()\n\t\tproc_testSetGetStarts4 = Proc()\n\t\tproc_testSetGetStarts2.depends = proc_testSetGetStarts1\n\t\tproc_testSetGetStarts3.depends = proc_testSetGetStarts2\n\t\tproc_testSetGetStarts4.depends = proc_testSetGetStarts2\n\t\tyield [proc_testSetGetStarts1, proc_testSetGetStarts2, proc_testSetGetStarts3, proc_testSetGetStarts4], [proc_testSetGetStarts1]\n\t\tyield [proc_testSetGetStarts2, proc_testSetGetStarts3, proc_testSetGetStarts4], [proc_testSetGetStarts2]\n\t\tyield [proc_testSetGetStarts1, proc_testSetGetStarts2, proc_testSetGetStarts3, proc_testSetGetStarts4], [proc_testSetGetStarts1, proc_testSetGetStarts2]\n\n\tdef testSetGetStarts(self, procs, starts):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\tfor proc in procs:\n\t\t\tif proc in 
starts:\n\t\t\t\tself.assertTrue(ProcTree.NODES[proc].start)\n\t\t\telse:\n\t\t\t\tself.assertFalse(ProcTree.NODES[proc].start)\n\t\ts = pt.getStarts()\n\t\tself.assertCountEqual(s, starts)\n\t\tself.assertCountEqual(pt.starts, starts)\n\n\tdef dataProvider_testGetPaths(self):\n\t\tproc_testGetPaths1 = Proc()\n\t\tproc_testGetPaths2 = Proc()\n\t\tproc_testGetPaths3 = Proc()\n\t\tproc_testGetPaths4 = Proc()\n\t\tproc_testGetPaths5 = Proc()\n\t\tproc_testGetPaths2.depends = proc_testGetPaths1\n\t\tproc_testGetPaths3.depends = proc_testGetPaths2, proc_testGetPaths4\n\t\tproc_testGetPaths4.depends = proc_testGetPaths2\n\t\tproc_testGetPaths5.depends = proc_testGetPaths1\n\t\t\"\"\"\n\t\tproc1 -> proc2 -> proc3\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testGetPaths1, proc_testGetPaths2, proc_testGetPaths3, proc_testGetPaths4, proc_testGetPaths5]\n\t\tyield ps, proc_testGetPaths1, []\n\t\tyield ps, proc_testGetPaths2, [[proc_testGetPaths1]]\n\t\tyield ps, proc_testGetPaths3, [[proc_testGetPaths2, proc_testGetPaths1], [proc_testGetPaths4, proc_testGetPaths2, proc_testGetPaths1]]\n\t\tyield ps, proc_testGetPaths4, [[proc_testGetPaths2, proc_testGetPaths1]]\n\t\tyield ps, proc_testGetPaths5, [[proc_testGetPaths1]]\n\n\t\tproc_testGetPaths6 = Proc()\n\t\tproc_testGetPaths7 = Proc()\n\t\tproc_testGetPaths8 = Proc()\n\t\tproc_testGetPaths7.depends = proc_testGetPaths6\n\t\tproc_testGetPaths8.depends = proc_testGetPaths7\n\t\tproc_testGetPaths6.depends = proc_testGetPaths8\n\t\tps2 = [proc_testGetPaths6, proc_testGetPaths7, proc_testGetPaths8]\n\t\tyield ps2, proc_testGetPaths6, [], True\n\n\t\tproc_testGetPaths10 = Proc()\n\t\tproc_testGetPaths11 = Proc()\n\t\tproc_testGetPaths12 = Proc()\n\t\tproc_testGetPaths11.depends = proc_testGetPaths10\n\t\tproc_testGetPaths12.depends = proc_testGetPaths11\n\t\tproc_testGetPaths10.depends = proc_testGetPaths11\n\t\tps3 = [proc_testGetPaths10, proc_testGetPaths11, proc_testGetPaths12]\n\t\tyield ps3, proc_testGetPaths12, [], True\n\n\t\t# should be ok: \n\t\t# 13 -> 15\n\t\t# 14 -> 15\n\t\t# 13 -> 14\n\t\tproc_testGetPaths13 = Proc()\n\t\tproc_testGetPaths14 = Proc()\n\t\tproc_testGetPaths15 = Proc()\n\t\tproc_testGetPaths15.depends = proc_testGetPaths13, proc_testGetPaths14\n\t\tproc_testGetPaths14.depends = proc_testGetPaths13\n\t\tps4 = [proc_testGetPaths13, proc_testGetPaths14, proc_testGetPaths15]\n\t\tyield ps4, proc_testGetPaths15, [[proc_testGetPaths13], [proc_testGetPaths14, proc_testGetPaths13]]\n\n\n\tdef testGetPaths(self, procs, proc, paths, exception = None):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tif exception:\n\t\t\tself.assertRaises(ProcTreeParseError, pt.getPaths, proc)\n\t\telse:\n\t\t\tps = pt.getPaths(proc)\n\t\t\tself.assertListEqual(ps, paths)\n\n\tdef dataProvider_testGetPathsToStarts(self):\n\t\tproc_testGetPathsToStarts1 = Proc()\n\t\tproc_testGetPathsToStarts2 = Proc()\n\t\tproc_testGetPathsToStarts3 = Proc()\n\t\tproc_testGetPathsToStarts4 = Proc()\n\t\tproc_testGetPathsToStarts5 = Proc()\n\t\tproc_testGetPathsToStarts2.depends = proc_testGetPathsToStarts1\n\t\tproc_testGetPathsToStarts3.depends = proc_testGetPathsToStarts2, proc_testGetPathsToStarts4\n\t\tproc_testGetPathsToStarts4.depends = proc_testGetPathsToStarts2\n\t\tproc_testGetPathsToStarts5.depends = proc_testGetPathsToStarts1\n\t\t\"\"\"\n\t\tproc1 -> proc2 -> proc3\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testGetPathsToStarts1, proc_testGetPathsToStarts2, proc_testGetPathsToStarts3, 
proc_testGetPathsToStarts4, proc_testGetPathsToStarts5]\n\t\tyield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts1, []\n\t\tyield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts2, [[proc_testGetPathsToStarts1]]\n\t\tyield ps, [proc_testGetPathsToStarts2], proc_testGetPathsToStarts2, []\n\t\tyield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts3, [[proc_testGetPathsToStarts2, proc_testGetPathsToStarts1], [proc_testGetPathsToStarts4, proc_testGetPathsToStarts2, proc_testGetPathsToStarts1]]\n\t\tyield ps, [proc_testGetPathsToStarts1, proc_testGetPathsToStarts4], proc_testGetPathsToStarts3, [[proc_testGetPathsToStarts2, proc_testGetPathsToStarts1], [proc_testGetPathsToStarts4, proc_testGetPathsToStarts2, proc_testGetPathsToStarts1]]\n\t\tyield ps, [proc_testGetPathsToStarts2], proc_testGetPathsToStarts3, [[proc_testGetPathsToStarts2], [proc_testGetPathsToStarts4, proc_testGetPathsToStarts2]]\n\t\tyield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts4, [[proc_testGetPathsToStarts2, proc_testGetPathsToStarts1]]\n\t\tyield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts5, [[proc_testGetPathsToStarts1]]\n\n\tdef testGetPathsToStarts(self, procs, starts, proc, paths):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\tps = pt.getPathsToStarts(proc)\n\t\tself.assertListEqual(ps, paths)\n\n\tdef dataProvider_testCheckPath(self):\n\t\tproc_testCheckPath0 = Proc()\n\t\tproc_testCheckPath1 = Proc()\n\t\tproc_testCheckPath2 = Proc()\n\t\tproc_testCheckPath3 = Proc()\n\t\tproc_testCheckPath4 = Proc()\n\t\tproc_testCheckPath5 = Proc()\n\t\tproc_testCheckPath2.depends = proc_testCheckPath0, proc_testCheckPath1\n\t\tproc_testCheckPath3.depends = proc_testCheckPath2, proc_testCheckPath4\n\t\tproc_testCheckPath4.depends = proc_testCheckPath2\n\t\tproc_testCheckPath5.depends = proc_testCheckPath1\n\t\t\"\"\"\n\t\t\tproc0\n\t\t\t\t\\\n\t\tproc1 -> proc2 -> proc3\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testCheckPath0, proc_testCheckPath1, proc_testCheckPath2, proc_testCheckPath3, proc_testCheckPath4, proc_testCheckPath5]\n\t\tyield ps, [proc_testCheckPath1], proc_testCheckPath1, True\n\t\tyield ps, [proc_testCheckPath1], proc_testCheckPath2, [proc_testCheckPath0]\n\t\tyield ps, [proc_testCheckPath0, proc_testCheckPath1], proc_testCheckPath2, True\n\t\tyield ps, [proc_testCheckPath0, proc_testCheckPath1], proc_testCheckPath3, True\n\t\tyield ps, [proc_testCheckPath0], proc_testCheckPath3, [proc_testCheckPath2, proc_testCheckPath1]\n\n\tdef testCheckPath(self, procs, starts, proc, passed):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\tif isinstance(passed, bool):\n\t\t\tself.assertEqual(pt.checkPath(proc), passed)\n\t\telse:\n\t\t\tself.assertListEqual(pt.checkPath(proc), passed)\n\n\tdef dataProvider_testGetEnds(self):\n\t\t# check for loops\n\t\tproc_testGetEnds_loop0 = Proc()\n\t\tproc_testGetEnds_loop1 = Proc()\n\t\tproc_testGetEnds_loop2 = Proc()\n\t\tproc_testGetEnds_loop3 = Proc()\n\t\tproc_testGetEnds_loop1.depends = proc_testGetEnds_loop0\n\t\tproc_testGetEnds_loop2.depends = proc_testGetEnds_loop1\n\t\tproc_testGetEnds_loop3.depends = proc_testGetEnds_loop2\n\t\tproc_testGetEnds_loop0.depends = proc_testGetEnds_loop1\n\t\t\"\"\"\n\t\t0 -> 1 -> 2 -> 3\n\t\t|____|\n\t\t\"\"\"\n\t\tyield [proc_testGetEnds_loop0, proc_testGetEnds_loop1, proc_testGetEnds_loop2, proc_testGetEnds_loop3], [proc_testGetEnds_loop3], [], 
ProcTreeParseError, 'Loop dependency'\n\n\t\tproc_testGetEnds_loop4 = Proc()\n\t\tproc_testGetEnds_loop5 = Proc()\n\t\tproc_testGetEnds_loop6 = Proc()\n\t\tproc_testGetEnds_loop7 = Proc()\n\t\tproc_testGetEnds_loop5.depends = proc_testGetEnds_loop4\n\t\tproc_testGetEnds_loop6.depends = proc_testGetEnds_loop5\n\t\tproc_testGetEnds_loop7.depends = proc_testGetEnds_loop6\n\t\tproc_testGetEnds_loop4.depends = proc_testGetEnds_loop7\n\t\t\"\"\"\n\t\t4 -> 5 -> 6 -> 7\n\t\t|______________|\n\t\t\"\"\"\n\t\tyield [proc_testGetEnds_loop4, proc_testGetEnds_loop5, proc_testGetEnds_loop6, proc_testGetEnds_loop7], [proc_testGetEnds_loop7], [], ProcTreeParseError, 'Loop dependency'\n\n\t\tproc_testGetEnds0 = Proc()\n\t\tproc_testGetEnds1 = Proc()\n\t\tproc_testGetEnds2 = Proc()\n\t\tproc_testGetEnds3 = Proc()\n\t\tproc_testGetEnds4 = Proc()\n\t\tproc_testGetEnds5 = Proc()\n\t\tproc_testGetEnds2.depends = proc_testGetEnds0, proc_testGetEnds1\n\t\tproc_testGetEnds3.depends = proc_testGetEnds2, proc_testGetEnds4\n\t\tproc_testGetEnds4.depends = proc_testGetEnds2\n\t\tproc_testGetEnds5.depends = proc_testGetEnds1\n\t\t\"\"\"\n\t\t\tproc0\n\t\t\t\t\\\n\t\tproc1 -> proc2 -> proc3\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testGetEnds0, proc_testGetEnds1, proc_testGetEnds2, proc_testGetEnds3, proc_testGetEnds4, proc_testGetEnds5]\n\n\t\tyield ps, [proc_testGetEnds5], [], ProcTreeParseError, 'one of the paths cannot go through'\n\t\tyield ps, [proc_testGetEnds1], [proc_testGetEnds5]\n\t\tyield ps, [proc_testGetEnds0, proc_testGetEnds1], [proc_testGetEnds3, proc_testGetEnds5]\n\t\tyield ps, [proc_testGetEnds0], [], ProcTreeParseError, 'one of the paths cannot go through'\n\n\t\tproc_testGetEnds6 = Proc()\n\t\tyield [proc_testGetEnds6], [proc_testGetEnds6], [proc_testGetEnds6]\n\t\tyield [proc_testGetEnds6], [], [], ProcTreeParseError, 'Failed to determine end processes by start processes'\n\n\n\tdef testGetEnds(self, procs, starts, ends, exception = None, msg = None):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\tif exception:\n\t\t\tself.assertRaisesRegex(ProcTreeParseError, msg, pt.getEnds)\n\t\telse:\n\t\t\tself.assertCountEqual(pt.getEnds(), ends)\n\n\tdef dataProvider_testGetAllPaths(self):\n\t\tproc_testGetAllPaths0 = Proc()\n\t\tproc_testGetAllPaths1 = Proc()\n\t\tproc_testGetAllPaths2 = Proc()\n\t\tproc_testGetAllPaths3 = Proc()\n\t\tproc_testGetAllPaths4 = Proc()\n\t\tproc_testGetAllPaths5 = Proc()\n\t\tproc_testGetAllPaths2.depends = proc_testGetAllPaths0, proc_testGetAllPaths1\n\t\tproc_testGetAllPaths3.depends = proc_testGetAllPaths2, proc_testGetAllPaths4\n\t\tproc_testGetAllPaths4.depends = proc_testGetAllPaths2\n\t\tproc_testGetAllPaths5.depends = proc_testGetAllPaths1\n\t\t\"\"\"\n\t\t\tproc0\n\t\t\t\t\\\n\t\tproc1 -> proc2 -> proc3\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths5]\n\t\tyield ps, [proc_testGetAllPaths2], [[proc_testGetAllPaths3, proc_testGetAllPaths2], [proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths2]]\n\t\tyield ps, [proc_testGetAllPaths1], [[proc_testGetAllPaths5, proc_testGetAllPaths1]]\n\t\tyield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [\n\t\t\t[proc_testGetAllPaths5, proc_testGetAllPaths1],\n\t\t\t[proc_testGetAllPaths3, proc_testGetAllPaths2, proc_testGetAllPaths0],\n\t\t\t[proc_testGetAllPaths3, proc_testGetAllPaths2, 
proc_testGetAllPaths1],\n\t\t\t[proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths2, proc_testGetAllPaths0],\n\t\t\t[proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths2, proc_testGetAllPaths1],\n\t\t]\n\n\t\t# obsolete\n\t\tproc_testGetAllPaths6 = Proc()\n\t\tyield [proc_testGetAllPaths6], [proc_testGetAllPaths6], [[proc_testGetAllPaths6]]\n\n\tdef testGetAllPaths(self, procs, starts, paths):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\tself.assertCountEqual(pt.getAllPaths(), paths)\n\n\tdef dataProvider_testGetNextToRun(self):\n\t\tproc_testGetAllPaths0 = Proc()\n\t\tproc_testGetAllPaths1 = Proc()\n\t\tproc_testGetAllPaths2 = Proc()\n\t\tproc_testGetAllPaths3 = Proc()\n\t\tproc_testGetAllPaths4 = Proc()\n\t\tproc_testGetAllPaths5 = Proc()\n\t\tproc_testGetAllPaths2.depends = proc_testGetAllPaths0, proc_testGetAllPaths1\n\t\tproc_testGetAllPaths3.depends = proc_testGetAllPaths2, proc_testGetAllPaths4\n\t\tproc_testGetAllPaths4.depends = proc_testGetAllPaths2\n\t\tproc_testGetAllPaths5.depends = proc_testGetAllPaths1\n\t\t\"\"\"\n\t\t\tproc0\n\t\t\t\t\\\n\t\tproc1 -> proc2 -> proc3\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths5]\n\t\t\n\t\tyield ps, [proc_testGetAllPaths0], [], proc_testGetAllPaths0\n\t\t\n\t\tyield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [], proc_testGetAllPaths0\n\t\t\n\t\tyield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1], proc_testGetAllPaths2\n\t\t\n\t\tyield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths5], proc_testGetAllPaths4\n\t\t\n\t\tyield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths4, proc_testGetAllPaths5], proc_testGetAllPaths3\n\t\t\n\t\tyield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths5], None\n\n\tdef testGetNextToRun(self, procs, starts, haveran, out):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\tfor hr in haveran:\n\t\t\tProcTree.NODES[hr].ran = True\n\t\tself.assertIs(pt.getNextToRun(), out)\n\n\tdef dataProvider_testUnranProcs(self):\n\t\tproc_testUnranProcs0 = Proc()\n\t\tproc_testUnranProcs1 = Proc()\n\t\tproc_testUnranProcs2 = Proc()\n\t\tproc_testUnranProcs3 = Proc()\n\t\tproc_testUnranProcs4 = Proc()\n\t\tproc_testUnranProcs5 = Proc()\n\t\tproc_testUnranProcs6 = Proc()\n\t\tproc_testUnranProcs7 = Proc()\n\t\tproc_testUnranProcs2.depends = proc_testUnranProcs0, proc_testUnranProcs1\n\t\tproc_testUnranProcs3.depends = proc_testUnranProcs2, proc_testUnranProcs4\n\t\tproc_testUnranProcs4.depends = proc_testUnranProcs2\n\t\tproc_testUnranProcs5.depends = proc_testUnranProcs1\n\t\tproc_testUnranProcs6.depends = proc_testUnranProcs0\n\t\t\"\"\"\n\t\t\tproc0 -> proc6\n\t\t\t\t\\\n\t\tproc1 -> proc2 -> proc3 proc7\n\t\t\t\\ \\ /\n\t\t\t proc5 proc4\n\t\t\"\"\"\n\t\tps = [proc_testUnranProcs0, proc_testUnranProcs1, proc_testUnranProcs2, proc_testUnranProcs3, proc_testUnranProcs4, proc_testUnranProcs5, proc_testUnranProcs6, proc_testUnranProcs7]\n\t\tyield ps, 
[proc_testUnranProcs0], {\n\t\t\t'proc_testUnranProcs3': ['proc_testUnranProcs2', 'proc_testUnranProcs1']\n\t\t}\n\t\tyield ps, [proc_testUnranProcs1], {\n\t\t\t'proc_testUnranProcs3': ['proc_testUnranProcs2', 'proc_testUnranProcs0']\n\t\t}\n\n\tdef testUnranProcs(self, procs, starts, outs):\n\t\tfor p in procs:\n\t\t\tProcTree.register(p)\n\t\tpt = ProcTree()\n\t\tpt.setStarts(starts)\n\t\t# run the pipeline\n\t\tp = pt.getNextToRun()\n\t\twhile p:\n\t\t\tProcTree.NODES[p].ran = True\n\t\t\tp = pt.getNextToRun()\n\t\tself.assertDictEqual(pt.unranProcs(), outs)\n\nif __name__ == '__main__':\n\ttestly.main(verbosity=2, failfast = True)", "sub_path": "tests/testProcTree.py", "file_name": "testProcTree.py", "file_ext": "py", "file_size_in_byte": 19805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "testly.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pyppl.Proc", "line_number": 12, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcNode", "line_number": 14, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 23, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcNode", "line_number": 24, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 27, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 34, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcNode", "line_number": 35, "usage_type": "call"}, {"api_name": "testly.TestCase", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 43, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 43, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 46, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 49, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 53, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 53, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 54, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 55, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 58, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 59, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 60, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 66, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 66, "usage_type": "name"}, {"api_name": "pyppl.exception.ProcTreeProcExists", "line_number": 68, "usage_type": "argument"}, {"api_name": "pyppl.proctree.ProcTree.check", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 68, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.check", "line_number": 70, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 70, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 73, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 74, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 75, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", 
"line_number": 88, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 88, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 89, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.getPrevStr", "line_number": 91, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 91, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.getNextStr", "line_number": 93, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 93, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 96, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 97, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 98, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 99, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 111, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 111, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 112, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.getNext", "line_number": 113, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 113, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 117, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 118, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 119, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 120, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 128, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 128, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 129, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.reset", "line_number": 130, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 130, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.NODES.values", "line_number": 131, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 131, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 138, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 139, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 140, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 141, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 150, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 150, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 151, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 157, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 158, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 163, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 164, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 165, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 166, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 176, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", 
"line_number": 176, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 177, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 181, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 183, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 189, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 190, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 191, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 192, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 193, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 210, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 211, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 212, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 219, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 220, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 221, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 232, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 233, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 234, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 243, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 243, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 244, "usage_type": "call"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 246, "usage_type": "argument"}, {"api_name": "pyppl.Proc", "line_number": 252, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 253, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 254, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 255, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 256, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 278, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 278, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 279, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 285, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 286, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 287, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 288, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 289, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 290, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 311, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 311, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 312, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 321, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 322, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 323, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 324, "usage_type": "call"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 333, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 335, "usage_type": "call"}, 
{"api_name": "pyppl.Proc", "line_number": 336, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 337, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 338, "usage_type": "call"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 347, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 349, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 350, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 351, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 352, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 353, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 354, "usage_type": "call"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 368, "usage_type": "name"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 371, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 373, "usage_type": "call"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 375, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 380, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 380, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 381, "usage_type": "call"}, {"api_name": "pyppl.exception.ProcTreeParseError", "line_number": 384, "usage_type": "argument"}, {"api_name": "pyppl.Proc", "line_number": 389, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 390, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 391, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 392, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 393, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 394, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 418, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 423, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 423, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 424, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 429, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 430, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 431, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 432, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 433, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 434, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 462, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 462, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 463, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 466, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 466, "usage_type": "name"}, {"api_name": "pyppl.Proc", "line_number": 470, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 471, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 472, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 473, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 474, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 475, "usage_type": "call"}, {"api_name": "pyppl.Proc", "line_number": 476, "usage_type": "call"}, {"api_name": "pyppl.Proc", 
"line_number": 477, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.register", "line_number": 500, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 500, "usage_type": "name"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 501, "usage_type": "call"}, {"api_name": "pyppl.proctree.ProcTree.NODES", "line_number": 506, "usage_type": "attribute"}, {"api_name": "pyppl.proctree.ProcTree", "line_number": 506, "usage_type": "name"}, {"api_name": "testly.main", "line_number": 511, "usage_type": "call"}]} +{"seq_id": "584756143", "text": "from xml.dom import minidom\nimport json\nimport time\nimport datetime\n\n# article_data = \"test_article.xml\";\narticle_data = \"/home/konstantina/data/semeval/articles-training-20180831.xml\";\n# ground_truth_data = \"test_article.ground_truth.xml\";\nground_truth_data = \"/home/konstantina/data/semeval/ground-truth-training-20180831.xml\";\n\ndef get_node_text(nodelist):\n rc = [];\n for node in nodelist:\n # from documentation\n if node.nodeType == node.TEXT_NODE:\n rc.append(node.data);\n else:\n # recursive\n rc.append(get_node_text(node.childNodes));\n return ''.join(rc);\n\nstart_time = time.time();\narticles_xml = minidom.parse(article_data, bufsize=None); # takes forever for big file\n# tree = etree.parse(article_data); # this as well\nprint(str(datetime.timedelta(seconds=time.time() - start_time)));\narticle_nodes = articles_xml.getElementsByTagName('article');\nprint(\"{} articles in this file\".format(len(article_nodes)));\nlabels_xml = minidom.parse(ground_truth_data);\narticle_nodes_with_labels = labels_xml.getElementsByTagName('article');\narticle_label_map = dict(); # from doc id to binary hyperpartisanship\nfor article_label in article_nodes_with_labels:\n article_label_map[article_label.attributes['id'].value]= article_label.attributes['hyperpartisan'].value;\narticles_json = dict(); # to transform xml file to json file\narticles_json[\"articles\"] = list();\nfor a in article_nodes:\n article_id = a.attributes['id'].value;\n article_title = a.attributes['title'].value;\n article_date = a.attributes['published-at'].value;\n # print(\"id={}, published-at={}, title={}\".format(article_id, article_date, article_title));\n paragraphs = a.getElementsByTagName('p');\n # print(\"{} paragraphs\".format(len(paragraphs)));\n article_text = \"\";\n for p in paragraphs:\n article_text += get_node_text(p.childNodes);\n # print(\"Article:\\n{}\".format(article_text));\n # print(\"Hyperpartisan:\\n{}\".format(article_label_map[a.attributes['id'].value]));\n current_article_json = dict();\n current_article_json[\"id\"] = article_id;\n current_article_json[\"published-at\"] = article_date;\n current_article_json[\"title\"] = article_title;\n current_article_json[\"text\"] = article_text;\n current_article_json[\"hyperpartisan\"] = article_label_map[a.attributes['id'].value];\n articles_json[\"articles\"].append(current_article_json);\n if len(articles_json)%1000 == 0:\n print(\"{} articles in dict\".format(len(articles_json)));\n# print(articles_json);\nwith open(\"/home/konstantina/data/semeval/\" +\n article_data.split(\"/\")[len(article_data.split(\"/\"))-1] + \".json\", 'wb') as outputf:\n json.dump(articles_json, outputf);\n\n", "sub_path": "semeval/xml_to_json_slow.py", "file_name": "xml_to_json_slow.py", "file_ext": "py", "file_size_in_byte": 2731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.time", "line_number": 22, 
"usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 23, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 28, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "643970079", "text": "# Copyright 2014-2015 Canonical Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Remote Python Debugger (pdb wrapper).\"\"\"\n\nimport pdb\nimport socket\nimport sys\n\n__author__ = \"Bertrand Janin \"\n__version__ = \"0.1.3\"\n\n\nclass Rpdb(pdb.Pdb):\n\n def __init__(self, addr=\"127.0.0.1\", port=4444):\n \"\"\"Initialize the socket and initialize pdb.\"\"\"\n\n # Backup stdin and stdout before replacing them by the socket handle\n self.old_stdout = sys.stdout\n self.old_stdin = sys.stdin\n\n # Open a 'reusable' socket to let the webapp reload on the same port\n self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n self.skt.bind((addr, port))\n self.skt.listen(1)\n (clientsocket, address) = self.skt.accept()\n handle = clientsocket.makefile('rw')\n pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)\n sys.stdout = sys.stdin = handle\n\n def shutdown(self):\n \"\"\"Revert stdin and stdout, close the socket.\"\"\"\n sys.stdout = self.old_stdout\n sys.stdin = self.old_stdin\n self.skt.close()\n self.set_continue()\n\n def do_continue(self, arg):\n \"\"\"Stop all operation on ``continue``.\"\"\"\n self.shutdown()\n return 1\n\n do_EOF = do_quit = do_exit = do_c = do_cont = do_continue\n", "sub_path": "hooks/charmhelpers/fetch/python/rpdb.py", "file_name": "rpdb.py", "file_ext": "py", "file_size_in_byte": 1910, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pdb.Pdb", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 32, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 35, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 35, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 35, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 36, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pdb.Pdb.__init__", "line_number": 41, "usage_type": "call"}, {"api_name": "pdb.Pdb", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 42, "usage_type": "attribute"}, 
{"api_name": "sys.stdout", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "328948535", "text": "import imaplib, email\nimport re\n\n\ndef config(user, password, imap_url, box_select):\n con = imaplib.IMAP4_SSL(imap_url)\n con.login(user, password)\n con.select(box_select)\n return con\n\n\ndef get_body(e_mail):\n if e_mail.is_multipart():\n return get_body(e_mail.get_payload(0))\n else:\n e_mail.get_payload(None, True)\n\n\ndef search(key, value,con):\n result, data = con.search(None, key, \"{}\".format(value))\n return data\n\n\ndef get_emails(result_bytes, con):\n e_mails = []\n for num in result_bytes[0].split():\n typ, data = con.fetch(num, '(RFC822)')\n data = email.message_from_bytes(data[0][1])\n e_mails.append(data)\n return e_mails\n\n\ndef get_attechment(e_mail,filename):\n import os\n for part in e_mail.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('Content-Disposition') is None:\n continue\n file = part.get_filename()\n extension = \".\" + file.split('.')[1]\n if bool(file):\n with open(file, 'wb') as f:\n f.write(part.get_payload(decode=True))\n os.rename(file,filename + extension)\n\n\ncon = config(\"kunjvadodariya040798@gmail.com\", \"Kunjvadodariya040798#\", \"imap.gmail.com\", 'INBOX')\nsearch = search(None, \"ALL\",con)\ne_mails = get_emails(search,con)\n\nfor e_mail in e_mails:\n if re.match(\"(Date_of_)([0-2][0-9]|(3)[0-1])(-)(((0)[0-9])|((1)[0-2]))(-)\\d{4}#(\\d)+\" , e_mail['Subject']):\n filename = \"date\" + str(e_mail['Subject'].split('#')[0][7:]) + \"_\" + str(e_mail['Subject'].split('#')[1])\n get_attechment(e_mail,filename)\n\n\n\n", "sub_path": "Python/Tasks/Read_Mail/imap.py", "file_name": "imap.py", "file_ext": "py", "file_size_in_byte": 1624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "imaplib.IMAP4_SSL", "line_number": 6, "usage_type": "call"}, {"api_name": "email.message_from_bytes", "line_number": 28, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 45, "usage_type": "call"}, {"api_name": "re.match", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "141092386", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, HttpRequest\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport cx_Oracle\nimport os\nfrom django.views import generic\nfrom django.db import connections\n\n\ndef get_roam_countries(request, msisdn):\n cursor = connections['ppcdb_pooled'].cursor()\n cur = cursor.connection.cursor()\n\n i_subs_id=None\n i_int_request_id=1\n i_client_app_type=\"MyTcell_Lite_Web\"\n i_client_app_version=\"v1\"\n o_st_id = cur.var(cx_Oracle.NUMBER)\n o_roam_countries = cursor.connection.cursor()\n o_exit_location_id = cur.var(cx_Oracle.STRING)\n o_responce_id = cur.var(cx_Oracle.NUMBER)\n o_result = cur.var(cx_Oracle.NUMBER)\n o_err_msg = cur.var(cx_Oracle.STRING)\n\n cur.callproc('mytcell_lite_pack.get_roam_countries', (\n i_subs_id, msisdn, i_int_request_id, i_client_app_type, i_client_app_version, o_st_id,\n o_roam_countries, o_exit_location_id, o_responce_id, o_result, o_err_msg))\n\n columns = [i[0] for i in o_roam_countries.description]\n\n countries={}\n subs_type={}\n countries['list']=[dict(zip(columns, row)) for row in o_roam_countries]\n subs_type['subs_type']=o_st_id.getvalue()\n 
countries['subs_info']=[subs_type]\n\n countries['results']=[{\n 'o_exit_location_id' : o_exit_location_id.getvalue(),\n 'o_responce_id' : int(o_responce_id.getvalue()),\n 'err_code' : int(o_result.getvalue()),\n 'err_msg' : o_err_msg.getvalue()\n }]\n\n cur.close()\n cursor.close()\n\n return render(request, \"mytcell_lite_app/roaming.html\", context=countries)\n", "sub_path": "mytcell_lite_app/roam_countries_view_pooled.py", "file_name": "roam_countries_view_pooled.py", "file_ext": "py", "file_size_in_byte": 1715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.connections", "line_number": 14, "usage_type": "name"}, {"api_name": "cx_Oracle.NUMBER", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cx_Oracle.STRING", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cx_Oracle.NUMBER", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cx_Oracle.NUMBER", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cx_Oracle.STRING", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "276959929", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/hanzz/releases/odcs/server/odcs/server/events.py\n# Compiled at: 2018-06-04 03:42:23\nfrom threading import Lock\nfrom sqlalchemy.orm import attributes\nfrom logging import getLogger\nlog = getLogger()\n_cache_lock = Lock()\n_cached_composes = {}\n\ndef cache_composes_if_state_changed(session, flush_context):\n \"\"\"Prepare outgoing messages when compose state is changed\"\"\"\n from odcs.server.models import Compose\n composes = (item for item in session.new | session.dirty if isinstance(item, Compose))\n composes_state_changed = (compose for compose in composes if not attributes.get_history(compose, 'state').unchanged)\n with _cache_lock:\n for comp in composes_state_changed:\n if comp.id not in _cached_composes:\n _cached_composes[comp.id] = []\n _cached_composes[comp.id].append(comp.json())\n\n log.debug('Cached composes to be sent due to state changed: %s', _cached_composes.keys())\n\n\ndef start_to_publish_messages(session):\n \"\"\"Publish messages after data is committed to database successfully\"\"\"\n import odcs.server.messaging as messaging\n with _cache_lock:\n msgs = []\n for compose_jsons in _cached_composes.values():\n for compose_json in compose_jsons:\n msgs.append({'event': 'state-changed', \n 'compose': compose_json})\n\n log.debug('Sending messages: %s', msgs)\n if msgs:\n try:\n messaging.publish(msgs)\n except Exception:\n log.exception('Cannot publish message to bus.')\n\n _cached_composes.clear()", "sub_path": "pycfiles/odcs-0.2.46.tar/events.py", "file_name": "events.py", "file_ext": "py", "file_size_in_byte": 1760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 11, "usage_type": "call"}, {"api_name": "odcs.server.models.Compose", "line_number": 17, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.attributes.get_history", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.attributes", "line_number": 18, "usage_type": "name"}, {"api_name": "odcs.server.messaging.publish", 
"line_number": 41, "usage_type": "call"}, {"api_name": "odcs.server.messaging", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "422932979", "text": "# coding=utf-8\n\nimport abc\nimport datetime\nimport io\nimport logging\nimport importlib\nfrom pathlib import Path\nfrom queue import Queue\nfrom string import Template\nfrom typing import Optional, Iterable, Generator\nfrom multiprocessing import Pool\n\n\nimport requests\n\nimport arcturus.ArcturusSources.Source as Source\nfrom .import ArcturusSources\nfrom .Blacklist import Blacklist\nfrom .Post import Post\nfrom .Taglist import Query\n\nNAME = \"Arcturus\"\n\nPYTHON_REQUIRED_MAJOR = 3\nPYTHON_REQUIRED_MINOR = 6\n\n\n\n\nclass ArcturusError(Exception):\n \"\"\"base exception class for all Arcturus exceptions\"\"\"\n\nclass ArcturusCore:\n \"\"\"central class of the program which takes configuration information and downloads from a data source\"\"\"\n\n def __init__(self,\n source: Source,\n taglist: Iterable[Query],\n download_dir: Path,\n lastrun: Optional[datetime.date],\n blacklist: Optional[Blacklist],\n cache: Optional[io.TextIOBase],\n **kwargs\n ):\n\n # required args\n self._source = source\n self._taglist = taglist\n self._download_dir = download_dir\n\n # optional args\n self._lastrun = lastrun\n self._blacklist = blacklist\n self._cache = cache\n self._threads = kwargs.get('download_threads', 4)\n self._nameformat = kwargs.get('download_nameformat', \"${artist}_${md5}.${ext}\")\n self._kwargs = kwargs\n\n self._log = logging.getLogger()\n\n # attributes\n self._pending_downloads = Queue()\n\n @classmethod\n def import_arcturus_source(cls, source_name):\n return importlib.import_module(f'.ArcturusSources.{source_name}', __package__)\n\n def _get_posts(self) -> Generator[Post, None, None]:\n for line in self._taglist:\n lastrun = self._lastrun\n if line.ignore_lastrun:\n lastrun = None\n for post in self._source.get_posts(query=line.text, alias=line.alias, lastrun=lastrun):\n\n # these are the individual images / movies / files\n\n # it has been previously downloaded. 
don't download it again\n if self._cache and post.md5 in self._cache:\n continue\n\n # if we have a blacklist and this shouldn't be downloaded based on it, skip it\n if self._blacklist and self._blacklist.is_blacklisted(post.tags):\n continue\n\n yield post\n\n def _download_single(self, post: Post):\n\n filename = Template(self._nameformat).substitute(post.__dict__)\n destination = self._download_dir / Path(filename)\n response = requests.get(post.url, stream=True)\n with open(destination, \"wb\") as handle: # close the file even on errors\n for chunk in response.iter_content(chunk_size=512):\n if chunk: # filter out keep-alive new chunks\n handle.write(chunk)\n\n def _print_post(self, post: Post):\n print(post.url)\n\n def update(self, namefmt: Optional[str] = None, download_method=_download_single):\n # iterate the post generator once, printing and downloading each post;\n # download_method is a plain function on the class, so self is passed\n # explicitly instead of mapping over multiprocessing.Pool, which would\n # have to pickle unpicklable instance state (logger, queue)\n for post in self._get_posts():\n print(post)\n download_method(self, post)\n", "sub_path": "arcturus/ArcturusCore.py", "file_name": "ArcturusCore.py", "file_ext": "py", "file_size_in_byte": 3206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "arcturus.ArcturusSources.Source", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 39, "usage_type": "name"}, {"api_name": "Taglist.Query", "line_number": 39, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "Blacklist.Blacklist", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "io.TextIOBase", "line_number": 43, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 60, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 63, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 67, "usage_type": "call"}, {"api_name": "typing.Generator", "line_number": 69, "usage_type": "name"}, {"api_name": "Post.Post", "line_number": 69, "usage_type": "name"}, {"api_name": "Post.Post", "line_number": 88, "usage_type": "name"}, {"api_name": "string.Template", "line_number": 90, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 91, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 92, "usage_type": "call"}, {"api_name": "Post.Post", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "161931935", "text": "# Databricks notebook source\n\n\n# COMMAND ----------\n\nfrom datetime import datetime\nimport pytz\nfrom pytz import timezone\nmnt = \"/mnt/entadls\"\nDLLocation = mnt+\"/curated/internal/product/rgis/\"\nDLLocationArchive = mnt+\"/curated/internal/product/rgis/archive/\"\nfileName = \"RGISProductDetails.csv\"\ntodaydate = datetime.now(tz=pytz.utc).astimezone(timezone('US/Pacific')).strftime('%Y-%m-%d')\nNewFileName = \"RGISProductDetails_\"+todaydate+\".csv\"\n# FileLocation = \"wasbs://\"+STORAGE_CONTAINER+\"@\"+STORAGE_ACCOUNT_ENV\n# containerfiles = dbutils.fs.ls(FileLocation)\nDLFiles = dbutils.fs.ls(DLLocation)\nfor file in DLFiles:\n if fileName in file.name:\n print(f'File {fileName} exists')\n break\n #Archive the existing file in DL\n# 
dbutils.fs.mv(DLLocation+fileName, DLLocationArchive+NewFileName)\n# dbutils.fs.cp(file.path, DLLocation)\n \n\n# COMMAND ----------\n\n\n", "sub_path": "C1-SIT3/mplk_automation/rgis/RGISProductDetails.py", "file_name": "RGISProductDetails.py", "file_ext": "py", "file_size_in_byte": 872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "610063663", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division # the result of a division will be a float\n\nimport Tweet_Functions\nimport conf\nimport pydisque\nfrom pydisque.client import Client\nimport json\nimport time\nfrom threading import Thread, RLock\nfrom pymongo import MongoClient\n\nclient_mongo = MongoClient()\ndb = client_mongo.algo\nverrou = RLock()\n\nclass Algo(Thread):\n\tqueue = ''\n\n\tdef __init__(self, name, client):\n\t\tThread.__init__(self)\n\t\tself.queue = name\n\t\tself.client = client\n\t\tself.num_tweets = 0\n\t\tself.tot = 0\n\n\tdef run(self):\n\t\tprint('>>> new analysis:', self.queue)\n\t\twhile True:\n\t\t\tjobs = self.client.get_job([self.queue])\n\t\t\tfor queue_name, job_id, tweet_struct in jobs:\n\t\t\t\twith verrou:\n\t\t\t\t\ttweet_brut = json.loads(tweet_struct.decode('utf-8'))\n\t\t\t\t\ttweet_brut['original'] = ''\n\t\t\t\t\tif (str(tweet_brut['retweeted']) != 'true'):\n\t\t\t\t\t\ttext = str(tweet_brut['text'])\n\t\t\t\t\t\tlang = str(tweet_brut['lang'])\n\t\t\t\t\t\ttweet = Tweet_Functions.Tweet(text, lang)\n\t\t\t\t\t\tresult_job = tweet.Process()\n\t\t\t\t\t\tif result_job != 0:\n\t\t\t\t\t\t\tself.num_tweets += 1\n\t\t\t\t\t\t\tself.tot += float(result_job['positivity'])\n\t\t\t\t\t\t\taverage = self.tot / self.num_tweets\n\t\t\t\t\t\t\ttweet_brut['positivity'] = float(result_job['positivity'])\n\t\t\t\t\t\t\ttweet_brut['average'] = average\n\t\t\t\t\t\t\ttweet_brut['top_word'] = result_job['top_word']\n\t\t\t\t\t\t\tself.client.add_job('toProcess_res', json.dumps(tweet_brut), timeout=100)\n\t\t\t\t\t\t\tself.client.ack_job(job_id)\n\t\t\t\t\t\t\tAlgo.stock_ML(job_id.decode('utf-8'), text, result_job['positivity'])\n\t\t\t\t\telse:\n\t\t\t\t\t\ttweet_brut['original'] = tweet_brut['retweeted_status']['id_str']\n\t\t\t\t\t\ttweet_brut['positivity'] = 0.0\n\t\t\t\t\t\ttweet_brut['average'] = self.tot / self.num_tweets if self.num_tweets else 0.0\n\t\t\t\t\t\ttweet_brut['top_word'] = ''\n\t\t\t\t\t\tself.client.add_job('toProcess_res', json.dumps(tweet_brut), timeout=100)\n\t\t\t\t\t\tself.client.ack_job(job_id)\n\n\tdef stock_ML(id, tweet, positivity):\n\t\t# save the tweet to the database\n\t\ttweets = db.tweetml\n\t\tprint('>>> id:', id, '>>> tweet:', tweet, '>>> positivity:', float(positivity))\n\t\tif tweets.find({ 'idTweet': id }).count() == 0:\n\t\t\ttweets.insert({\n\t\t\t\t'idTweet': id,\n\t\t\t\t'tweet': tweet,\n\t\t\t\t'value': float(positivity)\n\t\t\t})\n\t\telse:\n\t\t\ttweets.update({ 'idTweet': id }, { '$set': { 'value': float(positivity) }})\n", "sub_path": "Sentiment_Analysis.py", "file_name": "Sentiment_Analysis.py", "file_ext": "py", "file_size_in_byte": 2243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pymongo.MongoClient", "line_number": 15, 
"usage_type": "call"}, {"api_name": "threading.RLock", "line_number": 17, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 23, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "Tweet_Functions.Tweet", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "341244024", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright 2018 NAVER Corp.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport nsml\nfrom nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML\nfrom dataset import KinQueryDataset, preprocess\n\n\n# DONOTCHANGE: They are reserved for nsml\n# This is for nsml leaderboard\ndef bind_model(sess, config):\n # Function that saves the trained model.\n def save(dir_name, *args):\n # directory\n os.makedirs(dir_name, exist_ok=True)\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(dir_name, 'model'))\n\n # Function that loads a saved model.\n def load(dir_name, *args):\n saver = tf.train.Saver()\n # find checkpoint\n ckpt = tf.train.get_checkpoint_state(dir_name)\n if ckpt and ckpt.model_checkpoint_path:\n checkpoint = os.path.basename(ckpt.model_checkpoint_path)\n saver.restore(sess, os.path.join(dir_name, checkpoint))\n\n else:\n raise NotImplementedError('No checkpoint!')\n print('Model loaded')\n\n def infer(raw_data, **kwargs):\n\n left_preprocessed_data, right_preprocessed_data = preprocess(raw_data, config.strmaxlen)\n # feed the input into the saved model and receive the prediction result\n pred = sess.run(output, feed_dict={x_1: left_preprocessed_data, x_2: right_preprocessed_data})\n clipped = np.array(pred > config.threshold, dtype=np.int)\n return list(zip(pred.flatten(), clipped.flatten()))\n\n # DONOTCHANGE: They are reserved for nsml\n # Function that lets nsml access the functions defined above.\n nsml.bind(save=save, load=load, infer=infer)\n\n\ndef _batch_loader(iterable, n=1):\n \"\"\"\n Slices the data into batch-sized chunks and yields them. 
It plays the same role as PyTorch's DataLoader\n\n :param iterable: data list, or another format\n :param n: batch size\n :return:\n \"\"\"\n length = len(iterable)\n for n_idx in range(0, length, n):\n yield iterable.left_data[n_idx:min(n_idx + n, length)],\\\n iterable.right_data[n_idx:min(n_idx + n, length)], iterable.labels[n_idx:min(n_idx + n, length)]\n\n\n# Don't use\ndef conv_block(k_size=5, h_size=64, input=None):\n # Layer1 64 * 32 * 64 with 2 layer\n Weight = tf.Variable(tf.random_normal([k_size, k_size, 1, h_size], stddev=0.01))\n Layer = tf.nn.conv2d(input, Weight, strides=[1, 1, 1, 1], padding=\"SAME\")\n Layer = tf.nn.relu(Layer)\n Layer = tf.layers.batch_normalization(Layer)\n Layer = tf.nn.max_pool(Layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n return Layer\n\n\ndef inception2d(inputs, input_channel, channel_size):\n\n # bias = tf.Variable(tf.constant(0.1, shape=[channel_size]))\n\n first_weight = tf.Variable(tf.truncated_normal([1, 1, input_channel, channel_size]))\n first_layer = tf.nn.conv2d(inputs, first_weight, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n second_weight = tf.Variable(tf.truncated_normal([3, 3, input_channel, channel_size]))\n second_layer = tf.nn.conv2d(inputs, second_weight, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n third_weight = tf.Variable(tf.truncated_normal([5, 5, input_channel, channel_size]))\n third_layer = tf.nn.conv2d(inputs, third_weight, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n # pooling = tf.nn.avg_pool(inputs, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')\n\n outputs = tf.concat([first_layer, second_layer, third_layer], axis=3)\n # outputs = tf.nn.bias_add(outputs, bias)\n outputs = tf.nn.relu(outputs)\n\n return outputs\n\n\ndef model(embedded):\n\n # ====================== Conv Block 64, 128, 256, 512 =======================\n layer1 = inception2d(embedded, 1, 8)\n layer1 = tf.layers.batch_normalization(layer1)\n layer1 = tf.nn.max_pool(layer1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n layer2 = inception2d(layer1, 24, 32)\n layer2 = tf.layers.batch_normalization(layer2)\n layer2 = tf.nn.max_pool(layer2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n layer3 = inception2d(layer2, 96, 128)\n layer3 = tf.layers.batch_normalization(layer3)\n layer3 = tf.nn.max_pool(layer3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n layer4 = inception2d(layer3, 384, 512)\n layer4 = tf.layers.batch_normalization(layer4)\n layer4 = tf.nn.max_pool(layer4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n layer5 = inception2d(layer4, 1536, 2048)\n layer5 = tf.layers.batch_normalization(layer5)\n\n # =============== FC Layer 1 ===================\n weight6 = tf.Variable(tf.random_normal([4 * 1 * 512 * 12, 1024], stddev=0.01))\n fc_layer6 = tf.reshape(layer5, [-1, 4 * 1 * 512 * 12])\n fc_layer6 = tf.matmul(fc_layer6, weight6)\n fc_layer6 = tf.nn.relu(fc_layer6)\n\n return fc_layer6\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n # DONOTCHANGE: They are reserved for nsml\n args.add_argument('--mode', type=str, default='train')\n args.add_argument('--pause', type=int, default=0)\n args.add_argument('--iteration', type=str, default='0')\n\n # User options\n args.add_argument('--output', type=int, default=1)\n args.add_argument('--epochs', type=int, default=10)\n args.add_argument('--batch', type=int, default=2048)\n args.add_argument('--strmaxlen', type=int, default=64)\n args.add_argument('--embedding', type=int, default=16)\n args.add_argument('--threshold', 
type=float, default=0.5)\n config = args.parse_args()\n\n if not HAS_DATASET and not IS_ON_NSML: # It is not running on nsml\n DATASET_PATH = '../sample_data/kin/'\n\n # Model specification\n input_size = config.embedding*config.strmaxlen # 32 * 256\n learning_rate = 0.000001\n character_size = 256\n strmaxlen = config.strmaxlen\n embedding = config.embedding\n\n x_1 = tf.placeholder(tf.int32, [None, strmaxlen])\n x_2 = tf.placeholder(tf.int32, [None, strmaxlen])\n y_ = tf.placeholder(tf.float32, [None, 1])\n\n # embedding..\n char_embedding = tf.get_variable('char_embedding', [character_size, embedding, 1])\n embedded_1 = tf.nn.embedding_lookup(char_embedding, x_1)\n embedded_2 = tf.nn.embedding_lookup(char_embedding, x_2)\n\n # create different models..\n model_1 = model(embedded=embedded_1) # 1024 * 1\n model_2 = model(embedded=embedded_2) # 1024 * 1\n\n # Concatenate 2 model\n Weight7 = tf.Variable(tf.random_normal([1024 * 2, 2048], stddev=0.01))\n FC1 = tf.matmul(tf.concat([model_1, model_2], 1), Weight7)\n FC1 = tf.nn.relu(FC1)\n\n Weight8 = tf.Variable(tf.random_normal([2048, 1024], stddev=0.01))\n FC2 = tf.matmul(FC1, Weight8)\n FC2 = tf.nn.relu(FC2)\n\n Weight9 = tf.Variable(tf.random_normal([1024, 1], stddev=0.01))\n output = tf.matmul(FC2, Weight9)\n\n # output_sigmoid = tf.nn.sigmoid(output)\n\n # loss and optimizer\n binary_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=output))\n # rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(y_, output_sigmoid))))\n\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(binary_cross_entropy)\n\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n # DONOTCHANGE: Reserved for nsml\n bind_model(sess=sess, config=config)\n\n # DONOTCHANGE: Reserved for nsml\n if config.pause:\n nsml.paused(scope=locals())\n\n if config.mode == 'train':\n # Load the data.\n dataset = KinQueryDataset(DATASET_PATH, config.strmaxlen)\n dataset_len = len(dataset)\n one_batch_size = dataset_len//config.batch\n if dataset_len % config.batch != 0:\n one_batch_size += 1\n # Run training for every epoch.\n for epoch in range(config.epochs):\n avg_loss = 0.0\n for i, (left_data, right_data, labels) in enumerate(_batch_loader(dataset, config.batch)):\n _, loss = sess.run([train_step, binary_cross_entropy], feed_dict={x_1: left_data, x_2: right_data, y_: labels})\n # _, right_loss = sess.run([train_step, rmse], feed_dict={x: left_data, y_: labels})\n loss = float(loss)\n\n print('Batch : ', i + 1, '/', one_batch_size, ', loss in this minibatch: ', loss)\n avg_loss += loss\n print('epoch:', epoch, ' train_loss:', avg_loss/one_batch_size)\n nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,\n train__loss=avg_loss/one_batch_size, step=epoch)\n # DONOTCHANGE (You can decide how often you want to save the model)\n nsml.save(epoch)\n\n # Used in local test mode\n # If the results look like the line below, you can submit via nsml submit.\n # [(0.3, 0), (0.7, 1), ... 
]\n elif config.mode == 'test_local':\n with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:\n queries = f.readlines()\n res = []\n for batch in _batch_loader(queries, config.batch):\n temp_res = nsml.infer(batch)\n res += temp_res\n print(res)", "sub_path": "model/model_test_3.py", "file_name": "model_test_3.py", "file_ext": "py", "file_size_in_byte": 10282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dataset.preprocess", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 63, "usage_type": "attribute"}, {"api_name": "nsml.bind", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 108, 
"usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 132, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 135, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 145, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 151, "usage_type": "call"}, {"api_name": "nsml.HAS_DATASET", "line_number": 166, "usage_type": "name"}, {"api_name": "nsml.IS_ON_NSML", "line_number": 166, "usage_type": "name"}, {"api_name": "nsml.DATASET_PATH", "line_number": 167, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 177, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 177, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 178, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 181, "usage_type": "call"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 182, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 182, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 190, 
"usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 190, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 196, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 198, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 198, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 207, "usage_type": "attribute"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 209, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 210, "usage_type": "call"}, {"api_name": "nsml.paused", "line_number": 217, "usage_type": "call"}, {"api_name": "dataset.KinQueryDataset", "line_number": 221, "usage_type": "call"}, {"api_name": "nsml.DATASET_PATH", "line_number": 221, "usage_type": "argument"}, {"api_name": "nsml.report", "line_number": 237, "usage_type": "call"}, {"api_name": "nsml.save", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "nsml.DATASET_PATH", "line_number": 246, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "nsml.infer", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "525764865", "text": "\nimport sys\nimport os\nimport re\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\nimport seaborn as sns\nsns.set()\n\nbdir = \"/opt/fspdivaprj/xray-1/\"\n#bdir = \"/Users/pankaj.petkar/dev/acc/x-ray-1/\"\nodir = bdir + \"data/visuals/\"\nfdir = odir + \"final/\"\nidir = \"/hotest/dfs01t/work/xray/nm_final/test/\"\n\nc_ts_dur_count = \"g_c_ts_dur_count\"\n\ntags = ['cmp','xfr','pd','def']\ni_ext = \".png\"\n\ndur_fld = 'call_dur'\nlabel_fld = 'call_label'\nts_fld = 'ts1'\ninterval1 = 'month'\ninterval2 = 'dayofweek'\n\ntag_prefix = '_tag_'\n\ndf_dict = {}\nentity_tag = None\n\nXPONUM8X = 'XPONUM8X'\nXSRNUM4X = 'XSRNUM4X'\nXSRNUM5X = 'XSRNUM5X'\nadv_prof_na = 'adv_prof-na'\nadv_prof_prcs_prcd = 'adv_prof-prcs_prcd'\nclnt_prof_na = 'clnt_prof-na'\ncmpt_na = 'cmpt-na'\ncmpt_cmpt_anly_na = 'cmpt_cmpt_anly-na'\nfin_plng_na = 'fin_plng-na'\nmrkt_trng_fin_plng = 'mrkt_trng-fin_plng'\nmrkt_trng_na = 'mrkt_trng-na'\nprcs_prcd_uw_fin_plng = 'prcs_prcd_uw-fin_plng'\nprcs_prcd_uw_na = 'prcs_prcd_uw-na'\nprcs_prcd_uw_sytm_tngy = 
'prcs_prcd_uw-sytm_tngy'\nprd_fetr_bnft_cmpt_cmpt_anly = 'prd_fetr_bnft-cmpt_cmpt_anly'\nprd_fetr_bnft_fin_plng = 'prd_fetr_bnft-fin_plng'\nprd_fetr_bnft_mrkt_trng = 'prd_fetr_bnft-mrkt_trng'\nprd_fetr_bnft_na = 'prd_fetr_bnft-na'\nprd_fetr_bnft_prcs_prcd_uw = 'prd_fetr_bnft-prcs_prcd_uw'\nprd_fetr_bnft_sale_oprt_strg_cros_sell = 'prd_fetr_bnft-sale_oprt_strg_cros_sell'\nprd_fetr_bnft_sytm_tngy = 'prd_fetr_bnft-sytm_tngy'\nsale_oprt_strg_cros_sell_fin_plng = 'sale_oprt_strg_cros_sell-fin_plng'\nsale_oprt_strg_cros_sell_na = 'sale_oprt_strg_cros_sell-na'\nsale_oprt_strg_cros_sell_sytm_tngy = 'sale_oprt_strg_cros_sell-sytm_tngy'\nsytm_tngy_na = 'sytm_tngy-na'\nPAD = 'PAD'\ncmpt_cmpt_anly_sytm_tngy = 'cmpt_cmpt_anly-sytm_tngy'\nprd_fetr_bnft_adv_prof = 'prd_fetr_bnft-adv_prof'\n\ndef loadAllDF():\n print(\"##Loading all dataframes...\")\n \n #set all variables. \n \n filelist = os.listdir(fdir) \n for fname in filelist:\n #load required DF\n m = re.match(\"(.*?)\\\\.csv\",fname)\n if m: \n entity_tag = m.group(1)\n print(\"##Processing entity_tag[{}]\".format(entity_tag))\n else:\n print(\"###file pattern match failed for [{}]\".format(fname))\n continue\n \n df1 = pd.read_csv( fdir + fname)\n df_dict[entity_tag] = df1\n #print(df1.head(2))\n \n return df_dict \n \ndef explore2():\n print(\"##explore2 starting\")\n \n #set all variables. \n idir = \"/opt/fspdivaprj/xray-1/data/nwm/\"\n ifile = \"final_data.csv\"\n \n df1 = pd.read_csv( idir + ifile)\n print(\"#rows in df[{}]\".format(df1.conv_id.count()))\n #df1 = df1[(df1.sent_label == df1.call_tag) & (df1.call_tag != '_tag_unk1')] \n #print(\"#rows in df[{}]\".format(df1.conv_id.count()))\n print(df1.head(2))\n \n #look at call level pred_label's \n print(\"##----------------------------------------------------------\")\n print(\"##Call level ner tags counts\")\n #df2 = df1.groupby(['conv_id','pred_label'])['wv'].count().reset_index()\n df2 = df1[df1.call_tag == '_tag_pd'].groupby(['conv_id','pred_label'])['wv'].count().reset_index()\n df2 = df2.sort_values(['wv'],ascending=[False]) \n print(df2.head(15))\n print(\"Total calls - \",df2.conv_id.nunique()) \n print(\"Calls with wv count > 1 - \",df2[df2.wv > 1].conv_id.nunique()) \n print(\"Unique pred_labels with wv count more than 1 - \",df2[df2.wv > 1].pred_label.nunique()) \n df3 = df2.groupby(['pred_label'])['wv'].mean().reset_index()\n print(df3.head(50))\n \n #look at ner tag type based on call_tags\n print(\"##----------------------------------------------------------\")\n print(\"##ner tags type based on call tags\")\n call_tags = df1.call_tag.unique()\n #call_tags = ['_tag_cmp']\n ner_labels = df1.call_tag.unique()\n #ner_labels = ['mrkt_trng-na']\n ner_labels = ['cmpt_cmpt_anly-na']\n for call_tag in call_tags:\n print(\"----call_tag[{}]-------\".format(call_tag))\n #df3 = df1.groupby(['call_tag','pred_label'])['wv'].count().reset_index()\n df3 = df1[(df1.call_tag == call_tag) & (df1.pred_label.isin(ner_labels))].groupby(['pred_label','word'])['wv'].count().reset_index()\n df3 = df3.sort_values(['wv'],ascending=[False]) \n print(df3.head(3))\n #print(df3[df3.word.str.startswith('sen')].head(50))\n \ndef analyzeNERTagData(df_dict,call_tag_filter,entity,grp1,grp2):\n print(\"##Analyzing ner data call[{}] entity[{}] grp1[{}] grp2[{}]...\".format(call_tag_filter,entity,grp1,grp2))\n \n #set all variables. 
\n cmpt = df_dict[entity]\n cmpt_anly = df_dict[grp1]\n clnt_prof = df_dict[grp2]\n #adv_prof = df_dict[adv_prof_na]\n col_names = ['wv_x','wv_y','wv','freq'] \n rec_limit = 3\n ind_key_for_rec_limit = 2\n o_df = pd.DataFrame(columns=col_names)\n \n df1 = pd.merge(cmpt,cmpt_anly,how='inner',left_on='conv_id',right_on='conv_id')\n #print(df1.head(15))\n #print(\"rowcount[{}]\".format(df1['conv_id'].count()))\n #print(df1.columns)\n df1 = pd.merge(df1,clnt_prof,how='inner',left_on='conv_id',right_on='conv_id')\n #print(df1.head(15))\n #print(df1.columns)\n #print(\"rowcount[{}]\".format(df1['conv_id'].count()))\n df1 = df1[df1['call_tag_x'] == call_tag_filter].groupby(col_names[:-1])[col_names[-1]].sum().reset_index()\n df1 = df1.sort_values([col_names[0],col_names[1],col_names[-1]],ascending=[True,True,False]) \n #df1 = df1.sort_values([col_names[-1]],ascending=[False]) \n #print(df1.head(15))\n #print(df1.columns)\n \n key = None\n prev_key = None \n grp_rec_cnt = 0\n i = 0\n \n for ind,rec in df1.iterrows():\n #if i > 30:\n # break\n grp_rec_cnt += 1\n key = rec[col_names[0]] + '|' + rec[col_names[1]]\n if prev_key == None:\n prev_key = key\n if prev_key != key:\n grp_rec_cnt = 1\n #print(i,prev_key,key,grp_rec_cnt)\n if grp_rec_cnt <= rec_limit:\n #print(rec[col_names[0]],rec[col_names[1]],rec[col_names[2]],rec[col_names[3]])\n #o_df.loc[o_df.shape[0]] = [rec[col_names[0]],rec[col_names[1]],rec[col_names[2]],rec[col_names[-1]]]\n o_df.loc[o_df.shape[0]] = rec[:]\n prev_key = key\n i += 1\n \n print(o_df.head(20))\n ofname = \"res_\" + call_tag_filter + \"_\" + entity + \"_\" + grp1 + \"_df.csv\"\n print(\"##Printing result to [{}]\".format(ofname))\n o_df.to_csv( odir + ofname)\n \n #print(\" Before rows[{}]\".format(df1['conv_id'].count()))\n \ndef processData():\n print(\"##Processing data...\")\n #df_dict = loadAllDF()\n #analyzeNERTagData(df_dict,'_tag_cmp',cmpt_na,cmpt_cmpt_anly_na,adv_prof_na) \n #analyzeNERTagData(df_dict,'_tag_cmp',cmpt_na,cmpt_cmpt_anly_na,prd_fetr_bnftr_na) \n #analyzeNERTagData(df_dict,'_tag_cmp',cmpt_na,cmpt_cmpt_anly_na,prd_fetr_bnft_na) \n explore2()\n \nif __name__ == \"__main__\":\n processData()\n", "sub_path": "src/utils/nwm_util.py", "file_name": "nwm_util.py", "file_ext": "py", "file_size_in_byte": 6637, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.style.use", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 12, "usage_type": "attribute"}, {"api_name": "seaborn.set", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 72, "usage_type": "call"}, {"api_name": "re.match", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 144, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "191558967", "text": "import random\nimport uuid\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.models import User\n\nfrom .statements.ifs import STATEMENTS as IF_STATEMENTS\nfrom .statements.thens import STATEMENTS as THEN_STATEMENTS\nfrom .constants import GameState\nfrom 
.exceptions import ValidationError\n\n\nclass Character(models.Model):\n \"\"\"\n Model that holds information to represent a character\n A Character can join a game and become a Player\n \"\"\"\n\n guid = models.UUIDField(\n default=uuid.uuid4, editable=False, help_text=\"GUID of this character\"\n )\n user = models.ForeignKey(\n User, on_delete=models.CASCADE, help_text=\"The user this character belongs to\"\n )\n exp = models.IntegerField(default=1, help_text=\"Total experience of this player\")\n level = models.IntegerField(default=1, help_text=\"Level of this player\")\n hp = models.IntegerField(default=20, help_text=\"Total health points of this player\")\n talent_points = models.IntegerField(\n default=20, help_text=\"Total available talent points of this player\"\n )\n\n def consume_exp(self, exp):\n \"\"\"\n Consumes given additional experience and level up if conditions are met\n \"\"\"\n self.exp = self.exp + exp\n self.save()\n if False: # figure out exp/level model\n self.level_up()\n\n def level_up(self):\n \"\"\"\n Update the stats of this player according to current level:\n level +1 for every level\n hp +1 for every level\n talent_points +1 for every 2 levels\n \"\"\"\n self.level = self.level + 1\n self.hp = self.hp + 1\n if self.level % 2 == 0:\n self.talent_points = self.talent_points + 1\n self.save()\n\n\nclass PlayerManager(models.Manager):\n \"\"\"\n Manager method provides helper method for model Player\n \"\"\"\n def create_player(self):\n \"\"\"\n Create a new player with random stats\n \"\"\"\n total_points = 30 # Set to 30 for now\n attack = random.randint(0, total_points)\n total_points = total_points - attack\n defense = random.randint(0, total_points)\n total_points = total_points - defense\n agility = total_points\n hp = 30 # Set to 30 for now\n return self.get_queryset().create(\n attack=attack, defense=defense, agility=agility, hp=hp\n )\n\n\nclass Player(models.Model):\n \"\"\"\n Model that holds information to represent a player\n A Player only last through one game\n \"\"\"\n\n guid = models.UUIDField(\n default=uuid.uuid4, editable=False, help_text=\"GUID of this player\"\n )\n attack = models.IntegerField(\n blank=False, null=False, help_text=\"Attack point of this player\"\n )\n defense = models.IntegerField(\n blank=False, null=False, help_text=\"Defense point of this player\"\n )\n agility = models.IntegerField(\n blank=False, null=False, help_text=\"Agility point of this player\"\n )\n hp = models.IntegerField(\n blank=False, null=False, help_text=\"Current health point of this player\"\n )\n\n objects = PlayerManager()\n\n def __str__(self):\n \"\"\"\n String representation of this player\n \"\"\"\n return (\n f\"HP: {self.hp} | Attack: {self.attack} | \"\n f\"Defense: {self.defense} | Agility: {self.agility}\"\n )\n\n def save(self, force=False, *args, **kwargs):\n \"\"\"\n Set this model to be immutable - we never want the stats to be accidentally modified\n \"\"\"\n if self.pk and not force:\n raise ValidationError(\"Son, just don't\")\n super().save(*args, **kwargs)\n\n @property\n def is_dead(self):\n \"\"\"\n Property that returns whether this player is dead\n \"\"\"\n return self.hp <= 0\n\n\nclass Game(models.Model):\n \"\"\"\n Model holds information of a game\n \"\"\"\n\n guid = models.UUIDField(\n default=uuid.uuid4, editable=False, help_text=\"GUID of this game\"\n )\n player1 = models.ForeignKey(\n Player,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n help_text=\"Player 1 of this game\",\n )\n 
player1_user = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n help_text=\"The user player1 represents\",\n )\n player2 = models.ForeignKey(\n Player,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n help_text=\"Player 2 of this game\",\n )\n player2_user = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n help_text=\"The user player2 represents\",\n )\n winner = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"won_games\",\n help_text=\"Winner of the game\",\n )\n loser = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"lost_games\",\n help_text=\"Loser of the game\",\n )\n is_draw = models.BooleanField(default=False)\n\n def __str__(self):\n \"\"\"\n String representation of this game\n \"\"\"\n if self.is_draw:\n return \"Game ended. Draw game\"\n if self.winner:\n return f\"Game ended. Winner: {self.winner} | Loser: {self.loser}\"\n return \"Game is still going\"\n\n @property\n def state(self):\n \"\"\"\n Get the current state of this game\n \"\"\"\n if self.player1_user is None and self.player2_user is None:\n return GameState.EMPTY_GAME.value\n if self.player1_user is None or self.player2_user is None:\n return GameState.HALF_JOINED_GAME.value\n # Now we know all players are joined\n if self.player1 is None and self.player2 is None:\n return GameState.SETUP_GAME.value\n if self.player1 is None or self.player2 is None:\n return GameState.HALF_SETUP_GAME.value\n if self.winner:\n return GameState.END_GAME.value\n if self.is_draw:\n return GameState.DRAW_GAME.value\n last_move = self.get_last_move()\n if last_move is None or last_move.is_complete:\n return GameState.GENERATE_MOVE.value\n if last_move.if_statement == \"\" and last_move.then_statement == \"\":\n return GameState.WAITING_FOR_TWO_MOVES.value\n if last_move.if_statement == \"\" or last_move.then_statement == \"\":\n return GameState.WAITING_FOR_ONE_MOVE.value\n\n def get_last_move(self):\n \"\"\"\n Get latest move on the given game\n \"\"\"\n return self.move_set.order_by(\"id\").last()\n\n def join(self, user):\n \"\"\"\n Join the given user to this game if that's possible\n ValidationError will be thrown if not\n \"\"\"\n game_state = self.state\n if not (\n game_state == GameState.EMPTY_GAME.value\n or game_state == GameState.HALF_JOINED_GAME.value\n ):\n raise ValidationError(message=\"The game cannot be joined right now\")\n if not user.is_authenticated:\n raise ValidationError(message=\"You must sign in in order to join the game\")\n if self.player1_user == user or self.player2_user == user:\n raise ValidationError(message=\"You have already joined this game\")\n if self.player1_user is None:\n self.player1_user = user\n return self.save()\n if self.player1_user is not None and self.player2_user is None:\n self.player2_user = user\n self.save()\n return self.setup(user)\n raise ValidationError(message=\"You cannot join since the game is full\")\n\n def setup(self, user):\n \"\"\"\n Allow user to distribute stats on their own\n FOR NOW, SKIPPING THAT AND PRESET THE SAME STATS FOR BOTH PLAYERS\n \"\"\"\n self.player1 = Player.objects.create_player()\n self.player2 = Player.objects.create_player()\n self.save()\n self.generate_empty_move()\n\n def generate_empty_move(self):\n if self.state != GameState.GENERATE_MOVE.value:\n raise ValidationError(\"A move cannot be generated right 
now\")\n self.move_set.create(\n if_user=self.player1_user,\n if_statement_options=\",\".join(random.sample(list(IF_STATEMENTS.keys()), 3)),\n then_user=self.player2_user,\n then_statement_options=\",\".join(\n random.sample(list(THEN_STATEMENTS.keys()), 3)\n ),\n )\n\n def make_move(self, user, statement_id):\n game_state = self.state\n if not (\n game_state == GameState.WAITING_FOR_ONE_MOVE.value\n or game_state == GameState.WAITING_FOR_TWO_MOVES.value\n ):\n raise ValidationError(\"A move cannot be made right now\")\n last_move = self.get_last_move()\n if last_move.if_user == user:\n if last_move.if_statement == \"\":\n if statement_id in last_move.if_statement_options.split(\",\"):\n last_move.if_statement = statement_id\n last_move.save()\n else:\n raise ValidationError(\n \"You can only choose a move from available moves\"\n )\n else:\n raise ValidationError(\"You cannot retract your previous choice\")\n elif last_move.then_user == user:\n if last_move.then_statement == \"\":\n if statement_id in last_move.then_statement_options.split(\",\"):\n last_move.then_statement = statement_id\n last_move.save()\n else:\n raise ValidationError(\n \"You can only choose a move from available moves\"\n )\n else:\n raise ValidationError(\"You cannot retract your previous choice\")\n last_move.refresh_from_db()\n if last_move.is_complete:\n self.play()\n\n def play_single_move(self, move):\n \"\"\"\n Evaluate a single move on the current game\n Move is passed in as a parameter so you can potentially apply any arbitrary move to a game\n just to see the outcome\n \"\"\"\n if move.is_complete:\n move.evaluate()\n else:\n raise ValidationError(\"You are playing a move that's not completed\")\n\n def play(self):\n \"\"\"\n TRY to apply given move on the game and see what happens\n self.player1 and self.player2 will be manipulated in memory to render the result,\n but they will not be written into the database\n \"\"\"\n if self.winner or self.loser or self.is_draw:\n raise ValidationError(\"This game is already over\")\n self.refresh_from_db()\n for move in self.move_set.order_by(\"id\"):\n try:\n self.play_single_move(move)\n except ValidationError:\n break\n if self.player1.is_dead or self.player2.is_dead:\n if self.player1.is_dead and self.player2.is_dead:\n self.is_draw = True\n if self.player1.is_dead:\n self.winner = self.player2_user\n self.loser = self.player1_user\n if self.player2.is_dead:\n self.winner = self.player1_user\n self.loser = self.player2_user\n return self.save()\n self.generate_empty_move()\n\n\nclass Move(models.Model):\n \"\"\"\n Model holds information of a move\n \"\"\"\n\n guid = models.UUIDField(\n default=uuid.uuid4, editable=False, help_text=\"GUID of this move\"\n )\n game = models.ForeignKey(\n Game, on_delete=models.CASCADE, help_text=\"The game that this move happens in\"\n )\n if_user = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n help_text=\"The user who chooses the if statement\",\n )\n if_statement_options = models.CharField(\n max_length=255, help_text=\"Available if statement ids\"\n )\n if_statement = models.CharField(max_length=255, help_text=\"Executable if statement\")\n then_user = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n help_text=\"The user who chooses the then statement\",\n )\n then_statement_options = models.CharField(\n max_length=255, help_text=\"Available then statement ids\"\n )\n then_statement = models.CharField(\n max_length=255, 
help_text=\"Executable then statement\"\n )\n\n def __str__(self):\n \"\"\"\n String representation of this move\n \"\"\"\n return f\"If {self.if_statement}, then {self.then_statement}\"\n\n @property\n def is_complete(self):\n \"\"\"\n Return true if both if_statement and then_statement are filled in\n \"\"\"\n return self.if_statement != \"\" and self.then_statement != \"\"\n\n def evaluate(self):\n \"\"\"\n Evaluate the condition and event statements and update the stats of each player\n\n if_statement examples:\n \"operating_player.attack > opponent_player.attack\" (Player who has higher attack)\n \"operating_player.hp > 30\" (Player whose hp is higher than 30)\n\n then_statement examples:\n \"operating_player.attack = operating_player.attack + 5\" (Will increase attack by 5)\n \"operating_player.hp = operating_player.hp - opponent_player.attack\"\n (Will lose hp by the attack of the other player)\n \"\"\"\n player1 = self.game.player1\n player2 = self.game.player2\n if IF_STATEMENTS[self.if_statement].evaluate(\n operating_player=player1, opponent_player=player2\n ):\n # if (self.game.player1.attack > self.game.player2.attack)\n THEN_STATEMENTS[self.then_statement].execute(\n operating_player=player1, opponent_player=player2\n )\n\n # self.game.player1.hp = self.game.player1.hp - self.game.player2.attack\n # Switch the order and execute again to make sure we update the right player\n if IF_STATEMENTS[self.if_statement].evaluate(\n operating_player=player2, opponent_player=player1\n ):\n THEN_STATEMENTS[self.then_statement].execute(\n operating_player=player2, opponent_player=player1\n )\n", "sub_path": "ifthen/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 14677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "django.db.models.Model", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": 
"random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 94, "usage_type": "name"}, {"api_name": "exceptions.ValidationError", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 125, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 130, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 131, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 137, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 137, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 141, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 142, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 141, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 145, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 149, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 149, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 153, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 153, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 157, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 158, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 157, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 161, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 161, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 165, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 166, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 165, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 169, "usage_type": "name"}, {"api_name": 
"django.db.models.ForeignKey", "line_number": 173, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 174, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 173, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 177, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 177, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 181, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 181, "usage_type": "name"}, {"api_name": "constants.GameState.EMPTY_GAME", "line_number": 199, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 199, "usage_type": "name"}, {"api_name": "constants.GameState.HALF_JOINED_GAME", "line_number": 201, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 201, "usage_type": "name"}, {"api_name": "constants.GameState.SETUP_GAME", "line_number": 204, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 204, "usage_type": "name"}, {"api_name": "constants.GameState.HALF_SETUP_GAME", "line_number": 206, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 206, "usage_type": "name"}, {"api_name": "constants.GameState.END_GAME", "line_number": 208, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 208, "usage_type": "name"}, {"api_name": "constants.GameState.DRAW_GAME", "line_number": 210, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 210, "usage_type": "name"}, {"api_name": "constants.GameState.GENERATE_MOVE", "line_number": 213, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 213, "usage_type": "name"}, {"api_name": "constants.GameState.WAITING_FOR_TWO_MOVES", "line_number": 215, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 215, "usage_type": "name"}, {"api_name": "constants.GameState.WAITING_FOR_ONE_MOVE", "line_number": 217, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 217, "usage_type": "name"}, {"api_name": "constants.GameState.EMPTY_GAME", "line_number": 232, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 232, "usage_type": "name"}, {"api_name": "constants.GameState.HALF_JOINED_GAME", "line_number": 233, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 233, "usage_type": "name"}, {"api_name": "exceptions.ValidationError", "line_number": 235, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 237, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 239, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 247, "usage_type": "call"}, {"api_name": "constants.GameState.GENERATE_MOVE", "line_number": 260, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 260, "usage_type": "name"}, {"api_name": "exceptions.ValidationError", "line_number": 261, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 264, "usage_type": "call"}, {"api_name": "statements.ifs.STATEMENTS.keys", "line_number": 264, "usage_type": "call"}, {"api_name": "statements.ifs.STATEMENTS", "line_number": 264, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 267, "usage_type": "call"}, {"api_name": "statements.thens.STATEMENTS.keys", "line_number": 267, "usage_type": "call"}, 
{"api_name": "statements.thens.STATEMENTS", "line_number": 267, "usage_type": "name"}, {"api_name": "constants.GameState.WAITING_FOR_ONE_MOVE", "line_number": 274, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 274, "usage_type": "name"}, {"api_name": "constants.GameState.WAITING_FOR_TWO_MOVES", "line_number": 275, "usage_type": "attribute"}, {"api_name": "constants.GameState", "line_number": 275, "usage_type": "name"}, {"api_name": "exceptions.ValidationError", "line_number": 277, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 285, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 289, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 296, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 300, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 314, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 323, "usage_type": "call"}, {"api_name": "exceptions.ValidationError", "line_number": 328, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 343, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 343, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 348, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 348, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 349, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 351, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 351, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 352, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 352, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 354, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 355, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 354, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 358, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 358, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 362, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 362, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 365, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 365, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 366, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 367, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 366, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 370, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 370, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 374, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 374, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 377, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 377, "usage_type": "name"}, {"api_name": "statements.ifs.STATEMENTS", "line_number": 409, "usage_type": "name"}, {"api_name": "statements.thens.STATEMENTS", "line_number": 413, "usage_type": "name"}, {"api_name": 
"statements.ifs.STATEMENTS", "line_number": 419, "usage_type": "name"}, {"api_name": "statements.thens.STATEMENTS", "line_number": 422, "usage_type": "name"}]} +{"seq_id": "32890507", "text": "\"\"\"\n@author: jpzxshi & zen\n\"\"\"\nimport os\nimport time\nimport numpy as np\nimport torch\n\nfrom .nn import LossNN\nfrom .utils import timing, cross_entropy_loss\n\nclass Brain:\n '''Runner based on torch.\n '''\n brain = None\n \n @classmethod\n def Init(cls, data, net, criterion, optimizer, lr, iterations, lbfgs_steps, path = None, batch_size=None, \n batch_size_test=None, weight_decay=0, print_every=1000, save=False, callback=None, dtype='float', device='cpu'):\n cls.brain = cls(data, net, criterion, optimizer, lr, weight_decay, iterations, lbfgs_steps, path, batch_size, \n batch_size_test, print_every, save, callback, dtype, device)\n \n @classmethod\n def Run(cls):\n cls.brain.run()\n \n @classmethod\n def Run_reservoir(cls):\n cls.brain.run_reservoir()\n \n @classmethod\n def Restore(cls):\n cls.brain.restore()\n \n @classmethod\n def Output(cls, data=True, best_model=True, loss_history=True, info=None, **kwargs):\n cls.brain.output(data, best_model, loss_history, info, **kwargs)\n \n @classmethod\n def Loss_history(cls):\n return cls.brain.loss_history\n \n @classmethod\n def Encounter_nan(cls):\n return cls.brain.encounter_nan\n \n @classmethod\n def Best_model(cls):\n return cls.brain.best_model\n \n def __init__(self, data, net, criterion, optimizer, lr, weight_decay, iterations, lbfgs_steps, path, batch_size, \n batch_size_test, print_every, save, callback, dtype, device):\n self.data = data\n self.net = net\n self.criterion = criterion\n self.optimizer = optimizer\n self.lr = lr\n self.weight_decay = weight_decay\n self.iterations = iterations\n self.lbfgs_steps = lbfgs_steps\n self.path = path\n self.batch_size = batch_size\n self.batch_size_test = batch_size_test\n self.print_every = print_every\n self.save = save\n self.callback = callback\n self.dtype = dtype\n self.device = device\n \n self.loss_history = None\n self.encounter_nan = False\n self.best_model = None\n \n self.__optimizer = None\n self.__criterion = None\n \n @timing\n def run(self):\n self.__init_brain()\n print('Training...', flush=True)\n loss_history = []\n for i in range(self.iterations + 1):\n X_train, y_train = self.data.get_batch(self.batch_size)\n loss = self.__criterion(self.net(X_train), y_train)\n if i % self.print_every == 0 or i == self.iterations:\n X_test, y_test = self.data.get_batch_test(self.batch_size_test)\n loss_test = self.__criterion(self.net(X_test), y_test)\n # print('{:<9}Train loss: %.4e{:<25}Test loss: %.4e{:<25}'.format(i, loss.item(), loss_test.item()), flush=True)\n print(' ADAM || It: %05d, Loss: %.4e, Test: %.4e' % \n (i, loss.item(), loss_test.item()))\n if torch.any(torch.isnan(loss)):\n self.encounter_nan = True\n print('Encountering nan, stop training', flush=True)\n return None\n if self.save:\n if not os.path.exists('model'): os.mkdir('model')\n if self.path == None:\n torch.save(self.net, 'model/model{}.pkl'.format(i))\n else:\n if not os.path.isdir('model/'+self.path): os.makedirs('model/'+self.path)\n torch.save(self.net, 'model/{}/model{}.pkl'.format(self.path, i))\n if self.callback is not None: \n output = self.callback(self.data, self.net)\n loss_history.append([i, loss.item(), loss_test.item(), *output])\n else:\n loss_history.append([i, loss.item(), loss_test.item()])\n if i < self.iterations:\n self.__optimizer.zero_grad()\n loss.backward()\n 
self.__optimizer.step()\n self.loss_history = np.array(loss_history)\n # print('Done!', flush=True)\n return self.loss_history\n \n def restore(self):\n if self.loss_history is not None and self.save == True:\n best_loss_index = np.argmin(self.loss_history[:, 1])\n iteration = int(self.loss_history[best_loss_index, 0])\n loss_train = self.loss_history[best_loss_index, 1]\n loss_test = self.loss_history[best_loss_index, 2]\n print('BestADAM It: %05d, Loss: %.4e, Test: %.4e' % \n (iteration, loss_train, loss_test))\n if self.path == None:\n \tself.best_model = torch.load('model/model{}.pkl'.format(iteration))\n else:\n self.best_model = torch.load('model/{}/model{}.pkl'.format(self.path,iteration))\n else:\n raise RuntimeError('restore before running or without saved models')\n from torch.optim import LBFGS\n optim = LBFGS(self.best_model.parameters(), history_size=100,\n max_iter=self.lbfgs_steps,\n tolerance_grad=1e-09, tolerance_change=1e-09,\n line_search_fn=\"strong_wolfe\")\n self.it = 0\n if self.lbfgs_steps != 0:\n def closure():\n if torch.is_grad_enabled():\n optim.zero_grad()\n X_train, y_train = self.data.get_batch(None)\n X_test, y_test = self.data.get_batch_test(None)\n loss = self.best_model.criterion(self.best_model(X_train), y_train)\n loss_test = self.best_model.criterion(self.best_model(X_test), y_test)\n it = self.it + 1\n if it % self.print_every == 0 or it == self.lbfgs_steps:\n print('L-BFGS|| It: %05d, Loss: %.4e, Test: %.4e' % \n (it, loss.item(), loss_test.item()))\n self.it = it\n if loss.requires_grad:\n loss.backward()\n return loss\n optim.step(closure)\n print('Done!', flush=True)\n return self.best_model\n \n def output(self, data, best_model, loss_history, info, **kwargs):\n if self.path is None:\n path = './outputs/' + time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))\n else:\n path = './outputs/' + self.path\n if not os.path.isdir(path): os.makedirs(path)\n if data:\n def save_data(fname, data):\n if isinstance(data, dict):\n np.savez_compressed(path + '/' + fname, **data)\n else:\n np.save(path + '/' + fname, data)\n save_data('X_train', self.data.X_train_np)\n save_data('y_train', self.data.y_train_np)\n save_data('X_test', self.data.X_test_np)\n save_data('y_test', self.data.y_test_np)\n if best_model:\n torch.save(self.best_model, path + '/model_best.pkl')\n if loss_history:\n np.savetxt(path + '/loss.txt', self.loss_history)\n if info is not None:\n with open(path + '/info.txt', 'w') as f:\n for key, arg in info.items():\n f.write('{}: {}\\n'.format(key, str(arg)))\n for key, arg in kwargs.items():\n np.savetxt(path + '/' + key + '.txt', arg)\n \n def __init_brain(self):\n self.loss_history = None\n self.encounter_nan = False\n self.best_model = None\n self.data.device = self.device\n self.data.dtype = self.dtype\n self.net.device = self.device\n self.net.dtype = self.dtype\n self.__init_optimizer()\n self.__init_criterion()\n \n def __init_optimizer(self):\n if self.optimizer == 'adam':\n self.__optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr, weight_decay=self.weight_decay)\n else:\n raise NotImplementedError\n \n def __init_criterion(self):\n if isinstance(self.net, LossNN):\n self.__criterion = self.net.criterion\n if self.criterion is not None:\n import warnings\n warnings.warn('loss-oriented neural network has already implemented its loss function')\n elif self.criterion == 'MSE':\n self.__criterion = torch.nn.MSELoss()\n elif self.criterion == 'CrossEntropy':\n self.__criterion = cross_entropy_loss\n else:\n raise 
NotImplementedError\n", "sub_path": "learner/brain.py", "file_name": "brain.py", "file_ext": "py", "file_size_in_byte": 8618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.any", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.timing", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.optim.LBFGS", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.is_grad_enabled", "line_number": 137, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 157, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 157, "usage_type": "call"}, {"api_name": "time.time", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 195, "usage_type": "attribute"}, {"api_name": "nn.LossNN", "line_number": 200, "usage_type": "argument"}, {"api_name": "warnings.warn", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 206, "usage_type": "attribute"}, {"api_name": "utils.cross_entropy_loss", "line_number": 208, "usage_type": "name"}]} +{"seq_id": "625082202", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 16 15:11:17 2019\n\n@author: rain\n\"\"\"\n\nimport pandas as pd\n#### hand writing example\nfrom keras.datasets import mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n############################## network building\nimport keras\nfrom keras import models\nfrom keras import layers\nnetwork = models.Sequential()\nnetwork.add(layers.Dense(512,activation='relu',input_shape=(28*28,)))\nnetwork.add(layers.Dense(10,activation='softmax'))\nrms=keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06) #先定义优化器,再放入\nnetwork.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy']\n 
)\n\n############################# put data into model\ntrain_images=train_images.reshape((60000,28*28))\ntrain_images=train_images.astype('float32')/255 # ?\ntest_images=test_images.reshape((10000,28*28))\ntest_images=test_images.astype('float32')/255 \n\n############################# preparing for labels\nfrom keras.utils import to_categorical\ntrain_labels=to_categorical(train_labels) # 把label变成矩阵,一一对应model出来的值\ntest_labels=to_categorical(test_labels) \n\nnetwork.fit(train_images,train_labels, epochs=7, batch_size=518)\n\n########################### 放到测试集上\ntest_loss, test_acc= network.evaluate(test_images, test_labels)\n\n########################### show the image\ndigit = train_images[5]\nimport matplotlib.pyplot as plt\nfrom ipykernel.kernelapp import IPKernelApp\nplt.imshow(digit)\n", "sub_path": "各种神经网络模板/ex_deeplearning.py", "file_name": "ex_deeplearning.py", "file_ext": "py", "file_size_in_byte": 1589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 12, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 18, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 20, "usage_type": "name"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 21, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "91463215", "text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom scrapy.exporters import CsvItemExporter\n\nimport pymysql\n\n\n# class SpidersPipeline:\n# def process_item(self, item, spider):\n\n\nclass MysqlPipeline(object):\n def __init__(self):\n # 建立连接\n self.conn = pymysql.connect(\n host='localhost',\n port=3306,\n user='root',\n password='123',\n db='test1',\n charset='utf8mb4'\n )\n # 创建游标\n self.cursor = self.conn.cursor()\n\n def process_item(self, item, spider):\n # sql语句\n insert_sql = \"insert into movies(release,name,tag) VALUES(%s,%s,%s)\"\n # 执行插入数据到数据库操作\n self.cursor.execute(insert_sql, (item['release'], item['name'], item['tag']))\n # 提交,不进行提交无法保存到数据库\n self.conn.commit()\n\n def close_spider(self, spider):\n # 关闭游标和连接\n self.cursor.close()\n self.conn.close()\n", "sub_path": "Week02/spiders/spiders/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pymysql.connect", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "107265824", "text": "from django 
import forms\nfrom .models import Lead, TempUser\n\n\nclass NewLeadForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.Meta.fields:\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n\n class Meta:\n model = Lead\n fields = ['company_name', 'number_locations', 'contact_name', 'contact_email', 'contact_phone']\n\n\n\nclass LoginForm(forms.ModelForm):\n\n class Meta:\n model = TempUser\n fields = ['email']\n\n\nclass NewLeadFormSpiffy(NewLeadForm):\n\n class Meta(NewLeadForm.Meta):\n fields = NewLeadForm.Meta.fields + ['status']", "sub_path": "leads/lead/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "models.Lead", "line_number": 15, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "models.TempUser", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "635960820", "text": "import bpy\nimport struct\nimport mathutils\n\nbl_info = {\n \"name\": \"KaiKai Exporter\",\n \"author\": \"Francisco Blanco\",\n \"blender\": (2,6,4),\n \"version\": (0,0,8),\n \"location\": \"File > Import-Export\",\n \"description\": \"Export a Kaikai model and animation\",\n \"category\": \"Import-Export\"\n}\n\nclass Weight():\n joint = -1\n weight = 0.0\n\nclass Joint():\n father = -1\n join = None\n def addJoint(self,j):\n self.joint = j\n def getJoint(self):\n return self.joint\n def setFather(self, i):\n father = i\n def getFather(self):\n return father \n\nclass Skeleton():\n cant = 0\n fathers_bind = dict()\n joint_list = list()\n rotation = mathutils.Matrix()\n scale = mathutils.Matrix()\n position = mathutils.Matrix()\n name = \"bone_unamed\"\n def addJoint(self,joint):\n self.fathers_bind[joint.name] = self.cant\n self.cant = self.cant +1\n self.joint_list.append(joint)\n def getJointCount(self):\n return self.cant \n def getJoint(self):\n return self.joint_list\n def getFather(self,father_name):\n return self.fathers_bind[father_name]\n\nclass Mesh():\n meshname = 0\n vertexcant = 0\n facescant = 0\n materialcant = 0\n position = 0\n rotation = mathutils.Matrix()\n scale = mathutils.Matrix()\n vertex = 0\n normal = []\n faces = 0\n material = []\n texture_vertex = []\n vertex_group = list()\n def addVertexGroup(self, vg):\n self.vertex_group = vg\n def addVertex(self,vert):\n self.vertex = vert\n def addNormal(self,norm):\n self.normal.append(norm) \n def addTextureVertex(self,tv):\n self.texture_vertex.append(tv)\n def addFace(self,face): \n self.faces = face \n def addMaterial(self,materialname):\n self.material.append(materialname) \n def getVertexCountString(self):\n return \"%s\" % self.vertexcant \n def getNormalCountString(self):\n return \"%s\" % self.vertexcant \n def getFacesCountString(self):\n return \"%s\" % self.facescant\n def getMaterialCountString(self):\n return \"%s\" % len(self.material) \n def getTextureVertexCountString(self):\n return \"%s\" % len(self.texture_vertex)\n def getVertexBuffer(self):\n return self.vertex \n def saveVertex(self,f):\n uv = self.texture_vertex\n for i,v in enumerate(self.vertex):\n f.write(\"%d \" % i)\n auxv = 
self.scale * v.co\n auxv = self.rotation * auxv\n f.write(\"%.2f \" % auxv.x)\n f.write(\"%.2f \" % auxv.y)\n f.write(\"%.2f \" % auxv.z)\n f.write(\"%.2f \" % v.normal.x)\n f.write(\"%.2f \" % v.normal.y)\n f.write(\"%.2f \" % v.normal.z)\n auxlist = list()\n try:\n f.write(\"%.2f \" % uv[i][0])\n f.write(\"%.2f\\n\" % (1.0 - uv[i][1])) \n except IndexError:\n f.write(\"0.0 \")\n f.write(\"0.0\\n\") \n for j,vg in enumerate(self.vertex_group):\n print(\"%s \" % j)\n wei = Weight()\n try:\n wei.joint = j\n wei.weight = vg.weight(i)\n auxlist.append(wei)\n except RuntimeError:\n print(\"Vertex %d not found\" % i) \n if len(auxlist) < 4: \n f.write(\"WEIGHTCOUNT %d \" % len(auxlist)) \n for w in auxlist:\n f.write(\"%d \" % w.joint)\n f.write(\"%.2f \" % w.weight)\n else: \n f.write(\"WEIGHTCOUNT %d \" % 4) \n f.write(\"%d \" % auxlist[0].joint)\n f.write(\"%.2f \" % auxlist[0].weight)\n f.write(\"%d \" % auxlist[1].joint)\n f.write(\"%.2f \" % auxlist[1].weight)\n f.write(\"%d \" % auxlist[2].joint)\n f.write(\"%.2f \" % auxlist[2].weight)\n f.write(\"%d \" % auxlist[3].joint)\n f.write(\"%.2f \" % auxlist[3].weight) \n f.write(\"\\n\") \n \n def saveFaces(self,f):\n for i,fa in enumerate(self.faces):\n f.write(\"%d \" % i)\n f.write(\"%d \" % fa.vertices[0])\n f.write(\"%d \" % fa.vertices[1])\n f.write(\"%d\\n\" % fa.vertices[2])\n def saveMaterials(self,f):\n for mat in self.material:\n f.write(\"%s \" % mat.split(\".\",1)[0]) \n f.write(\"\\n\") \n def setScaleMatrix(self, _scale):\n self.scale[0][0] = _scale[0]\n self.scale[1][1] = _scale[1]\n self.scale[2][2] = _scale[2] \n def setRotationMatrix(self, _rotation):\n if _rotation.rotation_mode == 'XYZ':\n self.rotation = _rotation.rotation_euler.to_matrix()\n if _rotation.rotation_mode == 'QUATERNION':\n self.rotation = _rotation.rotation_quaternion.to_matrix()\n \nclass Objeto:\n mesh = []\n skeleton = Skeleton()\n meshcount = 0\n def print(self):\n print(\"\\n\\n Preparing Object \\n\\n\")\n print(\"Mesh Count: \"+\"%s\" % self.meshcount)\n for m in self.mesh:\n print(\"Vertex Count: \"+m.getVertexCountString())\n print(\"Normals Count: \"+m.getNormalCountString())\n print(\"UV Coord Count: \"+m.getTextureVertexCountString())\n print(\"Index Count: \"+m.getFacesCountString())\n def addMesh(self,_mesh):\n self.mesh.append(_mesh)\n self.meshcount+=1\n def saveObject(self,f):\n f.write('SOURCE Blender Python Script Exporter\\n')\n f.write('MESHES '+\"%s\"%len(self.mesh)+'\\n')\n f.write('JOINTS '+\"%s\"% self.skeleton.getJointCount()+'\\n')\n self.saveMeshs(f)\n def saveMeshs(self,f): \n for i,j in enumerate(self.skeleton.getJoint()):\n f.write('JOINT ')\n f.write('%s ' % i)\n f.write('%s ' % j.name)\n if j.parent is not None:\n f.write('%s ' % self.skeleton.getFather(j.parent.name))\n else:\n f.write('%s ' % -1) \n origin = self.skeleton.rotation * j.head_local\n origin[0] = origin[0] + self.skeleton.position[0]\n origin[1] = origin[1] + self.skeleton.position[1]\n origin[2] = origin[2] + self.skeleton.position[2] \n f.write('%s ' % origin[0])\n f.write('%s ' % origin[1])\n f.write('%s ' % origin[2])\n tail = self.skeleton.rotation * j.tail_local\n tail[0] = tail[0] + self.skeleton.position[0]\n tail[1] = tail[1] + self.skeleton.position[1]\n tail[2] = tail[2] + self.skeleton.position[2] \n f.write('%s ' % tail[0])\n f.write('%s ' % tail[1])\n f.write('%s ' % tail[2])\n f.write('%s ' % j.matrix.to_quaternion().x)\n f.write('%s ' % j.matrix.to_quaternion().y)\n f.write('%s ' % j.matrix.to_quaternion().z)\n f.write('%s ' % 
j.matrix.to_quaternion().w)\n f.write('\\n')\n for m in self.mesh:\n f.write('MESH '+m.meshname+' ')\n f.write(m.getVertexCountString()+' ')\n f.write(m.getFacesCountString()+'\\n')\n m.saveVertex(f)\n m.saveFaces(f)\n m.saveMaterials(f)\n f.write('MATERIAL '+m.getMaterialCountString()+'\\n')\n m.saveMaterials(f)\n def addJoint(self,_joint):\n skeleton = _joint \n def getJoint(self):\n return self.skeleton \n \ndef scaleVertex(v,s):\n m = mathutils.Matrix()\n m[0][0]=s[0]\n m[1][1]=s[1]\n m[2][2]=s[2]\n return v * m\n\ndef exportarDatosDeMalla(obj):\n selected_objects = bpy.context.selected_objects\n for object in selected_objects:\n if object.type == 'MESH':\n auxjoint = obj.getJoint()\n auxmesh = Mesh()\n auxmesh.meshname = object.name\n auxmesh.position = object.location\n auxmesh.setRotationMatrix(object)\n auxmesh.setScaleMatrix(object.scale)\n auxmesh.addVertexGroup(object.vertex_groups)\n # get Mesh Vertex in a list, whit Vertex i have normal and coordinates\n vertices = object.data.vertices\n auxmesh.vertexcant = len(vertices)\n auxmesh.addVertex(vertices)\n # get Mesh Faces in a list\n faces = object.data.polygons\n auxmesh.facescant = len(faces)\n auxmesh.addFace(faces)\n # get UV Map, if it exists\n if len(object.data.uv_layers) != 0:\n print(\"tiene texturas\\n\")\n aux_tex = object.data.uv_layers.active.data\n textures = dict()\n for i,p in enumerate(faces):\n textures[p.vertices[0]] = aux_tex[i*3]\n textures[p.vertices[1]] = aux_tex[i*3+1]\n textures[p.vertices[2]] = aux_tex[i*3+2] \n for k,t in textures.items():\n auxmesh.addTextureVertex(t.uv)\n if(len(object.data.materials) != 0): \n if(hasattr(object.data.materials[0].active_texture,'image')):\n auxmesh.addMaterial(object.data.materials[0].active_texture.image.name) \n obj.addMesh(auxmesh)\n armature = object.parent\n if armature is not None:\n if armature.type == 'ARMATURE':\n obj.skeleton.position = armature.location\n if armature.rotation_mode == 'XYZ':\n obj.skeleton.rotation = armature.rotation_euler.to_matrix()\n if armature.rotation_mode == 'QUATERNION':\n obj.skeleton.rotation = armature.rotation_quaternion.to_matrix() \n for joint in armature.data.bones:\n auxjoint.addJoint(joint)\n obj.addJoint(auxjoint)\n \ndef createFile(obj,file,filepath):\n obj.print()\n obj.saveObject(file)\n \ndef write_some_data(context, filepath, mesh):\n print(\"Creating mesh file: \" + filepath)\n o = Objeto() \n if mesh:\n exportarDatosDeMalla(o) \n f = open(filepath, 'w') \n createFile(o,f,filepath)\n f.close()\n return {'FINISHED'}\n\n\n# ExportHelper is a helper class, defines filename and\n# invoke() function which calls the file selector.\nfrom bpy_extras.io_utils import ExportHelper\nfrom bpy.props import StringProperty, BoolProperty, EnumProperty\nfrom bpy.types import Operator\n\n\nclass ExportSomeData(Operator, ExportHelper):\n \"\"\"This appears in the tooltip of the operator and in the generated docs\"\"\"\n bl_idname = \"export_test.some_data\" # important since its how bpy.ops.import_test.some_data is constructed\n bl_label = \"Exportar Kaikai mesh\"\n\n # ExportHelper mixin class uses this\n filename_ext = \".kkm\"\n\n filter_glob = StringProperty(\n default=\"*.kkm\",\n options={'HIDDEN'},\n )\n\n # List of operator properties, the attributes will be assigned\n # to the class instance from the operator settings before calling.\n mesh = BoolProperty(\n name=\"Mesh\",\n description=\"Exporta la malla del objeto\",\n default=True,\n ) \n\n def execute(self, context):\n return write_some_data(context, self.filepath, 
self.mesh)\n\n\n# Only needed if you want to add into a dynamic menu\ndef menu_func_export(self, context):\n self.layout.operator(ExportSomeData.bl_idname, text=\"Export KaiKai Mesh (.kkm)\")\n\n\ndef register():\n bpy.utils.register_class(ExportSomeData)\n bpy.types.INFO_MT_file_export.append(menu_func_export)\n\n\ndef unregister():\n bpy.utils.unregister_class(ExportSomeData)\n bpy.types.INFO_MT_file_export.remove(menu_func_export)\n\n\nif __name__ == \"__main__\":\n register()\n\n # test call\n bpy.ops.export_test.some_data('INVOKE_DEFAULT')\n", "sub_path": "IOKaiKaiExporter.py", "file_name": "IOKaiKaiExporter.py", "file_ext": "py", "file_size_in_byte": 12012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "mathutils.Matrix", "line_number": 35, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 36, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 37, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 56, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 57, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 216, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 223, "usage_type": "attribute"}, {"api_name": "bpy.types.Operator", "line_number": 290, "usage_type": "name"}, {"api_name": "bpy_extras.io_utils.ExportHelper", "line_number": 290, "usage_type": "name"}, {"api_name": "bpy.props.StringProperty", "line_number": 298, "usage_type": "call"}, {"api_name": "bpy.props.BoolProperty", "line_number": 305, "usage_type": "call"}, {"api_name": "bpy.utils.register_class", "line_number": 321, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 321, "usage_type": "attribute"}, {"api_name": "bpy.types.INFO_MT_file_export.append", "line_number": 322, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 322, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_class", "line_number": 326, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 326, "usage_type": "attribute"}, {"api_name": "bpy.types.INFO_MT_file_export.remove", "line_number": 327, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 327, "usage_type": "attribute"}, {"api_name": "bpy.ops.export_test.some_data", "line_number": 334, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 334, "usage_type": "attribute"}]} +{"seq_id": "388138249", "text": "from datetime import datetime\nfrom cms.utils import get_page_from_request\nfrom annoying.functions import get_config\n\ndef page_ancestors(request):\n page = get_page_from_request(request)\n ancestors_list = list()\n if page:\n ancestors_list = [ ance.reverse_id for ance in page.get_ancestors() if ance.reverse_id ]\n\n return { 'page_ancestors': ancestors_list }\n\n\ndef forum_period(request):\n today = datetime.today()\n \n return {\n 'REG_START' : get_config('REG_START', today),\n 'REG_END': get_config('REG_END', today),\n 'NOW': today,\n }\n", "sub_path": "plugins/context_processors.py", "file_name": "context_processors.py", "file_ext": "py", "file_size_in_byte": 582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cms.utils.get_page_from_request", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": 
"annoying.functions.get_config", "line_number": 18, "usage_type": "call"}, {"api_name": "annoying.functions.get_config", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "263208613", "text": "\"\"\"\nLightStep's implementations of the basictracer Recorder API.\n\nhttps://github.com/opentracing/basictracer-python\n\nSee the API definition for comments.\n\"\"\"\n\nfrom socket import error as socket_error\n\nimport atexit\nimport contextlib\nimport jsonpickle\nimport logging\nimport pprint\nimport ssl\nimport sys\nimport threading\nimport time\nimport warnings\n\nfrom thrift import Thrift\nfrom basictracer.recorder import SpanRecorder\n\nfrom .crouton import ttypes\nfrom . import constants, version as cruntime_version, util, connection as conn\n\n\nclass Recorder(SpanRecorder):\n \"\"\"Recorder records and reports a BasicSpan to LightStep.\"\"\"\n def __init__(self, **kwargs):\n self.runtime = Runtime(**kwargs)\n\n def record_span(self, span):\n \"\"\"Per BasicSpan.record_span\"\"\"\n self.runtime._add_span(span)\n\n def flush(self):\n \"\"\"Force a flush of buffered Span data to LightStep\"\"\"\n self.runtime.flush()\n\n\nclass LoggingRecorder(SpanRecorder):\n \"\"\"LoggingRecorder prints all spans to stdout.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._runtime_guid = util._generate_guid()\n\n def record_span(self, span):\n \"\"\"Per BasicSpan.record_span\"\"\"\n\n logs = []\n for log in span.logs:\n event = \"\"\n if len(log.key_values[\"event\"]) > 0:\n # Don't allow for arbitrarily long log messages.\n if sys.getsizeof(log.key_values[\"event\"]) > constants.MAX_LOG_MEMORY:\n event = log.key_values[\"event\"][:constants.MAX_LOG_LEN]\n else:\n event = log.key_values[\"event\"]\n logs.append(ttypes.LogRecord(\n timestamp_micros=long(util._time_to_micros(log.timestamp)),\n stable_name=event,\n payload_json=log.key_values[\"payload\"]))\n logging.info(\n 'Reporting span %s \\n with logs %s',\n self._pretty_span(span),\n self._pretty_logs(logs))\n\n def flush(self):\n \"\"\"A noop for LoggingRecorder\"\"\"\n return\n\n def _pretty_span(self, span):\n \"\"\"A helper to format a span for console logging\"\"\"\n span = {\n 'trace_guid': span.context.trace_id,\n 'span_guid': span.context.span_id,\n 'runtime_guid': util._id_to_hex(self._runtime_guid),\n 'span_name': span.operation_name,\n 'oldest_micros': span.start_time,\n 'youngest_micros': util._now_micros(),\n }\n return ''.join(['\\n ' + attr + \": \" + str(span[attr]) for attr in span])\n\n def _pretty_logs(self, logs):\n \"\"\"A helper to format logs for console logging\"\"\"\n return ''.join(['\\n ' + pprint.pformat(log) for log in logs])\n\n\nclass Runtime(object):\n \"\"\"Instances of Runtime send spans to the LightStep collector.\n\n :param str group_name: name identifying the type of service that is being\n tracked\n :param str access_token: project's access token\n :param bool secure: whether HTTP connection is secure\n :param str service_host: Service host name\n :param int service_port: Service port number\n :param int max_span_records: Maximum number of spans records to buffer\n :param bool certificate_verification: if False, will ignore SSL\n certification verification (in ALL HTTPS calls, not just in this\n library) for the lifetime of this process; intended for debugging\n purposes only\n \"\"\"\n def __init__(self,\n group_name=None,\n access_token='',\n secure=True,\n service_host=\"collector.lightstep.com\",\n service_port=443,\n max_span_records=constants.DEFAULT_MAX_SPAN_RECORDS,\n 
certificate_verification=True,\n periodic_flush_seconds=constants.FLUSH_PERIOD_SECS):\n\n # Fail fast on a bad access token\n if isinstance(access_token, basestring) == False:\n raise Exception('access_token must be a string')\n\n if certificate_verification is False:\n warnings.warn('SSL CERTIFICATE VERIFICATION turned off. ALL FUTURE HTTPS calls will be unverified.')\n ssl._create_default_https_context = ssl._create_unverified_context\n\n if group_name is None:\n group_name = sys.argv[0]\n\n # Thrift runtime configuration\n self.guid = util._generate_guid()\n timestamp = util._now_micros()\n\n version = '.'.join(map(str, sys.version_info[0:3]))\n attrs = [\n ttypes.KeyValue(\"cruntime_platform\", \"python\"),\n ttypes.KeyValue(\"cruntime_version\", cruntime_version.CRUNTIME_VERSION),\n ttypes.KeyValue(\"python_version\", version),\n ]\n\n # Thrift is picky about the types being correct, so we're explicit here\n self._runtime = ttypes.Runtime(\n util._id_to_hex(self.guid),\n long(timestamp),\n str(group_name),\n attrs)\n self._service_url = util._service_url_from_hostport(secure,\n service_host,\n service_port)\n self._auth = ttypes.Auth(access_token)\n self._mutex = threading.Lock()\n self._span_records = []\n self._max_span_records = max_span_records\n\n self._disabled_runtime = False\n atexit.register(self.shutdown)\n\n self._periodic_flush_seconds = periodic_flush_seconds\n if self._periodic_flush_seconds <= 0:\n warnings.warn(\n 'Runtime(periodic_flush_seconds={0}) means we will never flush to lightstep unless explicitly requested.'.format(\n self._periodic_flush_seconds))\n self._flush_connection = None\n else:\n self._flush_connection = conn._Connection(self._service_url)\n self._flush_connection.open()\n self._flush_thread = threading.Thread(target=self._flush_periodically,\n name=constants.FLUSH_THREAD_NAME)\n self._flush_thread.daemon = True\n self._flush_thread.start()\n\n def shutdown(self, flush=True):\n \"\"\"Shutdown the Runtime's connection by (optionally) flushing the\n remaining logs and spans and then disabling the Runtime.\n\n Note: spans and logs will no longer be reported after shutdown is called.\n\n Returns whether the data was successfully flushed.\n \"\"\"\n # Closing connection twice results in an error. Exit early\n # if runtime has already been disabled.\n if self._disabled_runtime:\n return False\n\n if flush:\n flushed = self.flush()\n\n if self._flush_connection:\n self._flush_connection.close()\n\n self._disabled_runtime = True\n\n return flushed\n\n def flush(self, connection=None):\n \"\"\"Immediately send unreported data to the server.\n\n Calling flush() will ensure that any current unreported data will be\n immediately sent to the host server.\n\n If connection is not specified, the report will sent to the server\n passed in to __init__. 
Note that custom connections are currently used\n for unit testing against a mocked connection.\n\n Returns whether the data was successfully flushed.\n \"\"\"\n if self._disabled_runtime:\n return False\n\n if connection is not None:\n return self._flush_worker(connection)\n return self._flush_worker(self._flush_connection)\n\n\n def _flush_periodically(self):\n \"\"\"Periodically send reports to the server.\n\n Runs in a dedicated daemon thread (self._flush_thread).\n \"\"\"\n # Open the connection\n while not self._disabled_runtime and not self._flush_connection.ready:\n time.sleep(self._periodic_flush_seconds)\n self._flush_connection.open()\n\n # Send data until we get disabled\n while not self._disabled_runtime:\n self._flush_worker(self._flush_connection)\n time.sleep(self._periodic_flush_seconds)\n\n def _flush_worker(self, connection):\n \"\"\"Use the given connection to transmit the current logs and spans as a\n report request.\"\"\"\n if connection == None:\n return False\n\n # If the connection is not ready, try reestablishing it. If that\n # fails just wait until the next flush attempt to try again.\n if not connection.ready:\n connection.open()\n if not connection.ready:\n return False\n\n report_request = self._construct_report_request()\n try:\n resp = connection.report(self._auth, report_request)\n\n # The resp may be None on failed reports\n if resp is not None:\n if resp.commands is not None:\n for command in resp.commands:\n if command.disable:\n self.shutdown(flush=False)\n # Return whether we sent any span data\n return len(report_request.span_records) > 0\n\n except Exception:\n self._restore_spans(report_request.span_records)\n return False\n\n\n def _construct_report_request(self):\n \"\"\"Construct a report request.\"\"\"\n report = None\n with self._mutex:\n report = ttypes.ReportRequest(self._runtime, self._span_records,\n None)\n self._span_records = []\n for span in report.span_records:\n for log in span.log_records:\n index = span.log_records.index(log)\n if log.payload_json is not None:\n try:\n log.payload_json = \\\n jsonpickle.encode(log.payload_json,\n unpicklable=False,\n make_refs=False,\n max_depth=constants.JSON_MAX_DEPTH)\n except:\n log.payload_json = jsonpickle.encode(constants.JSON_FAIL)\n span.log_records[index] = log\n return report\n\n def _add_span(self, span):\n \"\"\"Safely add a span to the buffer.\n\n Will delete a previously-added span if the limit has been reached.\n \"\"\"\n if self._disabled_runtime:\n return\n\n # Checking the len() here *could* result in a span getting dropped that\n # might have fit if a report started before the append(). This would only\n # happen if the client lib was being saturated anyway (and likely\n # dropping spans). 
But on the plus side, having the check here avoids\n # doing a span conversion when the span will just be dropped while also\n # keeping the lock scope minimized.\n with self._mutex:\n if len(self._span_records) >= self._max_span_records:\n return\n\n span_record = ttypes.SpanRecord(\n trace_guid=util._id_to_hex(span.context.trace_id),\n span_guid=util._id_to_hex(span.context.span_id),\n runtime_guid=util._id_to_hex(span._tracer.recorder.runtime.guid),\n span_name=str(span.operation_name),\n join_ids=[],\n oldest_micros=long(util._time_to_micros(span.start_time)),\n youngest_micros=long(util._time_to_micros(span.start_time + span.duration)),\n attributes=[],\n log_records=[]\n )\n\n if span.parent_id != None:\n span_record.attributes.append(\n ttypes.KeyValue(\n constants.PARENT_SPAN_GUID,\n util._id_to_hex(span.parent_id)))\n if span.tags:\n for key in span.tags:\n if key[:len(constants.JOIN_ID_TAG_PREFIX)] == constants.JOIN_ID_TAG_PREFIX:\n span_record.join_ids.append(ttypes.TraceJoinId(key, span.tags[key]))\n else:\n span_record.attributes.append(ttypes.KeyValue(key, span.tags[key]))\n\n for log in span.logs:\n event = \"\"\n if len(log.key_values[\"event\"]) > 0:\n # Don't allow for arbitrarily long log messages.\n if sys.getsizeof(log.key_values[\"event\"]) > constants.MAX_LOG_MEMORY:\n event = log.key_values[\"event\"][:constants.MAX_LOG_LEN]\n else:\n event = log.key_values[\"event\"]\n span_record.log_records.append(ttypes.LogRecord(\n timestamp_micros=long(util._time_to_micros(log.timestamp)),\n stable_name=event,\n payload_json=log.key_values[\"payload\"]))\n\n with self._mutex:\n if len(self._span_records) < self._max_span_records:\n self._span_records.append(span_record)\n\n def _restore_spans(self, span_records):\n \"\"\"Called after a flush error to move records back into the buffer\n \"\"\"\n if self._disabled_runtime:\n return\n\n with self._mutex:\n if len(self._span_records) >= self._max_span_records:\n return\n combined = span_records + self._span_records\n self._span_records = combined[-self._max_span_records:]\n", "sub_path": "lightstep/recorder.py", "file_name": "recorder.py", "file_ext": "py", "file_size_in_byte": 13381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "basictracer.recorder.SpanRecorder", "line_number": 29, "usage_type": "name"}, {"api_name": "basictracer.recorder.SpanRecorder", "line_number": 43, "usage_type": "name"}, {"api_name": "sys.getsizeof", "line_number": 57, "usage_type": "call"}, {"api_name": "crouton.ttypes.LogRecord", "line_number": 61, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 61, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 88, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 121, "usage_type": "call"}, {"api_name": "ssl._create_default_https_context", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 122, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 131, "usage_type": "attribute"}, {"api_name": "crouton.ttypes.KeyValue", "line_number": 133, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 133, "usage_type": "name"}, {"api_name": "crouton.ttypes.KeyValue", "line_number": 134, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 
134, "usage_type": "name"}, {"api_name": "crouton.ttypes.KeyValue", "line_number": 135, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 135, "usage_type": "name"}, {"api_name": "crouton.ttypes.Runtime", "line_number": 139, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 139, "usage_type": "name"}, {"api_name": "crouton.ttypes.Auth", "line_number": 147, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 147, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 148, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 153, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 157, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 164, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 219, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 225, "usage_type": "call"}, {"api_name": "crouton.ttypes.ReportRequest", "line_number": 262, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 262, "usage_type": "name"}, {"api_name": "jsonpickle.encode", "line_number": 271, "usage_type": "call"}, {"api_name": "jsonpickle.encode", "line_number": 276, "usage_type": "call"}, {"api_name": "crouton.ttypes.SpanRecord", "line_number": 298, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 298, "usage_type": "name"}, {"api_name": "crouton.ttypes.KeyValue", "line_number": 312, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 312, "usage_type": "name"}, {"api_name": "crouton.ttypes.TraceJoinId", "line_number": 318, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 318, "usage_type": "name"}, {"api_name": "crouton.ttypes.KeyValue", "line_number": 320, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 320, "usage_type": "name"}, {"api_name": "sys.getsizeof", "line_number": 326, "usage_type": "call"}, {"api_name": "crouton.ttypes.LogRecord", "line_number": 330, "usage_type": "call"}, {"api_name": "crouton.ttypes", "line_number": 330, "usage_type": "name"}]} +{"seq_id": "236011216", "text": "import json\nfrom os import listdir\nfrom os.path import isfile, join\nfrom player import player\nimport csv\nimport pdb\n\n\ndef getFiles(folder):\n return [f for f in listdir(folder) if isfile(join(folder, f))]\ndef jsonToList(path):\n with open(path) as f:\n t = f.read()\n return json.loads(t)\ndef distinctValues(d,value):\n s = set()\n for d1 in d:\n s.add(d1[value])\n return s\ndef addAdvanced(players):\n directory = 'nbaStats/advancedPlayer'\n for path in getFiles(directory):\n id = int(path[:-5])\n d = jsonToList(directory + '/' + path)\n players[id].addAdvanced(d)\ndef addFourFactor(players):\n directory = 'nbaStats/playerFourFactor'\n for path in getFiles(directory):\n id = int(path[:-5])\n d = jsonToList(directory + '/' + path)\n players[id].addShotCharts(d)\ndef addMisc(players):\n directory = 'nbaStats/playerMisc'\n for path in getFiles(directory):\n id = int(path[:-5])\n d = jsonToList(directory + '/' + path)\n players[id].addMisc(d)\ndef addShotCharts(players):\n directory = 'nbaStats/playerShot'\n for path in getFiles(directory):\n id = int(path[:-5])\n d = jsonToList(directory + '/' + path)\n players[id].addShotCharts(d)\ndef addUsage(players):\n directory = 'nbaStats/playerUsage'\n for path in getFiles(directory):\n id = int(path[:-5])\n d = jsonToList(directory + '/' + path)\n players[id].addUsage(d)\ndef addBoxScore(players):\n directory = 
'nbaStats/playerBoxScore'\n    for path in getFiles(directory):\n        id = int(path[:-5])\n        d = jsonToList(directory + '/' + path)\n        players[id].addBoxScore(d)\ndef loadPlayers():\n    playerDict = jsonToList('nbaStats/player.txt')\n    players = {}\n    for d in playerDict:\n        p = player.Player(d)\n        players[p.id] = p\n    addBoxScore(players)\n    addAdvanced(players)\n    addFourFactor(players)\n    addMisc(players)\n    addShotCharts(players)\n    addUsage(players)\n    return players\ndef loadGames():\n    games = {}\n    gameList = jsonToList('nbaStats/game.txt')\n    for game in gameList:\n        id = game['id']\n        games[game['id']] = game\n    return games\ndef groupByPosition(players):\n    grouped = {}\n    junkPos = ['RP','TE','D']\n    for player in players.values():\n        pos = player.getPosition()\n        if pos in junkPos:\n            continue\n        if pos in grouped:\n            grouped[pos][player.id] = player\n        else:\n            group = {}\n            group[player.id] = player\n            grouped[pos] = group\n    return grouped\n\n\ndef get_float_minutes(s):\n    # 'min' values arrive as MM:SS strings\n    vals = s.split(':')\n    minutes = float(vals[0])\n    seconds = float(vals[1])\n    return minutes + (seconds / 60)\n\n\ndef getAverage(d,key,seasons):\n    total = 0.0\n    count = 0\n    for val in d:\n        if val['season'] in seasons:\n            try:\n                if key == 'min':\n                    total += get_float_minutes(val[key])\n                else:\n                    total += float(val[key])\n            except ValueError:\n                total += 0\n            count += 1\n    if count > 0:\n        return total / float(count)\n    else:\n        return 0\nboxScoreCategories = ['min','fgm','fga','fg3m','fg3a','ftm','fta','oreb','dreb',\n                      'ast','blk','stl','to','pf','pts','plus_minus',]\ndef getBoxScoreAverages(player,seasons):\n    result = []\n    for category in boxScoreCategories:\n        result.append(getAverage(player.boxScore,category,seasons))\n    return result\nadvancedCategories = ['off_rating','def_rating','ast_pct','ast_tov','ast_ratio',\n                      'oreb_pct','dreb_pct','treb_pct','tm_tov_pct','efg_pct',\n                      'ts_pct','usg_pct','pace','pie']\ndef getAdvancedAverages(player,seasons):\n    result = []\n    for category in advancedCategories:\n        result.append(getAverage(player.advanced,category,seasons))\n    return result\n\n\nshotChartCategories = ['minutes_remaining','seconds_remaining','event_type','action_type','shot_type',\n                       'shot_distance','loc_x','loc_y','shot_attempted_flag','shot_made_flag',\n                       'shot_zone_basic','shot_zone_area','shot_zone_range']\nvaluesDict = {}\n\ndef getDistinctValues(keys,l):\n    for v in l:\n        for k in keys:\n            if k in valuesDict:\n                valuesDict[k].add(v[k])\n            else:\n                s = set()\n                s.add(v[k])\n                valuesDict[k] = s\n\nnumericalShotCategories = ['shot_distance','shot_made_flag','loc_x','loc_y']\n\ncategoricalShotCategories = {\n    'shot_zone_area': ['Right Side(R)',\n                       'Center(C)',\n                       'Left Side(L)',\n                       'Left Side Center(LC)',\n                       'Back Court(BC)',\n                       'Right Side Center(RC)'],\n    'action_type': ['Fadeaway Bank shot',\n                    'Running Layup Shot',\n                    'Jump Bank Hook Shot',\n                    'Driving Floating Jump Shot',\n                    'Step Back Jump shot',\n                    'Slam Dunk Shot',\n                    'Tip Layup Shot',\n                    'Driving Dunk Shot',\n                    'Hook Shot',\n                    'Running Alley Oop Layup Shot',\n                    'Running Tip Shot',\n                    'Running Pull-Up Jump Shot',\n                    'Fadeaway Jump Shot',\n                    'Putback Dunk Shot',\n                    'Tip Dunk Shot',\n                    'Running Reverse Layup Shot',\n                    'Running Finger Roll Layup Shot',\n                    'Putback Layup Shot',\n                    'Step Back Bank Jump Shot',\n                    'No Shot',\n                    'Turnaround Jump Shot',\n                    'Floating Jump shot',\n                    'Jump Shot',\n                    'Layup Shot',\n                    'Reverse Layup Shot',\n                    'Turnaround Fadeaway shot',\n                    'Hook Bank Shot',\n                    'Driving Jump shot',\n                    'Cutting Dunk Shot',\n                    'Driving Hook Shot',\n                    'Running Alley Oop Dunk Shot',\n                    'Alley Oop Layup shot',\n                    'Turnaround Bank Hook 
Shot',\n 'Jump Bank Shot',\n 'Reverse Dunk Shot',\n 'Driving Floating Bank Jump Shot',\n 'Turnaround Fadeaway Bank Jump Shot',\n 'Running Dunk Shot',\n 'Tip Shot',\n 'Running Jump Shot',\n 'Running Hook Shot',\n 'Putback Slam Dunk Shot',\n 'Driving Bank Hook Shot',\n 'Turnaround Hook Shot',\n 'Cutting Finger Roll Layup Shot',\n 'Running Bank shot',\n 'Pullup Jump shot',\n 'Alley Oop Dunk Shot',\n 'Reverse Slam Dunk Shot',\n 'Running Slam Dunk Shot',\n 'Driving Bank shot',\n 'Driving Slam Dunk Shot',\n 'Running Reverse Dunk Shot',\n 'Pullup Bank shot',\n 'Jump Hook Shot',\n 'Driving Reverse Dunk Shot',\n 'Driving Finger Roll Layup Shot',\n 'Dunk Shot',\n 'Turnaround Bank shot',\n 'Driving Layup Shot',\n 'Cutting Layup Shot',\n 'Driving Reverse Layup Shot',\n 'Running Bank Hook Shot',\n 'Finger Roll Layup Shot'],\n 'shot_zone_range': ['16-24 ft.',\n 'Back Court Shot',\n '24+ ft.',\n 'Less Than 8 ft.',\n '8-16 ft.'],\n 'shot_zone_basic': ['Left Corner 3',\n 'Right Corner 3',\n 'Mid-Range',\n 'In The Paint (Non-RA)',\n 'Restricted Area',\n 'Backcourt',\n 'Above the Break 3'],\n 'event_type': ['Missed Shot',\n 'Made Shot'],\n 'shot_type': ['3PT Field Goal',\n '2PT Field Goal']\n}\n\n\ndef getCategoricalShotCategories():\n result = []\n for category in categoricalShotCategories:\n for c_type in categoricalShotCategories[category]:\n result.append('{}-{}'.format(category,c_type))\n return result\n\ndef getShotChartAverages(player,seasons):\n result = []\n for category in numericalShotCategories:\n result.append(getAverage(player.shotCharts,category,seasons))\n\n player_category_stats = {}\n\n for category in categoricalShotCategories:\n category_dict = {}\n for c_type in categoricalShotCategories[category]:\n category_dict[c_type] = 0\n player_category_stats[category] = category_dict\n\n for shot_chart in player.shotCharts:\n for category in categoricalShotCategories:\n c_type = shot_chart[category]\n player_category_stats[category][c_type] += 1\n\n shot_chart_count = float(len(player.shotCharts))\n\n for category in player_category_stats:\n category_dict = player_category_stats[category]\n for c_type in category_dict:\n val = category_dict[c_type]\n if shot_chart_count > 0:\n result.append(float(val) / shot_chart_count)\n else:\n result.append(0)\n\n\n\n\n #getDistinctValues(shotChartCategories,player.shotCharts)\n return result\n\n\ndef getTotalGamesPlayed(player, seasons):\n games = set()\n for val in player.boxScore:\n if val['season'] in seasons:\n games.add(val['game_id'])\n return len(games)\n\n\ndef getAveStats(players,games,seasons):\n result = []\n for player in players.values():\n playerStats = [player.id,player.name,player.getPosition()]\n playerStats.append(getTotalGamesPlayed(player,seasons))\n playerStats.extend(getBoxScoreAverages(player,seasons))\n playerStats.extend(getAdvancedAverages(player,seasons))\n playerStats.extend(getShotChartAverages(player,seasons))\n result.append(playerStats)\n return result\ndef getOutputName(seasons):\n result = 'averages/'\n result += seasons[0]\n if len(seasons) > 1:\n result += '-' + seasons[len(seasons)-1]\n result += '.csv'\n return result\n\n\ndef clean_row(row):\n row[1] = row[1].replace(',','')\n return row\n\n\ndef outputStatsCsv(allPlayers,games,seasons):\n outputName = getOutputName(seasons)\n aveStats = getAveStats(allPlayers,games,seasons)\n header = ['id','name','position','recorded_games']\n header.extend(boxScoreCategories)\n header.extend(advancedCategories)\n header.extend(numericalShotCategories)\n 
header.extend(getCategoricalShotCategories())\n for i in range(0,len(header)):\n print('#{} : {}'.format(i-3,header[i]))\n with open(outputName,'w',newline='') as f:\n writer = csv.writer(f,delimiter=',')\n writer.writerow(header)\n for item in aveStats:\n row = clean_row(item)\n if row[3] >= 20 and row[4] >= 10 and row[2] != 'None' and row[2] != 'Junk':\n writer.writerow(row)\n\n\ndef main():\n games = loadGames()\n allPlayers = loadPlayers()\n for player in allPlayers.values():\n getShotChartAverages(player,['2010','2011','2012','2013','2014','2015','2016'])\n with open('shotChartValues.txt','w',newline='') as f:\n for v in valuesDict:\n f.write(v + ': ' + str(valuesDict[v]) + '\\n')\n print('done')\n outputStatsCsv(allPlayers,games,['2010'])\n outputStatsCsv(allPlayers,games,['2011'])\n outputStatsCsv(allPlayers,games,['2012'])\n outputStatsCsv(allPlayers,games,['2013'])\n outputStatsCsv(allPlayers,games,['2014'])\n outputStatsCsv(allPlayers,games,['2015'])\n outputStatsCsv(allPlayers,games,['2016'])\n outputStatsCsv(allPlayers,games,['2010','2011','2012','2013','2014','2015','2016'])\n\n #grouped_by_position = groupByPosition(allPlayers)\n #for position in grouped_by_position:\n # print(position + ': ' + str(len(grouped_by_position[position])))\nif __name__ == '__main__':\n main()\n", "sub_path": "DataMiningProject/organizeNBAdata.py", "file_name": "organizeNBAdata.py", "file_ext": "py", "file_size_in_byte": 12231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}, {"api_name": "player.player.Player", "line_number": 60, "usage_type": "call"}, {"api_name": "player.player", "line_number": 60, "usage_type": "name"}, {"api_name": "player.player", "line_number": 79, "usage_type": "name"}, {"api_name": "player.player.getPosition", "line_number": 80, "usage_type": "call"}, {"api_name": "player.player", "line_number": 80, "usage_type": "name"}, {"api_name": "player.player.id", "line_number": 84, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 84, "usage_type": "name"}, {"api_name": "player.player.id", "line_number": 87, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 87, "usage_type": "name"}, {"api_name": "player.player.boxScore", "line_number": 121, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 121, "usage_type": "name"}, {"api_name": "player.player.advanced", "line_number": 129, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 129, "usage_type": "name"}, {"api_name": "player.player.shotCharts", "line_number": 250, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 250, "usage_type": "name"}, {"api_name": "player.player.shotCharts", "line_number": 260, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 260, "usage_type": "name"}, {"api_name": "player.player.shotCharts", "line_number": 265, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 265, "usage_type": "name"}, {"api_name": "player.player.boxScore", "line_number": 285, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 285, "usage_type": "name"}, {"api_name": "player.player", "line_number": 293, "usage_type": 
"name"}, {"api_name": "player.player.id", "line_number": 294, "usage_type": "attribute"}, {"api_name": "player.player", "line_number": 294, "usage_type": "name"}, {"api_name": "player.player.name", "line_number": 294, "usage_type": "attribute"}, {"api_name": "player.player.getPosition", "line_number": 294, "usage_type": "call"}, {"api_name": "player.player", "line_number": 295, "usage_type": "argument"}, {"api_name": "player.player", "line_number": 296, "usage_type": "argument"}, {"api_name": "player.player", "line_number": 297, "usage_type": "argument"}, {"api_name": "player.player", "line_number": 298, "usage_type": "argument"}, {"api_name": "csv.writer", "line_number": 326, "usage_type": "call"}, {"api_name": "player.player", "line_number": 337, "usage_type": "name"}, {"api_name": "player.player", "line_number": 338, "usage_type": "argument"}]} +{"seq_id": "384120725", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nfrom __future__ import absolute_import\n\nimport datetime\nimport decimal\nfrom decimal import Decimal\nfrom decimal import ROUND_DOWN\nimport itertools\nimport logging\n\nfrom django.conf import settings\nfrom django.utils.html import escape\nfrom django.utils.translation import ugettext_lazy as _\n\nimport afloclient as aflo_client\nfrom afloclient import exc\n\nfrom horizon import exceptions\nfrom horizon.utils import functions as utils\nfrom horizon.utils.memoized import memoized # noqa\n\nfrom openstack_dashboard.api import base\n\nfrom nec_portal.local import nec_portal_settings as nec_set\n\nLOG = logging.getLogger(__name__)\nVERSIONS = base.APIVersionManager(\"ticket\", preferred_version=2)\n\nSCOPE_DEFAULT = 'Default'\n\nCURRENCY_FORMAT = getattr(nec_set, 'CURRENCY_FORMAT', '{0:,.2f}')\nPRICE_FORMAT = getattr(nec_set, 'PRICE_FORMAT', [',', '.', 2])\n\n\ndef _get_price_string(value):\n \"\"\"Get Price string from value.\n :Param value: price string\n \"\"\"\n try:\n if 0 < PRICE_FORMAT[2]:\n rd_format = '.' 
+ '1'.zfill(PRICE_FORMAT[2])\n price = Decimal(value).quantize(Decimal(rd_format),\n rounding=ROUND_DOWN)\n else:\n price = Decimal(value).quantize(Decimal('1.'),\n rounding=ROUND_DOWN)\n\n return CURRENCY_FORMAT.format(price)\n\n except (TypeError, decimal.InvalidOperation):\n return value\n\n\nclass ProjectCatalog(object):\n '''Project Catalog Class\n '''\n def __init__(self,\n catalog_id,\n scope,\n seq_no,\n catalog_name,\n price,\n project_id):\n\n self.catalog_id = escape(catalog_id)\n self.scope = escape(scope)\n self.seq_no = escape(seq_no)\n self.catalog_name = _(escape(catalog_name)) # noqa\n self.price = _get_price_string(escape(price))\n self.project_id = escape(project_id)\n\n\n@memoized\ndef afloclient(request, version='1'):\n url = base.url_for(request, 'ticket')\n insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)\n cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)\n return aflo_client.Client(version, url, token=request.user.token.id,\n insecure=insecure, cacert=cacert)\n\n\n# Get ticket list\ndef ticket_list_detailed(request,\n marker=None,\n sort_dir='desc',\n sort_key='created_at',\n filters=None,\n paginate=False,\n ticket_type=None):\n if not filters or ('ticket_id' not in filters):\n limit = getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key, }\n\n if marker:\n kwargs['marker'] = marker\n if filters or filters is not None:\n kwargs.update(filters)\n\n if ticket_type:\n if 'ticket_type' in kwargs:\n kwargs['ticket_type'] = [kwargs['ticket_type']]\n kwargs['ticket_type'].append(ticket_type)\n else:\n kwargs['ticket_type'] = ticket_type\n\n LOG.debug('Ticket List Filter= ' + str(kwargs))\n tickets_list = afloclient(request).tickets.list(kwargs)\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n tickets = list(itertools.islice(tickets_list, request_size))\n\n # first and middle page condition\n if len(tickets) > page_size:\n tickets.pop(-1)\n has_more_data = True\n # middle page condition\n if marker is not None:\n has_prev_data = True\n # first page condition when reached via prev back\n elif sort_dir == 'asc' and marker is not None:\n has_more_data = True\n # last page condition\n elif marker is not None:\n has_prev_data = True\n else:\n tickets = list(tickets_list)\n\n return (tickets, has_more_data, has_prev_data)\n\n else:\n ticket_list = []\n # if user selected ticket_id,\n # Use ticket-get API.\n try:\n ticket = ticket_get_detailed(request, filters['ticket_id'])\n\n # Filtering project id\n if 'tenant_id' in filters and \\\n ticket.tenant_id != filters['tenant_id']:\n\n return (ticket_list, False, False)\n\n # Convert get data to list data\n last_workflow = filter(lambda workflow:\n workflow['status'] == 1,\n ticket.workflow)\n setattr(ticket, 'last_workflow', last_workflow[0])\n ticket_list.append(ticket)\n\n except exc.HTTPNotFound:\n pass\n\n return (ticket_list, False, False)\n\n\n# Get ticket template list(get all data)\ndef tickettemplates_list_detailed_get_all(request, marker=None):\n kwargs = {}\n if marker:\n kwargs['marker'] = marker\n\n tickets_iter = afloclient(request).tickettemplates.list(kwargs)\n\n return tickets_iter\n\n\ndef ticket_get_detailed(request, ticket_id):\n ticket = afloclient(request).tickets.get(ticket_id)\n\n return ticket\n\n\n# Get ticket template list\ndef tickettemplates_list_detailed(request,\n 
ticket_type=None,\n marker=None,\n sort_dir=['desc'],\n sort_key=['id'],\n filters=None,\n paginate=False,\n enable_expansion_filters=False):\n limit = getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key,\n 'enable_expansion_filters': enable_expansion_filters, }\n\n if ticket_type:\n kwargs['ticket_type'] = ticket_type\n\n if marker:\n kwargs['marker'] = marker\n\n tickets_list = afloclient(request).tickettemplates.list(kwargs)\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n tickets = list(itertools.islice(tickets_list, request_size))\n\n # first and middle page condition\n if len(tickets) > page_size:\n tickets.pop(-1)\n has_more_data = True\n # middle page condition\n if marker is not None:\n has_prev_data = True\n # first page condition when reached via prev back\n elif sort_dir == ['asc'] and marker is not None:\n has_more_data = True\n # last page condition\n elif marker is not None:\n has_prev_data = True\n else:\n tickets = list(tickets_list)\n\n return (tickets, has_more_data, has_prev_data)\n\n\n# Get ticket template\ndef tickettemplates_get(request, target_id):\n ticket = afloclient(request).tickettemplates.get(target_id)\n\n return ticket\n\n\n# Crate ticket\ndef ticket_create(request, fields):\n afloclient(request).tickets.create(fields)\n\n\n# Update ticket\ndef ticket_update(request, ticket_id, fields):\n afloclient(request).tickets.update(ticket_id, fields)\n\n\n# Get contract list\ndef contract_list_detailed(request,\n marker=None,\n sort_dir='desc,desc',\n sort_key='lifetime_start,contract_id',\n filters=None,\n paginate=False):\n limit = getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key, }\n\n if marker:\n kwargs['marker'] = marker\n if filters or filters is not None:\n kwargs.update(filters)\n\n contract_list = afloclient(request).contracts.list(kwargs)\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n contracts = list(itertools.islice(contract_list, request_size))\n\n # first and middle page condition\n if len(contracts) > page_size:\n contracts.pop(-1)\n has_more_data = True\n # middle page condition\n if marker is not None:\n has_prev_data = True\n # first page condition when reached via prev back\n elif sort_dir == 'asc,asc' and marker is not None:\n has_more_data = True\n # last page condition\n elif marker is not None:\n has_prev_data = True\n else:\n contracts = list(contract_list)\n\n return (contracts, has_more_data, has_prev_data)\n\n\ndef contract_get_detailed(request, contract_id):\n return afloclient(request).contracts.get(contract_id)\n\n\ndef catalog_list_detailed(request,\n marker=None,\n limit=None,\n sort_key='catalog_id',\n sort_dir='desc',\n force_show_deleted=None,\n filters=None,\n paginate=False):\n\n limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key, }\n\n if marker is not None:\n kwargs['marker'] = marker\n if force_show_deleted is not None:\n kwargs['force_show_deleted'] = force_show_deleted\n if filters is not None:\n 
kwargs.update(filters)\n\n catalog_list = afloclient(request).catalogs.list(kwargs)\n\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n catalogs = list(itertools.islice(catalog_list, request_size))\n\n if sort_dir == 'desc':\n if len(catalogs) > page_size:\n catalogs.pop(-1)\n has_more_data = True\n else:\n has_more_data = False\n\n if marker is not None:\n has_prev_data = True\n else:\n if len(catalogs) > page_size:\n catalogs.pop(-1)\n has_prev_data = True\n else:\n has_prev_data = False\n\n has_more_data = True\n catalogs.reverse()\n else:\n catalogs = list(catalog_list)\n\n return (catalogs, has_prev_data, has_more_data)\n\n\ndef catalog_get_detailed(request, catalog_id):\n return afloclient(request).catalogs.get(catalog_id)\n\n\ndef catalog_contents_get_detailed(request, catalog_id):\n return afloclient(request).catalog_contents.get(catalog_id)\n\n\ndef price_list_detailed(request,\n catalog_id,\n scope=None,\n lifetime=None,\n marker=None,\n limit=None,\n sort_key='lifetime_start',\n sort_dir='desc',\n force_show_deleted=None,\n filters=None,\n paginate=False):\n\n limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key, }\n\n if scope:\n kwargs['scope'] = scope\n if lifetime:\n kwargs['lifetime'] = lifetime\n if marker is not None:\n kwargs['marker'] = marker\n if force_show_deleted is not None:\n kwargs['force_show_deleted'] = force_show_deleted\n if filters is not None:\n kwargs.update(filters)\n\n price_list = afloclient(request).price.list(catalog_id, kwargs)\n\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n prices = list(itertools.islice(price_list, request_size))\n\n if sort_dir == 'desc':\n if len(prices) > page_size:\n prices.pop(-1)\n has_more_data = True\n else:\n has_more_data = False\n\n if marker is not None:\n has_prev_data = True\n else:\n if len(prices) > page_size:\n prices.pop(-1)\n has_prev_data = True\n else:\n has_prev_data = False\n\n has_more_data = True\n prices.reverse()\n\n else:\n prices = list(price_list)\n\n return (prices, has_prev_data, has_more_data)\n\n\ndef catalog_price_list(request,\n project_id,\n marker=None,\n limit=None,\n sort_key=None,\n sort_dir=None,\n force_show_deleted=None,\n filters=None,\n paginate=False):\n\n details = []\n\n _prev = False\n _more = False\n\n try:\n catalogs, _prev, _more = \\\n catalog_list_detailed(request,\n marker=marker,\n sort_key=sort_key,\n sort_dir=sort_dir,\n force_show_deleted=force_show_deleted,\n filters=filters,\n paginate=paginate)\n\n lifetime = get_datetime_now()\n for catalog in catalogs:\n prices, unsued_p, unsed_n = price_list_detailed(request,\n catalog.catalog_id,\n lifetime=lifetime)\n if prices is None or len(prices) == 0:\n continue\n\n price = None\n for p in prices:\n if p.scope and p.scope == project_id:\n price = p\n break\n else:\n if price is None and p.scope and p.scope == SCOPE_DEFAULT:\n price = p\n\n if price is None:\n continue\n\n detail = ProjectCatalog(catalog.catalog_id,\n price.scope,\n price.seq_no,\n catalog.catalog_name,\n price.price,\n project_id)\n\n details.append(detail)\n\n except Exception:\n _prev = False\n _more = False\n exceptions.handle(request,\n _(\"Unable to retrieve project catalog list.\"))\n\n return details, _prev, _more\n\n\ndef price_get_with_project_id(request, project_id, catalog_id, scope, seq_no):\n\n prices, 
_prev, _more = price_list_detailed(request, catalog_id,\n filters={'scope':\n project_id},\n lifetime=get_datetime_now(),\n paginate=False)\n\n price = prices[0] if prices and 0 < len(prices) else None\n\n if price is None:\n price = afloclient(request).price.get(catalog_id, scope, seq_no)\n\n return price\n\n\ndef price_update_or_create(request,\n catalog_id,\n scope,\n fields,\n now=None,\n del_flg=False):\n\n if now is None:\n now = get_datetime_now()\n\n old_lifetime = datetime.datetime.strptime(now, '%Y-%m-%dT%H:%M:%S.%f')\n old_lifetime = old_lifetime - datetime.timedelta(seconds=1)\n old_lifetime_str = old_lifetime.strftime('%Y-%m-%dT%H:%M:%S.%f')\n\n new_lifetime = now\n fields[\"lifetime_start\"] = new_lifetime\n fields[\"lifetime_end\"] = \"9999-12-31T23:59:59.999999\"\n\n prices, _prev, _more = price_list_detailed(request, catalog_id,\n filters={'scope': scope},\n lifetime=now,\n paginate=False)\n\n price = prices[0] if prices and 0 < len(prices) else None\n\n if price is not None:\n afloclient(request).price.update(price.catalog_id,\n price.scope,\n price.seq_no,\n {\"lifetime_end\": old_lifetime_str})\n if del_flg:\n return {}\n\n return afloclient(request).price.create(catalog_id,\n scope,\n fields)\n\n\ndef price_list_detailed2(request, catalog_id):\n kwargs = {'lifetime': get_datetime_utcnow(), }\n return afloclient(request).price.list(catalog_id, kwargs)\n\n\ndef get_datetime_now():\n now = datetime.datetime.utcnow()\n return now.strftime('%Y-%m-%dT%H:%M:%S.%f')\n\n\ndef get_datetime_utcnow():\n utcnow = datetime.datetime.utcnow()\n return utcnow.strftime('%Y-%m-%dT%H:%M:%S.%f')\n\n\nclass ValidCatalog(object):\n '''Valid Catalog Class\n '''\n def __init__(self,\n catalog_id,\n catalog_name,\n public_seq_no,\n public_price,\n private_seq_no,\n private_price,\n project_id):\n\n self.catalog_id = escape(catalog_id)\n self.catalog_name = _(escape(catalog_name)) # noqa\n self.public_seq_no = escape(public_seq_no)\n self.public_price = _get_price_string(\n escape(_get_format_price(public_price)))\n self.private_seq_no = escape(private_seq_no)\n self.private_price = _get_price_string(\n escape(_get_format_price(private_price)))\n self.project_id = escape(project_id)\n\n\ndef catalog_scope_list(request,\n project_id,\n marker=None,\n limit=None,\n sort_key=None,\n sort_dir=None,\n force_show_deleted=None,\n filters=None,\n paginate=False):\n\n catalog_scope_lists = []\n public_lists = {}\n private_lists = {}\n\n _prev = False\n _more = False\n\n try:\n catalogs, _prev, _more = catalog_list_detailed(\n request,\n marker=marker,\n sort_key=sort_key,\n sort_dir=sort_dir,\n force_show_deleted=force_show_deleted,\n filters=filters,\n paginate=paginate)\n\n lifetime = get_datetime_now()\n res_public, unused_p, unused_m = valid_catalog_list(request,\n refine_flg=True,\n lifetime=lifetime)\n catalog_id_wk = None\n for public_wk in res_public:\n if catalog_id_wk == public_wk.catalog_id:\n continue\n public_lists[public_wk.catalog_id] = public_wk\n catalog_id_wk = public_wk.catalog_id\n\n res_private, unused_p, unused_m = valid_catalog_list(request,\n scope=project_id,\n refine_flg=True,\n lifetime=lifetime)\n catalog_id_wk = None\n for private_wk in res_private:\n if catalog_id_wk == private_wk.catalog_id:\n continue\n private_lists[private_wk.catalog_id] = private_wk\n catalog_id_wk = private_wk.catalog_id\n\n for catalog in catalogs:\n seq_no_pub = None\n seq_no_pri = None\n price_pub = None\n price_pri = None\n\n if catalog.catalog_id in public_lists:\n seq_no_pub = 
public_lists[catalog.catalog_id].price_seq_no\n price_pub = public_lists[catalog.catalog_id].price\n if catalog.catalog_id in private_lists:\n seq_no_pri = private_lists[catalog.catalog_id].price_seq_no\n price_pri = private_lists[catalog.catalog_id].price\n\n catalog_scope = ValidCatalog(catalog.catalog_id,\n catalog.catalog_name,\n seq_no_pub,\n price_pub,\n seq_no_pri,\n price_pri,\n project_id)\n\n catalog_scope_lists.append(catalog_scope)\n\n except Exception:\n _prev = False\n _more = False\n exceptions.handle(request,\n _(\"Unable to retrieve catalog scope list.\"))\n\n return catalog_scope_lists, _prev, _more\n\n\ndef valid_catalog_list(request,\n catalog_id=None,\n scope='Default',\n refine_flg=None,\n lifetime=None,\n marker=None,\n limit=None,\n sort_key='catalog_id',\n sort_dir='asc',\n filters=None,\n paginate=False):\n\n if lifetime is None:\n lifetime = get_datetime_now()\n\n limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key, }\n\n kwargs['scope'] = scope\n kwargs['lifetime'] = lifetime\n if catalog_id is not None:\n kwargs['catalog_id'] = catalog_id\n if refine_flg is not None:\n kwargs['refine_flg'] = refine_flg\n if marker is not None:\n kwargs['catalog_marker'] = marker.split('|')[0]\n kwargs['catalog_scope_marker'] = marker.split('|')[1]\n kwargs['price_marker'] = marker.split('|')[2]\n if filters is not None:\n kwargs.update(filters)\n\n valid_catalog = afloclient(request).valid_catalog.list(kwargs)\n\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n valid_catalog_wk = list(itertools.islice(valid_catalog, request_size))\n\n if sort_dir == 'desc':\n if len(valid_catalog_wk) > page_size:\n valid_catalog_wk.pop(-1)\n has_more_data = True\n else:\n has_more_data = False\n\n if marker is not None:\n has_prev_data = True\n else:\n if len(valid_catalog_wk) > page_size:\n valid_catalog_wk.pop(-1)\n has_prev_data = True\n else:\n has_prev_data = False\n\n has_more_data = True\n valid_catalog_wk.reverse()\n\n else:\n valid_catalog_wk = list(valid_catalog)\n\n return (valid_catalog_wk, has_prev_data, has_more_data)\n\n\ndef _get_format_price(value):\n if None == value:\n return \"-\"\n else:\n return value\n\n\ndef catalog_scope_update_or_create(request,\n catalog_id,\n scope,\n fields,\n now=None,\n del_flg=False):\n\n if now is None:\n now = get_datetime_now()\n\n old_lifetime = datetime.datetime.strptime(now, '%Y-%m-%dT%H:%M:%S.%f')\n old_lifetime = old_lifetime - datetime.timedelta(seconds=1)\n old_lifetime_str = old_lifetime.strftime('%Y-%m-%dT%H:%M:%S.%f')\n\n new_lifetime = now\n fields['lifetime_start'] = new_lifetime\n fields['lifetime_end'] = '9999-12-31T23:59:59.999999'\n\n catalog_scape, _prev, _more = catalog_scope_list_detailed(request,\n catalog_id,\n scope,\n now,\n paginate=False)\n\n catalog_scape = catalog_scape[0] \\\n if catalog_scape and 0 < len(catalog_scape) else None\n\n if catalog_scape is not None:\n afloclient(request).catalog_scope.update(\n catalog_scape.id,\n {'lifetime_end': old_lifetime_str})\n\n if del_flg:\n return {}\n\n return afloclient(request).catalog_scope.create(catalog_id,\n scope,\n fields)\n\n\ndef catalog_scope_list_detailed(request,\n catalog_id,\n scope=None,\n lifetime=None,\n marker=None,\n limit=None,\n sort_key='lifetime_start',\n sort_dir='desc',\n force_show_deleted=None,\n filters=None,\n 
paginate=False):\n\n limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)\n page_size = utils.get_page_size(request)\n\n if paginate:\n request_size = page_size + 1\n else:\n request_size = limit\n\n kwargs = {'limit': limit,\n 'sort_dir': sort_dir,\n 'sort_key': sort_key, }\n\n if catalog_id:\n kwargs['catalog_id'] = catalog_id\n if scope:\n kwargs['scope'] = scope\n if lifetime:\n kwargs['lifetime'] = lifetime\n if marker is not None:\n kwargs['marker'] = marker\n if force_show_deleted is not None:\n kwargs['force_show_deleted'] = force_show_deleted\n if filters is not None:\n kwargs.update(filters)\n\n c_scope_list = afloclient(request).catalog_scope.list(kwargs)\n\n has_prev_data = False\n has_more_data = False\n\n if paginate:\n catalog_scope = list(itertools.islice(c_scope_list, request_size))\n\n if sort_dir == 'desc':\n if len(catalog_scope) > page_size:\n catalog_scope.pop(-1)\n has_more_data = True\n else:\n has_more_data = False\n\n if marker is not None:\n has_prev_data = True\n else:\n if len(catalog_scope) > page_size:\n catalog_scope.pop(-1)\n has_prev_data = True\n else:\n has_prev_data = False\n\n has_more_data = True\n catalog_scope.reverse()\n\n else:\n catalog_scope = list(c_scope_list)\n\n return (catalog_scope, has_prev_data, has_more_data)\n", "sub_path": "nec_portal/api/ticket.py", "file_name": "ticket.py", "file_ext": "py", "file_size_in_byte": 27763, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base.APIVersionManager", "line_number": 39, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base", "line_number": 39, "usage_type": "name"}, {"api_name": "nec_portal.local.nec_portal_settings", "line_number": 43, "usage_type": "argument"}, {"api_name": "nec_portal.local.nec_portal_settings", "line_number": 44, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 54, "usage_type": "call"}, {"api_name": "decimal.ROUND_DOWN", "line_number": 55, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 57, "usage_type": "call"}, {"api_name": "decimal.ROUND_DOWN", "line_number": 58, "usage_type": "name"}, {"api_name": "decimal.InvalidOperation", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.utils.html.escape", "line_number": 77, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 78, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 79, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 80, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 80, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 81, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 82, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base.url_for", "line_number": 87, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base", "line_number": 87, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 88, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 89, "usage_type": "argument"}, {"api_name": "afloclient.Client", "line_number": 90, "usage_type": "call"}, {"api_name": "horizon.utils.memoized.memoized", "line_number": 85, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 103, "usage_type": 
"argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 104, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 104, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 133, "usage_type": "call"}, {"api_name": "afloclient.exc.HTTPNotFound", "line_number": 173, "usage_type": "attribute"}, {"api_name": "afloclient.exc", "line_number": 173, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 205, "usage_type": "argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 206, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 206, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 229, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 274, "usage_type": "argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 275, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 275, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 296, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 330, "usage_type": "argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 331, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 331, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 355, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 401, "usage_type": "argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 402, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 402, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 430, "usage_type": "call"}, {"api_name": "horizon.exceptions.handle", "line_number": 514, "usage_type": "call"}, {"api_name": "horizon.exceptions", "line_number": 514, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 515, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 546, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 546, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 547, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 580, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 580, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 585, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 585, "usage_type": "attribute"}, {"api_name": "django.utils.html.escape", "line_number": 601, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 602, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 602, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 603, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 605, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 606, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 608, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 609, "usage_type": "call"}, {"api_name": "horizon.exceptions.handle", "line_number": 687, "usage_type": "call"}, {"api_name": "horizon.exceptions", "line_number": 687, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 
688, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 708, "usage_type": "argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 709, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 709, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 739, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 783, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 783, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 784, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 825, "usage_type": "argument"}, {"api_name": "horizon.utils.functions.get_page_size", "line_number": 826, "usage_type": "call"}, {"api_name": "horizon.utils.functions", "line_number": 826, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 856, "usage_type": "call"}]} +{"seq_id": "261422052", "text": "#####################################################################################################\n# LGBIO2050 - TP1 : PCA & ICA\n# Helper Functions to plot signals\n#####################################################################################################\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np \n\nimport os\nroot = os.getcwd()\nfrom pathlib import Path\n\nplt.rc('xtick', labelsize=16) \nplt.rc('ytick', labelsize=16) \nplt.rcParams.update({'font.size': 16}) \n\n\"\"\"--------------------------------------------------------------------------------------------------\nPLOT DATA POINTS IN A 2-DIMENSIONAL SPACE\nINPUTS: \n - x : list of coordinates along the first axis\n - y : list of coordinates along the second axis\n - title : title of the graph\n - x_label : label of the x axis\n - y_label : label of the y axis \n - show_fig : True if the plot must be displayed on screen, False otherwise\n - (file_path) : path where the graph must be saved (if needed)\n--------------------------------------------------------------------------------------------------\"\"\"\ndef scatter_plot(x, y, title, x_label, y_label, show_fig, file_path=None): \n plt.scatter(x, y, marker='.')\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n plt.tight_layout()\n\n # Save file\n if file_path:\n if not os.path.exists(Path(file_path).parent):\n os.makedirs(Path(file_path).parent)\n plt.savefig(file_path)\n\n # Display graph on screen \n if show_fig:\n plt.show()\n plt.close()\n\n\n\"\"\"--------------------------------------------------------------------------------------------------\nPLOT ECG SIGNALS DEPENDING ON THE TIME\nINPUTS: \n - ecg_signals : a matrix of [(n+1)xm] dimensions where n (nb of channels) << m \n first row must be the time vector! 
\n - show_fig : True if the plot must be displayed on screen, False otherwise\n - (file_path) : path where the graph must be saved (if needed)\n--------------------------------------------------------------------------------------------------\"\"\"\ndef ecg_plot(ecg_signals, show_fig, ch_names, file_path=None):\n\n time = np.linspace(0, (ecg_signals.shape[1]/1000), ecg_signals.shape[1])\n # Same y scale for all channels\n bottom = np.amin(ecg_signals[0:ecg_signals.shape[1]])\n top = np.amax(ecg_signals[0:ecg_signals.shape[1]])\n\n # Plot each channel\n for idx in range(1, ecg_signals.shape[0]+1):\n fig = plt.figure(figsize=(16,8))\n if idx == ecg_signals.shape[0]-1:\n fig.patch.set_alpha(0) \n plt.plot(time, ecg_signals[idx-1], linewidth=1)\n plt.ylim(bottom, top)\n plt.title(ch_names[idx-1])\n \n\n # Save file\n if file_path:\n if not os.path.exists(Path(file_path).parent):\n os.makedirs(Path(file_path).parent)\n plt.savefig(file_path)\n\n # Display graph on screen\n if show_fig:\n plt.show()\n plt.close()\n\n\n\"\"\"--------------------------------------------------------------------------------------------------\nPLOT ECG SIGNALS DEPENDING ON THE TIME\nINPUTS: \n - ecg_signals : a matrix of [(n+1)xm] dimensions where n (nb of channels) << m \n first row must be the time vector! \n - show_fig : True if the plot must be displayed on screen, False otherwise\n - (file_path) : path where the graph must be saved (if needed)\n--------------------------------------------------------------------------------------------------\"\"\"\ndef ecg_plotbis(ecg_signals, show_fig, ch_names, target, file_path=None):\n\n time = np.linspace(0, (ecg_signals.shape[1]/1000), ecg_signals.shape[1])\n bottom = np.amin(ecg_signals[0:ecg_signals.shape[1]])\n top = np.amax(ecg_signals[0:ecg_signals.shape[1]])\n\n\n for idx in range(1, ecg_signals.shape[0]+1):\n fig = plt.figure(figsize=(16,8))\n if idx == ecg_signals.shape[0]-1:\n fig.patch.set_alpha(0)\n \n plt.plot(time, ecg_signals[idx-1], linewidth=1)\n for i in target:\n plt.vlines(x = i/1000, ymin = bottom, ymax = top)\n plt.ylim(bottom, top)\n plt.title(ch_names[idx-1])\n \n \n\n # Save file\n if file_path:\n if not os.path.exists(Path(file_path).parent):\n os.makedirs(Path(file_path).parent)\n plt.savefig(file_path)\n\n # Display graph on screen\n if show_fig:\n plt.show()\n plt.close()\n \n \n \n\n\"\"\"--------------------------------------------------------------------------------------------------\nPLOT EEG SIGNALS DEPENDING ON THE TIME\nINPUTS: \n - eeg_signals : a matrix of [(n+1)xm] dimensions where n (nb of channels) << m \n first row must be the time vector! 
\n - label : list of n strings with channel names (do not consider time)\n - show_fig : True if the plot must be displayed on screen, False otherwise\n - (file_path) : path where the graph must be saved (if needed)\n--------------------------------------------------------------------------------------------------\"\"\"\ndef eeg_plot(eeg_signals, label, show_fig, file_path=None): \n # Same y scale for all channels\n bottom = np.amin(eeg_signals[1:eeg_signals.shape[0]])\n top = np.amax(eeg_signals[1:eeg_signals.shape[0]])\n\n # One big figure to frame the whole\n fig = plt.figure(figsize=(12,8))\n ax0 = fig.add_subplot(111) \n plt.subplots_adjust(hspace=-0.5)\n ax0.tick_params(labelcolor='black', top=False, bottom=False, left=False, right=False)\n\n # Plot each channel\n for idx in range(1, eeg_signals.shape[0]):\n if idx == 1 :\n _ax = fig.add_subplot(eeg_signals.shape[0]-1, 1, idx)\n ax = _ax\n else: \n ax = fig.add_subplot(eeg_signals.shape[0]-1, 1, idx, sharex=_ax)\n if idx == eeg_signals.shape[0]-1:\n ax.tick_params(labelcolor='black', top=False, bottom=True, left=False, right=False)\n ax.patch.set_alpha(0)\n ax.get_yaxis().set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_xlabel('Time (sec)')\n else:\n ax.axis('off') \n ax.plot(eeg_signals[0], eeg_signals[idx], linewidth=0.5)\n ax.set_ylim(bottom, top)\n plt.text(-0.45, 0, label[idx-1])\n\n ax0.get_yaxis().set_visible(False)\n ax0.get_xaxis().set_visible(False)\n\n # Save file\n if file_path:\n if not os.path.exists(Path(file_path).parent):\n os.makedirs(Path(file_path).parent)\n plt.savefig(file_path)\n \n # Display graph on screen\n if show_fig:\n plt.show()\n plt.close()\n ", "sub_path": "make_graphs.py", "file_name": "make_graphs.py", "file_ext": "py", "file_size_in_byte": 6610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.getcwd", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "call"}, 
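The `make_graphs.py` record above documents its plotting helpers only through docstrings. As a minimal usage sketch of `ecg_plot` under stated assumptions — the helper hard-codes a 1 kHz sampling rate when it builds its time axis, and the synthetic signals, channel names, and output path below are invented for illustration:

```python
import numpy as np
from make_graphs import ecg_plot  # assumes the record's file is on the import path

fs = 1000                      # ecg_plot divides sample indices by 1000 internally
t = np.linspace(0, 2, 2 * fs)  # two seconds of samples
# Fake 3-channel "ECG": one sinusoid per channel, stacked as rows.
signals = np.vstack([np.sin(2 * np.pi * f * t) for f in (1.0, 1.3, 1.7)])

# One figure per channel with shared y-limits; shows them on screen
# and saves the last figure to the (hypothetical) output path.
ecg_plot(signals, show_fig=True, ch_names=['I', 'II', 'III'],
         file_path='figures/ecg_demo.png')
```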
{"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 77, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 116, "usage_type": "call"}, {"api_name": "os.makedirs", 
"line_number": 117, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.amin", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 172, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 173, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}]} +{"seq_id": "562369743", "text": "import pandas as pd\nimport numpy as np\nimport talib as ta\nimport tushare as ts\nfrom matplotlib import rc\nimport re\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\nrc('mathtext', default='regular')\n\nbuy_stock_info = []\n\n# import seaborn as sns\n# sns.set_style('white')\n\ndef getHTMLText(url):\n try:\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n return \"\"\n\ndef getCiXinList(list,stockURL='http://www.shdjt.com/flsort.asp?lb=993075'):\n getStockList(list,stockURL)\n\ndef getStockList(lst,stockURL='http://www.shdjt.com/flsort.asp?lb=399905'):\n html = getHTMLText(stockURL)\n soup = BeautifulSoup(html, 'html.parser')\n a = soup.find_all('a',class_='ared')\n b = soup.find_all('a',class_='ablack')\n c = soup.find_all('a',class_='ablue')\n a.extend(b)\n a.extend(c)\n for item in a:\n try:\n href = item.attrs['href']\n lst.append(re.findall(r\"\\d{6}\", href)[0])\n except:\n continue\n\n\n\ndef getStock():\n str = '''300741 华宝股份 603080 新疆火炬 603356 华菱精工 \n 603711 香飘飘 300732 设研院 300735 光弘科技 603059 倍加洁 \n 600025 华能水电 002918 蒙娜丽莎 603712 七一二 002927 泰永长征 \n 002922 伊戈尔 002916 深南电路 002923 润都股份 603056 德邦股份 \n 603329 上海雅仕 603809 豪能股份 300740 御家汇 603848 好太太 300730 \n 科创信息 601838 成都银行 603161 科华控股 002929 润建通信 603709 \n 中源家居 002920 德赛西威 300644 南京聚隆 603680 今创集团 002913 \n 奥士康 002915 中欣氟材 603895 天永智能 300737 科顺股份 603506 \n 南都物业 002921 联诚精密 300684 中石科技 002925 盈趣科技 
603655\n            朗博科技 300624 万兴科技 300733 西菱动力 603871 嘉友国际 300731 \n            科创新源 600901 江苏租赁 300729 乐歌股份 002928 华夏航空 603477 \n            振静股份 601828 美凯龙 002926 华西证券 603516 \n            淳中科技 002917 金奥博 603283 赛腾股份 603890 春秋电子 603156 \n            养元饮品 002919 名臣健康 300738 奥飞数据 300739 明阳电路'''\n    str_arr = str.split()\n    stock_arr = []\n    for item in str_arr:\n        matchObj = re.match(r'(\\d)', item, re.M | re.I)\n        if (matchObj):\n            stock_arr.append(item)\n    return stock_arr\n\n\ndef analyzeBuyMACD(df,code,days=30):\n    if (df.shape[0] - 32 > days):\n        stock_df = df.tail(2)\n        #print(stock_df)\n        date_str = time.strftime(\"%Y-%m-%d\", time.localtime())\n        #print(date_str)\n        macd_array = stock_df['macd'].tolist()\n        date_array = stock_df['date'].tolist()\n        dea_array = stock_df['dea'].tolist()\n        diff_array = stock_df['diff'].tolist()\n        open_array =stock_df['open'].tolist()\n        high_array =stock_df['high'].tolist()\n\n        if macd_array[1] > 0 and macd_array[1] < 0.5 and macd_array[0] < 0 and diff_array[1] < -2:\n            if date_str == date_array[1]:\n                buy_stock_info.append([code,date_array[1],diff_array[1]])\n                print('Buy {} on {} when diff :{}'.format(code, date_array[1], diff_array[1]))\n        else:\n            pass\n\ndef analyzeSellMACD(df,code):\n    stock_df = df.tail(2)\n\n    macd_array = stock_df['macd'].tolist()\n    date_array = stock_df['date'].tolist()\n    dea_array = stock_df['dea'].tolist()\n    diff_array = stock_df['diff'].tolist()\n    open_array = stock_df['open'].tolist()\n    high_array = stock_df['high'].tolist()\n    # sell when the MACD histogram falls while still positive (upward momentum fading)\n    if macd_array[0] > macd_array[1] and macd_array[1] > 0:\n        print('Sell on '+code+' '+date_array[1])\n\ndef getMACD(code):\n    dw = ts.get_k_data(code)\n    close = dw.close.values\n    dw['diff'], dw['dea'], dw['macd'] = ta.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)\n    return dw[['date', 'close', 'high', 'open', 'macd', 'diff', 'dea']]\n\n\nif __name__ == '__main__':\n    #for stock in ['601952']:\n    stock_list = []\n    getCiXinList(stock_list)\n    print(stock_list)\n    for stock in stock_list:\n        df = getMACD(stock)\n        #print(df)\n        analyzeBuyMACD(df,stock, days=30)\n    for stock_item in buy_stock_info:\n        print(stock_item)\n", "sub_path": "Ta_Lib/ta_lib_jenkins.py", "file_name": "ta_lib_jenkins.py", "file_ext": "py", "file_size_in_byte": 4346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.rc", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "re.match", "line_number": 65, "usage_type": "call"}, {"api_name": "re.M", "line_number": 65, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 65, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 75, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 75, "usage_type": "call"}, {"api_name": "tushare.get_k_data", "line_number": 104, "usage_type": "call"}, {"api_name": "talib.MACD", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "339957247", "text": "from MyUtil import MyUtil as MyUtil\nfrom ElasticNodes import ElasticNodes\nfrom MySingletons import MyDevice\n\nimport numpy as np\nimport torch\n\n\n# class ReverseLayerFunction(torch.autograd.Function):\n#     @staticmethod\n#     def forward(self, x, alpha=1.0):\n#         self.alpha = alpha\n#\n#         return x.view_as(x)\n#\n#     @staticmethod\n#     def backward(self, grad_output):\n#         output = grad_output.neg() * self.alpha\n#\n#         return 
output, None\n\n\nclass NeuralNetwork(ElasticNodes):\n layers = None\n layer_value = None\n output_layer_value = None\n\n weight = None\n bias = None\n momentum = None\n bias_momentum = None\n\n output_weight = None\n output_bias = None\n output_momentum = None\n output_bias_momentum = None\n\n activation_function = None\n output_activation_function = None\n loss_function = None\n\n learning_rate = 0.01\n momentum_rate = 0.95\n\n error_value = None\n loss_value = None\n classification_rate = None\n misclassified = None\n\n output_beta = None\n output_beta_decreasing_factor = None\n\n __Eh = None\n __Eh2 = None\n\n @property\n def number_hidden_layers(self):\n return len(self.layers) - 2\n\n @property\n def input_size(self):\n return self.layers[0]\n\n @property\n def output_size(self):\n return self.layers[-1]\n\n @property\n def output(self):\n return self.output_layer_value\n\n @property\n def raw_output(self):\n return torch.max(self.output, axis=1)\n\n @property\n def outputed_classes(self):\n return torch.argmax(self.output, axis=1)\n\n @property\n def residual_error(self):\n return 1 - self.raw_output.values\n\n ACTIVATION_FUNCTION_AFFINE = 1\n ACTIVATION_FUNCTION_SIGMOID = ACTIVATION_FUNCTION_AFFINE + 1\n ACTIVATION_FUNCTION_TANH = ACTIVATION_FUNCTION_SIGMOID + 1\n ACTIVATION_FUNCTION_RELU = ACTIVATION_FUNCTION_TANH + 1\n ACTIVATION_FUNCTION_LINEAR = ACTIVATION_FUNCTION_RELU + 1\n ACTIVATION_FUNCTION_SOFTMAX = ACTIVATION_FUNCTION_LINEAR + 1\n ACTIVATION_FUNCTION_REVERSE_LAYER = ACTIVATION_FUNCTION_SOFTMAX + 1\n\n LOSS_FUNCTION_MSE = ACTIVATION_FUNCTION_REVERSE_LAYER + 1\n LOSS_FUNCTION_CROSS_ENTROPY = LOSS_FUNCTION_MSE + 1\n\n PRUNE_NODE_STRATEGY_SINGLE = LOSS_FUNCTION_CROSS_ENTROPY + 1\n PRUNE_NODE_STRATEGY_MULTIPLE = PRUNE_NODE_STRATEGY_SINGLE + 1\n\n def __init__(self, layers: list, init_weights: bool = True):\n self.layers = layers\n\n self.weight = []\n self.bias = []\n self.momentum = []\n self.bias_momentum = []\n self.activation_function = []\n\n for i in range(self.number_hidden_layers):\n nodes_before = layers[i]\n nodes_after = layers[i + 1]\n\n if init_weights:\n self.weight.append(self.xavier_weight_initialization(nodes_after, nodes_before))\n self.bias.append(self.xavier_weight_initialization(1, nodes_after))\n self.momentum.append(torch.zeros(self.weight[i].shape, dtype=torch.float, device=MyDevice().get()))\n self.bias_momentum.append(torch.zeros(self.bias[i].shape, dtype=torch.float, device=MyDevice().get()))\n else:\n self.weight.append(None)\n self.bias.append(None)\n self.momentum.append(None)\n self.bias_momentum.append(None)\n self.momentum_rate = 0\n\n self.activation_function.append(self.ACTIVATION_FUNCTION_SIGMOID)\n\n if init_weights:\n nodes_before = layers[-2]\n nodes_after = layers[-1]\n\n self.output_weight = self.xavier_weight_initialization(nodes_after, nodes_before)\n self.output_bias = self.xavier_weight_initialization(1, nodes_after)\n self.output_momentum = torch.zeros(self.output_weight.shape, dtype=torch.float, device=MyDevice().get())\n self.output_bias_momentum = torch.zeros(self.output_bias.shape, dtype=torch.float, device=MyDevice().get())\n else:\n self.output_weight = None\n self.output_bias = None\n self.output_momentum = None\n self.output_bias_momentum = None\n self.momentum_rate = 0\n\n self.output_activation_function = self.ACTIVATION_FUNCTION_SOFTMAX\n self.loss_function = self.LOSS_FUNCTION_CROSS_ENTROPY\n\n ElasticNodes.__init__(self, len(self.layers))\n\n ##### Weight initializations #####\n\n def 
xavier_weight_initialization(self, n_out: int, n_in: int, uniform: bool = False):\n        if uniform:\n            return torch.nn.init.xavier_uniform_(tensor=torch.zeros(int(n_out), int(n_in), dtype=torch.float,\n                                                                     requires_grad=True, device=MyDevice().get()))\n        return torch.nn.init.xavier_normal_(tensor=torch.zeros(int(n_out), int(n_in), dtype=torch.float,\n                                                                requires_grad=True, device=MyDevice().get()))\n\n    def he_weight_initialization(self, n_out, n_in, shape=None):\n        #TODO\n        mean = 0.0\n        sigma = np.sqrt(2 / n_in)\n        if shape is None:\n            shape = (n_out, n_in)\n        return np.random.normal(mean, sigma, shape)\n\n    ##### Noise #####\n\n    def masking_noise(self, x: torch.tensor, noise_ratio: float = 0.0):\n        return x.detach().masked_fill(torch.rand(x.shape, device=MyDevice().get()) <= noise_ratio, 0)\n\n    ##### Activation functions #####\n\n    @staticmethod\n    def sigmoid(z: torch.tensor):\n        return torch.sigmoid(z)\n\n    @staticmethod\n    def tanh(z):\n        return torch.tanh(z)\n\n    @staticmethod\n    def relu(z):\n        return torch.nn.functional.relu(z)\n\n    @staticmethod\n    def linear(layer_value: torch.tensor, weight: torch.tensor, bias: torch.tensor):\n        return torch.nn.functional.linear(layer_value, weight, bias)\n\n    @staticmethod\n    def softmax(z, axis: int = 1):\n        return torch.nn.functional.softmax(z, dim=axis)\n\n    def reset_grad(self):\n        for i in range(self.number_hidden_layers):\n            self.weight[i] = self.weight[i].detach()\n            self.bias[i] = self.bias[i].detach()\n            self.weight[i].requires_grad = True\n            self.bias[i].requires_grad = True\n\n        self.output_weight = self.output_weight.detach()\n        self.output_bias = self.output_bias.detach()\n        self.output_weight.requires_grad = True\n        self.output_bias.requires_grad = True\n\n    def feedforward(self, x: torch.Tensor, y: torch.Tensor, train: bool = False):\n        return self.forward_pass(x, train=train).calculate_error(y)\n\n    def backpropagate(self):\n        self.loss_value.backward()\n\n        return self\n\n    def test(self, x: torch.Tensor, y: torch.Tensor, is_beta_updatable: bool = False):\n        self.feedforward(x=x, y=y)\n\n        m = y.shape[0]\n\n        true_classes = torch.argmax(y, axis=1)\n        self.misclassified = torch.sum(torch.ne(self.outputed_classes, true_classes)).item()\n        self.classification_rate = 1 - self.misclassified / m\n\n        if is_beta_updatable:\n            # compare predicted class labels against the ground truth, sample by sample\n            class_label = torch.argmax(self.output_layer_value, axis=1)\n            for i in range(m):\n                if true_classes[i] == class_label[i]:\n                    # correct prediction: decay beta, clamped at zero from below\n                    self.output_beta = max(self.output_beta * self.output_beta_decreasing_factor, 0)\n                    self.output_beta_decreasing_factor = max(self.output_beta_decreasing_factor - 0.01, 0)\n                else:\n                    self.output_beta = max(self.output_beta * (1 + self.output_beta_decreasing_factor), 1)\n                    self.output_beta_decreasing_factor = max(self.output_beta_decreasing_factor + 0.01, 1)\n\n        return self\n\n    def train(self, x: torch.Tensor, y: torch.Tensor, weight_no: int = None, is_neg_grad: bool = False):\n        self.feedforward(x=x, y=y, train=True).backpropagate()\n\n        if weight_no is None:\n            for weight_no in range(self.number_hidden_layers, -1, -1):\n                self.update_weight(weight_no=weight_no, is_neg_grad=is_neg_grad)\n        else:\n            self.update_weight(weight_no=weight_no, is_neg_grad=is_neg_grad)\n\n    def update_weight(self, weight_no: int, is_neg_grad: bool = False):\n        if weight_no >= self.number_hidden_layers:\n            dW: torch.Tensor = self.learning_rate * self.output_weight.grad\n            db: torch.Tensor = self.learning_rate * self.output_bias.grad\n            if self.momentum_rate > 0:\n                self.output_momentum: torch.Tensor = self.momentum_rate * self.output_momentum + dW\n                self.output_bias_momentum: torch.Tensor 
= self.momentum_rate * self.output_bias_momentum + db\n dW: torch.Tensor = self.output_momentum\n db: torch.Tensor = self.output_bias_momentum\n if is_neg_grad:\n self.output_weight: torch.Tensor = self.output_weight - dW.neg()\n self.output_bias: torch.Tensor = self.output_bias - db.neg()\n else:\n self.output_weight: torch.Tensor = self.output_weight - dW\n self.output_bias: torch.Tensor = self.output_bias - db\n else:\n dW: torch.Tensor = self.learning_rate * self.weight[weight_no].grad\n db: torch.Tensor = self.learning_rate * self.bias[weight_no].grad\n if self.momentum_rate > 0:\n self.momentum[weight_no]: torch.Tensor = self.momentum_rate * self.momentum[weight_no] + dW\n self.bias_momentum[weight_no]: torch.Tensor = self.momentum_rate * self.bias_momentum[weight_no] + db\n dW: torch.Tensor = self.momentum[weight_no]\n db: torch.Tensor = self.bias_momentum[weight_no]\n if is_neg_grad:\n self.weight[weight_no]: torch.Tensor = self.weight[weight_no] - dW.neg()\n self.bias[weight_no]: torch.Tensor = self.bias[weight_no] - db.neg()\n else:\n self.weight[weight_no]: torch.Tensor = self.weight[weight_no] - dW\n self.bias[weight_no]: torch.Tensor = self.bias[weight_no] - db\n\n def forward_pass(self, x: torch.Tensor, train: bool = False):\n if train:\n self.reset_grad()\n self.layer_value = []\n self.layer_value.append(x)\n\n for i in range(self.number_hidden_layers):\n if self.activation_function[i] == self.ACTIVATION_FUNCTION_AFFINE:\n self.layer_value.append(self.linear(self.layer_value[i], self.weight[i], self.bias[i]))\n elif self.activation_function[i] == self.ACTIVATION_FUNCTION_SIGMOID:\n self.layer_value.append(self.sigmoid(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))\n elif self.activation_function[i] == self.ACTIVATION_FUNCTION_TANH:\n self.layer_value.append(self.tanh(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))\n elif self.activation_function[i] == self.ACTIVATION_FUNCTION_RELU:\n self.layer_value.append(self.relu(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))\n elif self.activation_function[i] == self.ACTIVATION_FUNCTION_LINEAR:\n raise TypeError('Not implemented')\n elif self.activation_function[i] == self.ACTIVATION_FUNCTION_SOFTMAX:\n self.layer_value.append(self.softmax(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))\n elif self.activation_function[i] == self.ACTIVATION_FUNCTION_REVERSE_LAYER:\n self.layer_value.append(self.reverse_layer(self.layer_value[i]))\n\n if self.output_activation_function == self.ACTIVATION_FUNCTION_AFFINE:\n self.output_layer_value = self.linear(self.layer_value[-1], self.output_weight, self.output_bias)\n elif self.output_activation_function == self.ACTIVATION_FUNCTION_SIGMOID:\n self.output_layer_value = self.sigmoid(self.linear(self.layer_value[-1], self.output_weight, self.output_bias))\n elif self.output_activation_function == self.ACTIVATION_FUNCTION_TANH:\n self.output_layer_value = self.tanh(self.linear(self.layer_value[-1], self.output_weight, self.output_bias))\n elif self.output_activation_function == self.ACTIVATION_FUNCTION_RELU:\n self.output_layer_value = self.relu(self.linear(self.layer_value[-1], self.output_weight, self.output_bias))\n elif self.output_activation_function == self.ACTIVATION_FUNCTION_SOFTMAX:\n self.output_layer_value = self.softmax(self.linear(self.layer_value[-1], self.output_weight, self.output_bias), axis=1)\n elif self.output_activation_function == self.ACTIVATION_FUNCTION_REVERSE_LAYER:\n self.output_layer_value = 
self.reverse_layer(self.layer_value[-1])\n\n return self\n\n def calculate_error(self, y: torch.tensor):\n self.error_value = y - self.output_layer_value\n\n if self.loss_function == self.LOSS_FUNCTION_MSE:\n self.loss_value = torch.nn.functional.mse_loss(self.output_layer_value, y)\n elif self.loss_function == self.LOSS_FUNCTION_CROSS_ENTROPY:\n self.loss_value = torch.nn.functional.cross_entropy(self.output_layer_value, torch.argmax(y, 1))\n\n return self\n\n def compute_expected_values(self, in_place: bool = False):\n self.data_mean, self.data_variance, self.data_standard_deviation = \\\n MyUtil.recursive_mean_standard_deviation(self.layer_value[0],\n self.data_mean,\n self.data_variance,\n self.number_samples_feed)\n\n self.Eh, self.Eh2 = self.compute_inbound_expected_values()\n\n def compute_inbound_expected_values(self, number_hidden_layer: int = None):\n nhl = number_hidden_layer # readability\n if nhl is None:\n nhl = self.number_hidden_layers - 1\n\n if nhl == 0:\n inference, center, std = (1, self.data_mean, self.data_standard_deviation)\n py = MyUtil.probit(center, std)\n Eh = inference * self.sigmoid(self.linear(self.weight[0], py, self.bias[0].T))\n else:\n Eh, _ = self.compute_inbound_expected_values(number_hidden_layer=nhl - 1)\n weight, bias = (self.weight[nhl], self.bias[nhl]) if nhl < self.number_hidden_layers + 1 else (self.output_weight, self.output_bias)\n Eh = self.sigmoid(self.linear(weight, Eh.T, bias.T))\n\n return Eh, Eh ** 2\n\n @property\n def Eh(self):\n return self.__Eh\n\n @Eh.setter\n def Eh(self, value: torch.tensor):\n self.__Eh = value\n\n @property\n def Eh2(self):\n return self.__Eh2\n\n @Eh2.setter\n def Eh2(self, value: torch.tensor):\n self.__Eh2 = value\n\n @property\n def Ey(self):\n return self.softmax(self.linear(self.output_weight, self.Eh.T, self.output_bias.T), axis=0)\n\n @property\n def Ey2(self):\n return self.softmax(self.linear(self.output_weight, self.Eh2.T, self.output_bias.T), axis=0)\n\n @property\n def network_variance(self):\n return MyUtil.frobenius_norm(self.Ey2 - self.Ey ** 2)\n\n def compute_bias(self, y):\n return MyUtil.frobenius_norm((self.Ey.T - y) ** 2)\n\n def width_adaptation_stepwise(self, y, prune_strategy: int = None):\n if prune_strategy is None:\n prune_strategy = self.PRUNE_NODE_STRATEGY_MULTIPLE\n\n nhl: int = self.number_hidden_layers\n\n self.number_samples_feed = self.number_samples_feed + 1\n self.number_samples_layer[nhl] = self.number_samples_layer[nhl] + 1\n self.compute_expected_values()\n\n self.bias_mean[nhl], self.bias_variance[nhl], self.bias_standard_deviation[nhl] = \\\n MyUtil.recursive_mean_standard_deviation(self.compute_bias(y),\n self.bias_mean[nhl],\n self.bias_variance[nhl],\n self.number_samples_feed)\n\n self.var_mean[nhl], self.var_variance[nhl], self.var_standard_deviation[nhl] = \\\n MyUtil.recursive_mean_standard_deviation(self.network_variance,\n self.var_mean[nhl],\n self.var_variance[nhl],\n self.number_samples_feed)\n\n if self.number_samples_layer[nhl] <= 1 or self.growable[nhl]:\n self.minimum_bias_mean[nhl] = self.bias_mean[nhl]\n self.minimum_bias_standard_deviation[nhl] = self.bias_standard_deviation[nhl]\n else:\n self.minimum_bias_mean[nhl] = np.min([self.minimum_bias_mean[nhl], self.bias_mean[nhl]])\n self.minimum_bias_standard_deviation[nhl] = np.min([self.minimum_bias_standard_deviation[nhl], self.bias_standard_deviation[nhl]])\n\n if self.number_samples_layer[nhl] <= self.input_size + 1 or self.prunable[nhl][0] != -1:\n self.minimum_var_mean[nhl] = self.var_mean[nhl]\n 
self.minimum_var_standard_deviation[nhl] = self.var_standard_deviation[nhl]\n else:\n self.minimum_var_mean[nhl] = np.min([self.minimum_var_mean[nhl], self.var_mean[nhl]])\n self.minimum_var_standard_deviation[nhl] = np.min([self.minimum_var_standard_deviation[nhl], self.var_standard_deviation[nhl]])\n\n self.BIAS.append(self.bias_mean[nhl])\n self.VAR.append(self.var_mean[nhl])\n\n if self.output_size == 512: # STL or CIFAR\n alpha_1 = 1.45\n alpha_2 = 0.95\n else:\n alpha_1 = 1.25\n alpha_2 = 0.75\n\n self.growable[nhl] = self.is_growable(self.compute_bias(y), alpha_1, alpha_2)\n self.prunable[nhl] = self.is_prunable(prune_strategy, 2 * alpha_1, 2 * alpha_2)\n\n def is_growable(self, bias: torch.tensor, alpha_1: float = 1.25, alpha_2: float = 0.75):\n nhl = self.number_hidden_layers # readability\n\n current = self.bias_mean[nhl] + self.bias_standard_deviation[nhl]\n biased_min = self.minimum_bias_mean[nhl] \\\n + (alpha_1 * torch.exp(-bias) + alpha_2) * self.minimum_bias_standard_deviation[nhl]\n\n if self.number_samples_layer[nhl] > 1 and current >= biased_min:\n return True\n return False\n\n def is_prunable(self, prune_strategy: int = None, alpha_1: float = 2.5, alpha_2: float = 1.5):\n if prune_strategy is None:\n prune_strategy = self.PRUNE_NODE_STRATEGY_MULTIPLE\n nhl = self.number_hidden_layers # readability\n\n current = self.var_mean[nhl] + self.var_standard_deviation[nhl]\n biased_min = self.minimum_var_mean[nhl] \\\n + (alpha_1 * torch.exp(-self.network_variance) + alpha_2) * self.minimum_var_standard_deviation[nhl]\n\n if not self.growable[nhl] \\\n and self.layers[nhl] > 1 \\\n and self.number_samples_layer[nhl] > self.input_size + 1 \\\n and current >= biased_min:\n\n if prune_strategy == self.PRUNE_NODE_STRATEGY_SINGLE:\n return torch.argmin(self.Eh)\n elif prune_strategy == self.PRUNE_NODE_STRATEGY_MULTIPLE:\n nodes_to_prune = torch.where(self.Eh < torch.abs(torch.mean(self.Eh) - torch.var(self.Eh)))\n if len(nodes_to_prune[0]):\n return nodes_to_prune[0]\n else:\n return torch.argmin(self.Eh)\n\n return [-1]\n\n def grow_node(self, layer_number: int):\n self.layers[layer_number] += 1\n if layer_number >= 0:\n self.grow_weight_row(layer_number - 1)\n self.grow_bias(layer_number - 1)\n if layer_number <= self.number_hidden_layers:\n self.grow_weight_column(layer_number)\n\n def grow_weight_row(self, layer_number: int):\n def add_element(tensor_data: torch.tensor, momentum_tensor_data: torch.tensor, n_out: int):\n tensor_data = torch.cat((tensor_data, self.xavier_weight_initialization(1, n_out)), axis=0)\n momentum_tensor_data = torch.cat((momentum_tensor_data, torch.zeros(1, n_out, dtype=torch.float, device=MyDevice().get())), axis=0)\n return tensor_data, momentum_tensor_data\n\n if layer_number >= len(self.weight):\n [_, n_out] = self.output_weight.shape\n self.output_weight, self.output_momentum = add_element(self.output_weight, self.output_momentum, n_out)\n else:\n [_, n_out] = self.weight[layer_number].shape\n self.weight[layer_number], self.momentum[layer_number] = add_element(self.weight[layer_number], self.momentum[layer_number], n_out)\n\n def grow_weight_column(self, layer_number: int):\n def add_element(tensor_data: torch.tensor, momentum_tensor_data: torch.tensor, n_out: int):\n tensor_data = torch.cat((tensor_data, self.xavier_weight_initialization(n_out, 1)), axis=1)\n momentum_tensor_data = torch.cat((momentum_tensor_data, torch.zeros(n_out, 1, dtype=torch.float, device=MyDevice().get())), axis=1)\n return tensor_data, momentum_tensor_data\n\n if 
layer_number >= len(self.weight):\n [n_out, _] = self.output_weight.shape\n self.output_weight, self.output_momentum = add_element(self.output_weight, self.output_momentum, n_out)\n else:\n [n_out, _] = self.weight[layer_number].shape\n self.weight[layer_number], self.momentum[layer_number] = add_element(self.weight[layer_number], self.momentum[layer_number], n_out)\n\n def grow_bias(self, layer_number):\n def add_element(tensor_data: torch.tensor, momentum_tensor_data: torch.tensor, n_out: int):\n tensor_data = torch.cat((tensor_data, self.xavier_weight_initialization(1, n_out)), axis=1)\n momentum_tensor_data = torch.cat((momentum_tensor_data, torch.zeros(1, n_out, dtype=torch.float, device=MyDevice().get())), axis=1)\n return tensor_data, momentum_tensor_data\n\n if layer_number >= len(self.bias):\n [n_out, _] = self.output_bias.shape\n self.output_bias, self.output_bias_momentum = add_element(self.output_bias, self.output_bias_momentum, n_out)\n else:\n [n_out, _] = self.bias[layer_number].shape\n self.bias[layer_number], self.bias_momentum[layer_number] = add_element(self.bias[layer_number], self.bias_momentum[layer_number], n_out)\n pass\n\n def prune_node(self, layer_number: int, node_number: int):\n self.layers[layer_number] -= 1\n if layer_number >= 0:\n self.prune_weight_row(layer_number - 1, node_number)\n self.prune_bias(layer_number - 1, node_number)\n if layer_number <= self.number_hidden_layers:\n self.prune_weight_column(layer_number, node_number)\n\n def prune_weight_row(self, layer_number: int, node_number: int):\n def remove_nth_row(tensor_data: torch.tensor, n: int):\n return torch.cat([tensor_data[:n], tensor_data[n+1:]])\n\n if layer_number >= len(self.weight):\n self.output_weight = remove_nth_row(self.output_weight, node_number)\n self.output_momentum = remove_nth_row(self.output_momentum, node_number)\n else:\n self.weight[layer_number] = remove_nth_row(self.weight[layer_number], node_number)\n self.momentum[layer_number] = remove_nth_row(self.momentum[layer_number], node_number)\n\n def prune_weight_column(self, layer_number: int, node_number: int):\n def remove_nth_column(weight_tensor: torch.tensor, n: int):\n return torch.cat([weight_tensor.T[:n], weight_tensor.T[n+1:]]).T\n\n if layer_number >= len(self.weight):\n self.output_weight = remove_nth_column(self.output_weight, node_number)\n self.output_momentum = remove_nth_column(self.output_momentum, node_number)\n else:\n self.weight[layer_number] = remove_nth_column(self.weight[layer_number], node_number)\n self.momentum[layer_number] = remove_nth_column(self.momentum[layer_number], node_number)\n\n def prune_bias(self, layer_number: int, node_number: int):\n def remove_nth_element(bias_tensor: torch.tensor, n: int):\n bias_tensor = torch.cat([bias_tensor[0][:n], bias_tensor[0][n+1:]])\n return bias_tensor.view(1, bias_tensor.shape[0])\n\n if layer_number >= len(self.bias):\n self.output_bias = remove_nth_element(self.output_bias, node_number)\n self.output_bias_momentum = remove_nth_element(self.output_bias_momentum, node_number)\n else:\n self.bias[layer_number] = remove_nth_element(self.bias[layer_number], node_number)\n self.bias_momentum[layer_number] = remove_nth_element(self.bias_momentum[layer_number], node_number)", "sub_path": "NeuralNetwork.py", "file_name": "NeuralNetwork.py", "file_ext": "py", "file_size_in_byte": 24881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "ElasticNodes.ElasticNodes", "line_number": 23, 
"usage_type": "name"}, {"api_name": "torch.max", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 114, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 115, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 131, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 132, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 132, "usage_type": "call"}, {"api_name": "ElasticNodes.ElasticNodes.__init__", "line_number": 143, "usage_type": "call"}, {"api_name": "ElasticNodes.ElasticNodes", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 149, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 151, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 160, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 165, "usage_type": "call"}, {"api_name": "MySingletons.MyDevice", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 170, "usage_type": "attribute"}, {"api_name": "torch.sigmoid", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 182, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.linear", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 183, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.softmax", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 187, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 201, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 215, "usage_type": "call"}, 
{"api_name": "torch.ne", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 230, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 241, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 244, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 246, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 247, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 249, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 250, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 252, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 253, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 256, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 258, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 259, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 260, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 261, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 263, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 264, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 266, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 267, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 269, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 306, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 310, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 310, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 312, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 312, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil.recursive_mean_standard_deviation", "line_number": 318, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil", "line_number": 318, "usage_type": "name"}, {"api_name": "MyUtil.MyUtil.probit", "line_number": 332, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil", "line_number": 332, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 346, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 354, "usage_type": "attribute"}, {"api_name": "MyUtil.MyUtil.frobenius_norm", "line_number": 367, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil", "line_number": 367, "usage_type": "name"}, {"api_name": "MyUtil.MyUtil.frobenius_norm", "line_number": 370, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil", "line_number": 370, "usage_type": "name"}, {"api_name": "MyUtil.MyUtil.recursive_mean_standard_deviation", "line_number": 383, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil", "line_number": 383, "usage_type": "name"}, {"api_name": "MyUtil.MyUtil.recursive_mean_standard_deviation", "line_number": 389, "usage_type": "call"}, {"api_name": "MyUtil.MyUtil", "line_number": 389, "usage_type": "name"}, 
{"api_name": "numpy.min", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 406, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 421, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 439, "usage_type": "call"}, {"api_name": "torch.argmin", "line_number": 447, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 449, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 449, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 449, "usage_type": "call"}, {"api_name": "torch.var", "line_number": 449, "usage_type": "call"}, {"api_name": "torch.argmin", "line_number": 453, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 466, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 467, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 468, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 468, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 468, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 468, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 479, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 480, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 481, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 481, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 481, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 481, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 492, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 493, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 494, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 494, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 494, "usage_type": "attribute"}, {"api_name": "MySingletons.MyDevice", "line_number": 494, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 514, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 515, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 525, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 526, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 536, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 537, "usage_type": "call"}]} +{"seq_id": "25023949", "text": "import requests\nfrom iNaturalist.Common import convertToObservationResults\n\ndef get_all_observations_for_taxon(taxon_id):\n observations = []\n page_num = 1\n while True:\n url = 'https://api.inaturalist.org/v1/observations?taxon_id=' + str(taxon_id) + '&per_page=200&order=desc&order_by=created_at&page=' + str(page_num)\n\n json_results = requests.get(url).json()\n per_page = int(json_results[\"per_page\"])\n num_results = len(json_results[\"results\"])\n\n for observation in convertToObservationResults(json_results[\"results\"]):\n observations.append(observation)\n\n if (per_page != num_results):\n return observations\n \n page_num += 1", "sub_path": "iNaturalist/ApiRequests.py", "file_name": "ApiRequests.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "iNaturalist.Common.convertToObservationResults", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "308379667", "text": "import random\nfrom datetime import datetime\n\n\n# Stores Music library and returns appropriate songs\nRED_LOW = range(1000,1004)\nRED_MID = range(1,60)\nRED_HIGH = range(61,85)\nORANGE_LOW = []\nORANGE_MID = [1, 8, 12]\nORANGE_HIGH = [7, 11]\nYELLOW_LOW = []\nYELLOW_MID = [5]\nYELLOW_HIGH = [6]\nGREEN_LOW = []\nGREEN_MID = [7]\nGREEN_HIGH = [8]\nBLUE_LOW = []\nBLUE_MID = [9]\nBLUE_HIGH = [10]\nPURPLE_LOW = []\nPURPLE_MID = [11]\nPURPLE_HIGH = [12]\nWHITE_LOW = []\nWHITE_MID = [13]\nWHITE_HIGH = [14]\nMIDS = [RED_MID, ORANGE_MID, YELLOW_MID, GREEN_MID, BLUE_MID, PURPLE_MID, WHITE_MID]\nHIGHS = [RED_HIGH, ORANGE_HIGH, YELLOW_HIGH, GREEN_HIGH, BLUE_HIGH, PURPLE_HIGH, WHITE_HIGH]\nLOWS = [RED_LOW, ORANGE_LOW, YELLOW_LOW, GREEN_LOW, BLUE_LOW, PURPLE_LOW, WHITE_LOW]\nMEDITATIONS = []\n\nSET_THEME = 0\n\n\ndef find_low(theme=datetime.today().weekday()):\n if SET_THEME >= 0:\n theme = SET_THEME\n return random.choice(LOWS[theme])\n\n\ndef find_mid(theme=datetime.today().weekday()):\n if SET_THEME >= 0:\n theme = SET_THEME\n elif datetime.now().hour >= 21:\n all_mids = [song for sublist in MIDS for song in sublist]\n return random.choice(all_mids)\n return random.choice(MIDS[theme])\n\n\ndef find_high(theme=datetime.today().weekday()):\n if SET_THEME >= 0:\n theme = SET_THEME\n elif datetime.now().hour >= 21:\n all_highs = [song for sublist in HIGHS for song in sublist]\n return random.choice(all_highs)\n return random.choice(HIGHS[theme])\n\n\ndef get_meditation(this_time=datetime.now()):\n if this_time.hour < 12:\n return MEDITATIONS[this_time.day * 2]\n return MEDITATIONS[1 + this_time.day * 2]\n\n\"\"\"\n if this_time.day == 0:\n return MEDITATIONS[0];\n if this_time.day == 1:\n return MEDITATIONS[2];\n if this_time.day == 2:\n return MEDITATIONS[4];\n if this_time.day == 3:\n return MEDITATIONS[];\n if this_time.day == 0\n return MEDITATIONS[0];\n if this_time.day == 0\n return MEDITATIONS[0];\n if this_time.day == 0\n return MEDITATIONS[0];\n\"\"\"", "sub_path": "songs.py", "file_name": "songs.py", "file_ext": "py", "file_size_in_byte": 2109, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.datetime.today", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 46, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 55, "usage_type": "call"}, {"api_name": "random.choice", 
"line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "248284098", "text": "import numpy as np\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nclass Job(object):\n \"\"\"\n A class used to bundle train/test data together with the model to be fit.\n\n Attributes\n ----------\n model : torch.nn.Module\n Pytorch model to fit\n loaders : dict\n Contains data loaders for train/test data\n device :\n Checks whether GPUs are available\n verbose : bool\n Controls verbosity of the output\n\n Methods\n -------\n train_model\n Trains the model over the data yielded by the training loader\n test_model\n Tests the model over the data yielded by the test loader\n get_losses\n Evaluates the loss function over the train and test sets\n \"\"\"\n\n def __init__(self, model, loader_train, loader_test, verbose=True):\n \"\"\"\n Parameters\n ----------\n model : torch.nn.Module\n Pytorch model to optimize\n loader_train : torch.utils.data.DataLoader\n Data loader for training data\n loader_test : torch.utils.data.DataLoader\n Data loader for testing data\n verbose : bool\n Controls verbosity of the output\n \"\"\"\n\n assert all([model, loader_train, loader_test]), \\\n 'Model and loaders must not be None.'\n self.model = model\n self.loaders = {\n 'train': loader_train,\n 'test': loader_test\n }\n\n # Check for GPU acceleration\n self.device = torch.device(\"cuda:0\"\n if torch.cuda.is_available()\n else \"cpu\")\n # Job settings\n self.verbose = verbose\n\n\n def get_losses(self, criterion=nn.CrossEntropyLoss()):\n \"\"\"Evaluate loss function over train and test sets.\n\n Parameters\n ----------\n criterion : Loss function\n Criterion to use for minimization\n\n Returns\n -------\n train_loss : float\n Average loss evaluated over the training set\n test_loss : float\n Average loss evaluated over the test set\n \"\"\"\n\n self.model.eval()\n with torch.no_grad():\n train_loss = np.array([\n criterion(self.model(inputs), labels).item()\n for (inputs, labels) in self.loaders['train']\n ]).mean()\n test_loss = np.array([\n criterion(self.model(inputs), labels).item()\n for (inputs, labels) in self.loaders['test']\n ]).mean()\n return train_loss, test_loss\n\n\n def train_model(self,\n opt=optim.Adam,\n criterion=nn.CrossEntropyLoss(),\n epochs=3,\n lr=0.0001,\n stride_print=1000,\n training_curves=False,\n dir_data=None):\n \"\"\"Train the model.\n\n Parameters\n ----------\n opt : Pytorch Optimizer object\n Optimizer to use\n criterion : Loss function\n Criterion to use for minimization\n epochs : int\n Number of epochs for training\n lr : float\n Learning rate to pass to the optimizer\n training_curves : bool\n Whether to generate and save loss curves\n dir_data :\n Path to directory in which to save loss data\n \"\"\"\n\n if not (self.loaders['train'] and self.loaders['test']):\n raise AttributeError('Data loaders have not been initialized.')\n\n # Whether to save loss data\n if training_curves:\n assert dir_data is not None, 'Specify where to save loss data.'\n assert os.path.exists(dir_data), 'Specified directory does not exist.'\n losses = []\n\n # Instantiate optimizer and set model to train mode\n optimizer = opt(self.model.parameters(), lr=lr)\n\n # Train and monitor loss\n # Note: Structure mirrors Pytorch tutorial @\n # https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html\n for i_epoch 
in range(epochs):\n\n self.model.train()\n running_loss = 0.0\n for i_data, data in enumerate(self.loaders['train']):\n\n # Evaluate outputs, perform backprop, and take a step\n inputs, labels = data\n optimizer.zero_grad()\n outputs = self.model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n # Monitor progress\n if i_data % stride_print == stride_print - 1 and self.verbose:\n print('[%d, %5d] loss: %.3f' %\n (i_epoch + 1, i_data + 1, running_loss / stride_print))\n sys.stdout.flush()\n running_loss = 0.0\n\n if training_curves:\n losses.append( self.get_losses(criterion=criterion) )\n\n if training_curves:\n losses = np.array(losses)\n fn_save = os.path.join(dir_data, 'loss_curves.npy')\n np.save(fn_save, losses)\n\n\n def test_model(self):\n \"\"\"Evaluate the model over the test set and print accuracy.\"\"\"\n\n # Set eval mode\n self.model.eval()\n\n # Accumulate stats\n total, correct = 0, 0\n with torch.no_grad():\n for data in self.loaders['test']:\n inputs, labels = data\n outputs = self.model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n # Print accuracy\n acc = 100 * correct / total\n print('Accuracy of the network on the 10000 test images: %d %%' % (acc))\n", "sub_path": "aether/job.py", "file_name": "job.py", "file_ext": "py", "file_size_in_byte": 5876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.device", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 153, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "485603035", "text": "import asyncio\nimport json\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple\n\nimport attr\n\nfrom .datcore import DatcoreClient\nfrom .models import FileMetaData, FileMetaDataEx\n\nFileMetaDataVec = 
List[FileMetaData]\nFileMetaDataExVec = List[FileMetaDataEx]\n\nCURRENT_DIR = Path(__file__).resolve().parent\nlogger = logging.getLogger(__name__)\n\n# pylint: disable=W0703\n\n\n@contextmanager\ndef safe_call(error_msg: str = \"\", *, skip_logs: bool = False):\n try:\n yield\n except AttributeError:\n if not skip_logs:\n logger.warning(\"Calling disabled client. %s\", error_msg)\n except Exception: # pylint: disable=broad-except\n if error_msg and not skip_logs:\n logger.warning(error_msg, exc_info=True)\n\n\n# TODO: Use async callbacks for retrieval of progress and pass via rabbit to server\ndef make_async(func):\n @wraps(func)\n async def async_wrapper(self, *args, **kwargs):\n blocking_task = self.loop.run_in_executor(\n self.pool, func, self, *args, **kwargs\n )\n _completed, _pending = await asyncio.wait([blocking_task])\n results = [t.result() for t in _completed]\n # TODO: does this always work?\n return results[0]\n\n return async_wrapper\n\n\nclass DatcoreWrapper:\n \"\"\" Wrapper to call the Python 2 API from datcore\n\n This can go away now. Next cleanup round...\n\n NOTE: Auto-disables client\n\n \"\"\"\n\n def __init__(\n self, api_token: str, api_secret: str, loop: object, pool: ThreadPoolExecutor\n ):\n self.api_token = api_token\n self.api_secret = api_secret\n\n self.loop = loop\n self.pool = pool\n\n try:\n self.d_client = DatcoreClient(\n api_token=api_token,\n api_secret=api_secret,\n host=\"https://api.blackfynn.io\",\n )\n except Exception:\n self.d_client = None # Disabled: any call will raise AttributeError\n logger.warning(\"Failed to setup datcore. Disabling client.\", exc_info=True)\n\n @property\n def is_communication_enabled(self) -> bool:\n \"\"\" Wrapper class auto-disables if client cannot be created\n\n e.g. if endpoint service is down\n\n :return: True if communication with datcore is enabled\n :rtype: bool\n \"\"\"\n return self.d_client is not None\n\n @make_async\n def list_files_recursively(self) -> FileMetaDataVec: # pylint: disable=W0613\n files = []\n\n with safe_call(error_msg=\"Error listing datcore files\"):\n files = self.d_client.list_files_recursively()\n\n return files\n\n @make_async\n def list_files_raw(self) -> FileMetaDataExVec: # pylint: disable=W0613\n files = []\n\n with safe_call(error_msg=\"Error listing datcore files\"):\n files = self.d_client.list_files_raw()\n\n return files\n\n @make_async\n def list_files_raw_dataset(\n self, dataset_id: str\n ) -> FileMetaDataExVec: # pylint: disable=W0613\n files = []\n with safe_call(error_msg=\"Error listing datcore files\"):\n files = self.d_client.list_files_raw_dataset(dataset_id)\n\n return files\n\n @make_async\n def delete_file(self, destination: str, filename: str) -> bool:\n # the object can be found in dataset/filename <-> bucket_name/object_name\n ok = False\n with safe_call(error_msg=\"Error deleting datcore file\"):\n ok = self.d_client.delete_file(destination, filename)\n return ok\n\n @make_async\n def delete_file_by_id(self, file_id: str) -> bool:\n ok = False\n with safe_call(error_msg=\"Error deleting datcore file\"):\n ok = self.d_client.delete_file_by_id(file_id)\n return ok\n\n @make_async\n def download_link(self, destination: str, filename: str) -> str:\n url = \"\"\n with safe_call(error_msg=\"Error getting datcore download link\"):\n url = self.d_client.download_link(destination, filename)\n\n return url\n\n @make_async\n def download_link_by_id(self, file_id: str) -> Tuple[str, str]:\n url = \"\"\n filename = \"\"\n with safe_call(error_msg=\"Error getting datcore 
download link\"):\n url, filename = self.d_client.download_link_by_id(file_id)\n\n return url, filename\n\n @make_async\n def create_test_dataset(self, dataset_name: str) -> Optional[str]:\n with safe_call(error_msg=\"Error creating test dataset\"):\n ds = self.d_client.get_dataset(dataset_name)\n if ds is not None:\n self.d_client.delete_files(dataset_name)\n else:\n ds = self.d_client.create_dataset(dataset_name)\n return ds.id\n return None\n\n @make_async\n def delete_test_dataset(self, dataset) -> None:\n with safe_call(error_msg=\"Error deleting test dataset\"):\n ds = self.d_client.get_dataset(dataset)\n if ds is not None:\n self.d_client.delete_files(dataset)\n\n @make_async\n def upload_file(\n self, destination: str, local_path: str, meta_data: FileMetaData = None\n ) -> bool:\n ok = False\n str_meta = json.dumps(attr.asdict(meta_data)) if meta_data else \"\"\n\n with safe_call(error_msg=\"Error uploading file to datcore\"):\n if str_meta:\n meta_data = json.loads(str_meta)\n ok = self.d_client.upload_file(destination, local_path, meta_data)\n else:\n ok = self.d_client.upload_file(destination, local_path)\n return ok\n\n @make_async\n def upload_file_to_id(self, destination_id: str, local_path: str) -> Optional[str]:\n _id = None\n with safe_call(error_msg=\"Error uploading file to datcore\"):\n _id = self.d_client.upload_file_to_id(destination_id, local_path)\n return _id\n\n @make_async\n def create_collection(\n self, destination_id: str, collection_name: str\n ) -> Optional[str]:\n _id = None\n with safe_call(error_msg=\"Error creating collection in datcore\"):\n _id = self.d_client.create_collection(destination_id, collection_name)\n return _id\n\n @make_async\n def list_datasets(self) -> List:\n data = []\n with safe_call(error_msg=\"Error creating collection in datcore\"):\n data = self.d_client.list_datasets()\n return data\n\n @make_async\n def ping(self) -> bool:\n ok = False\n with safe_call(skip_logs=True):\n profile = self.d_client.profile()\n ok = profile is not None\n return ok\n", "sub_path": "services/storage/src/simcore_service_storage/datcore_wrapper.py", "file_name": "datcore_wrapper.py", "file_ext": "py", "file_size_in_byte": 6736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "models.FileMetaData", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "models.FileMetaDataEx", "line_number": 16, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 24, "usage_type": "name"}, {"api_name": "asyncio.wait", "line_number": 43, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 38, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 61, "usage_type": "name"}, {"api_name": "datcore.DatcoreClient", "line_number": 70, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 151, "usage_type": "name"}, {"api_name": "models.FileMetaData", "line_number": 170, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}, {"api_name": "attr.asdict", "line_number": 173, "usage_type": "call"}, {"api_name": 
"json.loads", "line_number": 177, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 184, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 193, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 200, "usage_type": "name"}]} +{"seq_id": "84085395", "text": "# coding=utf-8\n# Copyright 2020 The Meta-Dataset Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Metric-based learners.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport gin.tf\nfrom meta_dataset.learners import base as learner_base\nfrom meta_dataset.models import functional_backbones\nimport tensorflow.compat.v1 as tf\n\n\ndef _compute_prototypes(embeddings, labels):\n \"\"\"Computes class prototypes over the last dimension of embeddings.\n\n Args:\n embeddings: Tensor of examples of shape [num_examples, embedding_size].\n labels: Tensor of one-hot encoded labels of shape [num_examples,\n num_classes].\n\n Returns:\n prototypes: Tensor of class prototypes of shape [num_classes,\n embedding_size].\n \"\"\"\n labels = tf.cast(labels, tf.float32)\n\n # [num examples, 1, embedding size].\n embeddings = tf.expand_dims(embeddings, 1)\n\n # [num examples, num classes, 1].\n labels = tf.expand_dims(labels, 2)\n\n # Sums each class' embeddings. 
[num classes, embedding size].\n class_sums = tf.reduce_sum(labels * embeddings, 0)\n\n # The prototype of each class is the averaged embedding of its examples.\n class_num_images = tf.reduce_sum(labels, 0) # [way].\n prototypes = class_sums / class_num_images\n\n return prototypes\n\n\ndef compute_prototypes(embeddings, labels):\n \"\"\"Computes class prototypes over features.\n\n Flattens and reshapes the features if they are not already flattened.\n Args:\n embeddings: Tensor of examples of shape [num_examples, embedding_size] or\n [num_examples, spatial_dim, spatial_dim, n_features].\n labels: Tensor of one-hot encoded labels of shape [num_examples,\n num_classes].\n\n Returns:\n prototypes: Tensor of class prototypes of shape [num_classes,\n embedding_size].\n \"\"\"\n if len(embeddings.shape) > 2:\n feature_shape = embeddings.shape.as_list()[1:]\n n_images = tf.shape(embeddings)[0]\n n_classes = tf.shape(labels)[-1]\n\n vectorized_embedding = tf.reshape(embeddings, [n_images, -1])\n vectorized_prototypes = _compute_prototypes(vectorized_embedding, labels)\n prototypes = tf.reshape(vectorized_prototypes, [n_classes] + feature_shape)\n else:\n prototypes = _compute_prototypes(embeddings, labels)\n\n return prototypes\n\n\nclass MetricLearner(learner_base.EpisodicLearner):\n \"\"\"A learner that uses a learned distance metric to make predictions.\"\"\"\n\n def __init__(self, **kwargs):\n super(MetricLearner, self).__init__(**kwargs)\n # `MetricLearner` subclasses don't require a pre-specified\n # output dimensionality.\n delattr(self, 'logit_dim')\n\n def forward_pass(self, data):\n \"\"\"Embeds all (training and testing) images of the episode.\n\n Args:\n data: A `meta_dataset.providers.Episode` containing the data for the\n episode.\n\n Returns:\n The predictions for the query set within the episode.\n \"\"\"\n # Compute the support set's mean and var and use these as the moments for\n # batch norm on the query set.\n support_embeddings_dict = self.embedding_fn(\n data.support_images,\n self.is_training,\n keep_spatial_dims=self.keep_spatial_dims)\n support_embeddings = support_embeddings_dict['embeddings']\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = support_embeddings_dict['moments']\n query_embeddings_dict = self.embedding_fn(\n data.query_images,\n self.is_training,\n moments=support_set_moments,\n keep_spatial_dims=self.keep_spatial_dims,\n backprop_through_moments=self.backprop_through_moments)\n query_embeddings = query_embeddings_dict['embeddings']\n\n query_logits = self.compute_logits(\n support_embeddings,\n query_embeddings,\n data.onehot_support_labels,\n )\n\n return query_logits\n\n def compute_logits(self, support_embeddings, query_embeddings,\n onehot_support_labels):\n raise NotImplementedError('Abstract method.')\n\n\n@gin.configurable\nclass PrototypicalNetworkLearner(MetricLearner):\n \"\"\"A Prototypical Network.\"\"\"\n keep_spatial_dims = False\n\n def compute_logits(self, support_embeddings, query_embeddings,\n onehot_support_labels):\n \"\"\"Computes the negative distances of each query point to each prototype.\"\"\"\n\n # [num test images, 1, embedding size].\n query_embeddings = tf.expand_dims(query_embeddings, 1)\n\n prototypes = compute_prototypes(support_embeddings, onehot_support_labels)\n\n # [1, num_classes, embedding_size].\n prototypes = tf.expand_dims(prototypes, 0)\n\n # Squared Euclidean distances between each test embedding / prototype pair.\n distances = tf.reduce_sum(tf.square(query_embeddings - 
prototypes), 2)\n return -distances\n\n\n@gin.configurable\nclass MatchingNetworkLearner(MetricLearner):\n \"\"\"A Matching Network.\"\"\"\n keep_spatial_dims = False\n\n def __init__(self, exact_cosine_distance, **kwargs):\n \"\"\"Initializes the Matching Networks instance.\n\n Args:\n exact_cosine_distance: If True then the cosine distance is used, otherwise\n the query set embeddings are left unnormalized when computing the dot\n product.\n **kwargs: Keyword arguments common to all `MetricLearner`s.\n \"\"\"\n self.exact_cosine_distance = exact_cosine_distance\n super(MatchingNetworkLearner, self).__init__(**kwargs)\n\n def compute_logits(self, support_embeddings, query_embeddings,\n onehot_support_labels):\n \"\"\"Computes the class logits.\n\n Probabilities are computed as a weighted sum of one-hot encoded training\n labels. Weights for individual support/query pairs of examples are\n proportional to the (potentially semi-normalized) cosine distance between\n the embeddings of the two examples.\n\n Args:\n support_embeddings: A Tensor of size [num_support_images, embedding dim].\n query_embeddings: A Tensor of size [num_query_images, embedding dim].\n onehot_support_labels: A Tensor of size [batch size, way].\n\n Returns:\n The query set logits as a [num_query_images, way] matrix.\n \"\"\"\n # Undocumented in the paper, but *very important*: *only* the support set\n # embeddings are L2-normalized, which means that the distance is not exactly\n # a cosine distance. For comparison we also allow for the actual cosine\n # distance to be computed, which is controlled with the\n # `exact_cosine_distance` instance attribute.\n support_embeddings = tf.nn.l2_normalize(support_embeddings, 1, epsilon=1e-3)\n if self.exact_cosine_distance:\n query_embeddings = tf.nn.l2_normalize(query_embeddings, 1, epsilon=1e-3)\n # [num_query_images, num_support_images]\n similarities = tf.matmul(\n query_embeddings, support_embeddings, transpose_b=True)\n attention = tf.nn.softmax(similarities)\n\n # [num_query_images, way]\n probs = tf.matmul(attention, tf.cast(onehot_support_labels, tf.float32))\n return tf.log(probs)\n\n\n@gin.configurable\nclass RelationNetworkLearner(MetricLearner):\n \"\"\"A Relation Network.\"\"\"\n keep_spatial_dims = True\n\n def compute_logits(self, support_embeddings, query_embeddings,\n onehot_support_labels):\n \"\"\"Computes the relation score of each query example to each prototype.\"\"\"\n # [n_test, 21, 21, n_features].\n query_embed_shape = query_embeddings.shape.as_list()\n n_feature = query_embed_shape[3]\n out_shape = query_embed_shape[1:3]\n n_test = tf.shape(query_embeddings)[0]\n\n # [n_test, num_classes, 21, 21, n_feature].\n # It is okay for one of the elements in the list to be a tensor.\n prototypes = compute_prototypes(support_embeddings, onehot_support_labels)\n\n prototype_extended = tf.tile(\n tf.expand_dims(prototypes, 0), [n_test, 1, 1, 1, 1])\n # [num_classes, n_test, 21, 21, n_feature].\n query_f_extended = tf.tile(\n tf.expand_dims(query_embeddings, 1),\n [1, tf.shape(onehot_support_labels)[-1], 1, 1, 1])\n relation_pairs = tf.concat((prototype_extended, query_f_extended), 4)\n # relation_pairs.shape.as_list()[-3:] == [-1] + out_shape + [n_feature*2]\n relation_pairs = tf.reshape(relation_pairs,\n [-1] + out_shape + [n_feature * 2])\n relationnet_dict = functional_backbones.relation_module(relation_pairs)\n way = tf.shape(onehot_support_labels)[-1]\n relations = tf.reshape(relationnet_dict['output'], [-1, way])\n return relations\n\n def compute_loss(self, 
onehot_labels, predictions):\n \"\"\"Computes the MSE loss of `predictions` with respect to `onehot_labels`.\n\n Args:\n onehot_labels: A `tf.Tensor` containing the class labels; each vector\n along the class dimension should hold a valid probability distribution.\n predictions: A `tf.Tensor` containing the class predictions,\n interpreted as unnormalized log probabilities.\n\n Returns:\n A `tf.Tensor` representing the average loss.\n \"\"\"\n mse_loss = tf.losses.mean_squared_error(onehot_labels, predictions)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = mse_loss + regularization\n return loss\n", "sub_path": "meta_dataset/learners/metric_learners.py", "file_name": "metric_learners.py", "file_ext": "py", "file_size_in_byte": 9685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tensorflow.compat.v1.cast", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 42, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.float32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.expand_dims", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 45, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.expand_dims", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 48, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reduce_sum", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 51, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reduce_sum", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 54, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.shape", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 76, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.shape", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 77, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reshape", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 79, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reshape", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 81, "usage_type": "name"}, {"api_name": "meta_dataset.learners.base.EpisodicLearner", "line_number": 88, "usage_type": "attribute"}, {"api_name": "meta_dataset.learners.base", "line_number": 88, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.expand_dims", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 148, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.expand_dims", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 153, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reduce_sum", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 156, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.square", "line_number": 156, "usage_type": "call"}, {"api_name": "gin.tf.configurable", "line_number": 138, "usage_type": "attribute"}, {"api_name": "gin.tf", "line_number": 138, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.nn.l2_normalize", "line_number": 199, "usage_type": "call"}, 
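[Editor's note: for readers who want the prototypical-network logit computation from the metric_learners.py record in isolation, here is a hedged re-expression using plain torch ops. The record itself uses tensorflow.compat.v1; the shapes and data below are toy stand-ins, and this is a sketch of the idea, not the record's implementation.]
import torch

support = torch.randn(10, 8)            # [num_support, embedding_size]
labels = torch.arange(10) % 5           # two support examples per class
onehot = torch.eye(5)[labels]           # [num_support, num_classes]
query = torch.randn(6, 8)               # [num_query, embedding_size]

# Class prototype = mean embedding of each class's support examples.
prototypes = (onehot.T @ support) / onehot.sum(0, keepdim=True).T
# Logits = negative squared Euclidean distance to each prototype.
distances = ((query[:, None, :] - prototypes[None]) ** 2).sum(-1)
logits = -distances                     # [num_query, num_classes]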
{"api_name": "tensorflow.compat.v1.nn", "line_number": 199, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 199, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.nn.l2_normalize", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.nn", "line_number": 201, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 201, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.matmul", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 203, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.nn.softmax", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.nn", "line_number": 205, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 205, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.matmul", "line_number": 208, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 208, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.cast", "line_number": 208, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.float32", "line_number": 208, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.log", "line_number": 209, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 209, "usage_type": "name"}, {"api_name": "gin.tf.configurable", "line_number": 160, "usage_type": "attribute"}, {"api_name": "gin.tf", "line_number": 160, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.shape", "line_number": 224, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 224, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.tile", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 230, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.expand_dims", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 231, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.tile", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 233, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.expand_dims", "line_number": 234, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 234, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.shape", "line_number": 235, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 235, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.concat", "line_number": 236, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 236, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reshape", "line_number": 238, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 238, "usage_type": "name"}, {"api_name": "meta_dataset.models.functional_backbones.relation_module", "line_number": 240, "usage_type": "call"}, {"api_name": "meta_dataset.models.functional_backbones", "line_number": 240, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.shape", "line_number": 241, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 241, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reshape", "line_number": 242, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 242, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.losses.mean_squared_error", "line_number": 257, "usage_type": "call"}, {"api_name": 
"tensorflow.compat.v1.losses", "line_number": 257, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 257, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reduce_sum", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 258, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.get_collection", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 259, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.GraphKeys", "line_number": 259, "usage_type": "attribute"}, {"api_name": "gin.tf.configurable", "line_number": 212, "usage_type": "attribute"}, {"api_name": "gin.tf", "line_number": 212, "usage_type": "name"}]} +{"seq_id": "378849862", "text": "# -*- coding: utf-8 -*-\n# 材料报表 模块\nimport os,datetime, xlrd, json\nfrom web.models import Materialreport\nfrom myAPI.excelAPI import get_date, list_to_xlsx\nfrom django.shortcuts import render, redirect\nfrom myAPI.pageAPI import djangoPage, PAGE_NUM\nfrom myAPI.downfileAPI import down_file\nfrom web.forms.materialreport import MaterialreportForm\nfrom myAPI.modelAPI import get_model_first_id, get_model_last_id, \\\n get_model_up_id, get_model_name_up_id, get_model_data, get_post_data\nfrom myAPI.listAPI import pinyin\nfrom django.http.response import StreamingHttpResponse\nfrom myAPI.listAPI import pinyin \ndef model_id_update(request, model, id):\n \"\"\"\" 更新一条数据库记录。\n v[5] = round(float(v[4]) * float(v[3]), 2) #上月结存金额 = 数量 * 单价\n v[8] = round(float(v[7]) * float(v[6]), 2) #收入金额 = 数量 * 单价\n v[11] = round(float(v[10]) * float(v[9]), 2) #加权金额 = 数量 * 单价\n v[14] = round(float(v[13]) * float(v[12]), 2) #生产支出金额 = 数量 * 单价\n v[21] = round(float(v[20]) * float(v[19]), 2) #本月结存金额 = 数量 * 单价\n\n \"\"\" \n id = int(id) \n model.filter(id=id).update(operator = request.user.username) #更新经办人为登录用户\n \n #更新 上月结存金额 = 数量 * 单价 \n lastmonth_money = round(model.get(id=id).lastmonth_number * model.get(id=id).lastmonth_univalence, 2) #金额保留2位小数\n model.filter(id=id).update(lastmonth_money = lastmonth_money) \n \n #更新 收入金额 = 数量 * 单价\n income_money = round(model.get(id=id).income_number * model.get(id=id).income_univalence, 2) #金额保留2位小数\n model.filter(id=id).update(income_money = income_money) \n \n #更新 加权金额 = 数量 * 单价 \n weighting_money = round(model.get(id=id).weighting_number * model.get(id=id).weighting_univalence, 2) #金额保留2位小数\n model.filter(id=id).update(weighting_money = weighting_money) \n\n #更新 生产支出金额 = 数量 * 单价 \n production_expenditure_money = round(model.get(id=id).production_expenditure_number * model.get(id=id).production_expenditure_univalence, 2) #金额保留2位小数\n model.filter(id=id).update(production_expenditure_money = production_expenditure_money) \n\n #更新 本月结存金额 = 数量 * 单价\n thismonth_money = round(model.get(id=id).thismonth_number * model.get(id=id).thismonth_univalence, 2) #金额保留2位小数\n model.filter(id=id).update(thismonth_money = thismonth_money) \n \n return ''\n \ndef post_excel_model(request, post_file_excel, model, k):\n ''' Excel文件,多张工作表(数据),导入到数据库\n post_file_excel: 前端上传的文件名\n model: 数据库\n K: 数据库字段, 与Excel表格列对应 \n '''\n file_excel = request.FILES.get(post_file_excel)\n ext = os.path.splitext(file_excel.name)[1] \n if 'xls' not in ext and 'xlsx' not in ext:\n return 'err: 文件格式错误,请上传Excel文件。' \n model.objects.all().delete() #删除数据库 \n workbook = xlrd.open_workbook(file_contents=file_excel.file.read())\n sheet_sum = len(workbook.sheet_names()) # 获取电子表格 工作表总数\n filename_no = ''\n n, n1 = 0, 0 # n序号 n1 未被导入数据的工作表记数\n for index in 
range(0, sheet_sum):\n ret = workbook_model(workbook, 0, index, model, k) # start from row 0 of the worksheet\n if ret[0:3] == 'err':\n return ret\n else:\n n += 1\n if ret:\n n1 += 1 # n1: count of worksheets that were not imported\n filename_no += str(n)+ '. ' + ret + '、' # n: worksheet index \n return \"导入了%s工作张表。未被导入数据的工作表总数:%s; 表名:%s\"%(str(sheet_sum), str(n1), filename_no)\n \ndef workbook_model(workbook, x, index, model, k):\n \"\"\" Write the worksheets of a spreadsheet into the database.\n workbook: workbook = xlrd.open_workbook(file_contents=file_excel.file.read())\n x: start from row x, x = 0, 1, 2...\n index: worksheet index\n model: target model\n k: model fields, matching the Excel columns\n An empty row ends the row loop \n \"\"\"\n sheet = workbook.sheet_by_index(index) \n try:\n # 1. convert the worksheet to a list of rows; an empty row ends the loop\n mylist = []\n for row_num in range(x, sheet.nrows): # start from row x, x = 0, 1, 2...\n row = sheet.row(row_num) #row -- [empty:'', empty:'', text:'HZ', number:10.0] \n v = []\n for r in row: # process one spreadsheet row at a time \n v.append(r.value) \n mylist.append(v)\n if not any(v): # empty row v=['','','','','','',''] ends the loop\n break \n \n # 2. skip the header rows; insert the customer name and the operator\n mlist = []\n name = ''\n filename_no = '' # names of worksheets that were not imported \n for (n,v) in enumerate(mylist): \n v1 = v[1:] #v ['product name','','','','','','',''] v1['','','','','','','']\n if n == 0 and not any(v1):\n name = v[0] # get the company name\n else:\n v.insert(1, name) # insert the customer name\n v.insert(22, '陈会计') # insert the operator\n if n >= 3: \n mlist.append(v)\n \n # 3. coerce the list values to the model field types, then write the rows to the database \n object_list = [] \n for (n,v) in enumerate(mlist):\n if isinstance(v[0], int) or isinstance(v[0], float):\n v[0] = get_date(int(v[0])) # convert element 0 to a date\n else:\n v[0] = '1900-01-01'\n \n for r in range(3,22):\n if not v[r] or isinstance(v[r], str): \n v[r] = 0 # numeric cells: if the element is empty or a string, convert it to 0\n else:\n v[r] = round(v[r], 2)\n \n# v[5] = round(float(v[4]) * float(v[3]), 2) # last-month balance amount = quantity * unit price\n# v[8] = round(float(v[7]) * float(v[6]), 2) # income amount = quantity * unit price\n# v[11] = round(float(v[10]) * float(v[9]), 2) # weighted amount = quantity * unit price\n# v[14] = round(float(v[13]) * float(v[12]), 2) # production expenditure amount = quantity * unit price\n# v[21] = round(float(v[20]) * float(v[19]), 2) # this-month balance amount = quantity * unit price\n \n d = dict(zip(k,v)) \n object_list.append(model(**d)) \n \n if object_list:\n model.objects.bulk_create(object_list, batch_size=20)\n else:\n filename_no += '%s' %(index+1)\n \n return filename_no\n except Exception as e:\n print(e)\n return 'err: %s. 错误工作表:%s'%(e, index+1)\n \ndef materialreport_import(request):\n \"\"\"Bulk-import material reports.\"\"\"\n down_tpl = '/web/materialreport/tpl/' # template download path \n \n if request.method == 'GET':\n return render(request, 'web/import.html',context=locals())\n \n k = [\"date\",\"name\",\"material_name\",\\\n \"lastmonth_number\",\"lastmonth_univalence\",\"lastmonth_money\",\\\n \"income_number\",\"income_univalence\",\"income_money\",\\\n \"weighting_number\",\"weighting_univalence\",\"weighting_money\",\\\n \"production_expenditure_number\",\"production_expenditure_univalence\",\"production_expenditure_money\",\\\n \"material_expenditure_number\",\"material_expenditure_money\",\\\n \"sale_number\",\"sale_money\",\\\n \"thismonth_number\",\"thismonth_univalence\",\"thismonth_money\",\"operator\"] \n ret = post_excel_model(request, 'post_file_excel', Materialreport, k) \n context = {'status': False, 'msg': '导入失败! %s' %ret} if ret[0:3] == 'err' \\\n else {'status': True, 'msg': '导入成功! %s. 
' %ret} \n return render(request, 'web/import.html',context)\n \ndef materialreport_tpl(request):\n \"\"\"下载材料报表模板\"\"\"\n tpl_path = 'web/files/%s' % '批量导入材料报表模板.xlsx' \n return down_file(tpl_path, 'excel_tpl.xlsx')\n \ndef materialreport_list(request, page):\n \"\"\"材料报表列表\"\"\"\n cleanData = request.GET.dict()\n model = Materialreport.objects\n if request.method == 'POST':\n page = 1\n cleanData = request.POST.dict() \n dict.pop(cleanData,'csrfmiddlewaretoken') \n\n name, name_list, queryString, datas = get_model_data(model, cleanData) \n lastmonth_money = round(sum(datas.filter().values_list('lastmonth_money', flat=True)), 2) #上月结存金额 求和 \n income_money = round(sum(datas.filter().values_list('income_money', flat=True)),2) #收入金额 求和\n weighting_money = round(sum(datas.filter().values_list('weighting_money', flat=True)),2) #加权金额 求和 \n production_expenditure_money = round(sum(datas.filter().values_list('production_expenditure_money', flat=True)),2) #生产支出金额 求和\n material_expenditure_money = round(sum(datas.filter().values_list('material_expenditure_money', flat=True)),2) #材料支出金额 求和\n sale_money = round(sum(datas.filter().values_list('sale_money', flat=True)),2) #销售金额 求和\n thismonth_money = round(sum(datas.filter().values_list('thismonth_money', flat=True)),2) #本月结存金额 求和\n\n data_list, pageList, num_pages, page = djangoPage(datas,page,PAGE_NUM) #调用分页函数\n offset = PAGE_NUM * (page - 1) \n name_list = pinyin(list(set(Materialreport.objects.values_list('name', flat=True)))) \n name_list.insert(0, '')\n return render(request, 'web/materialreport/materialreport_list.html', context=locals())\n\n\ndef materialreport_add(request):\n \"\"\"添加\"\"\"\n if request.method == 'GET':\n form = MaterialreportForm()\n return render(request, 'web/form_submit.html', context=locals())\n form = MaterialreportForm(data=request.POST)\n if form.is_valid():\n form.save()\n # 更新一条数据库记录\n model = Materialreport.objects\n model_id_update(request, model, get_model_last_id(model))\n return redirect('/web/materialreport/list/(.+)')\n return render(request, 'web/form_submit.html', context=locals())\n \n \ndef materialreport_edit(request, cid):\n \"\"\"编辑\"\"\"\n obj = Materialreport.objects.get(id=cid)\n if request.method == 'GET':\n form = MaterialreportForm(instance=obj)\n return render(request, 'web/form_submit.html', context=locals())\n form = MaterialreportForm(data=request.POST, instance=obj) \n if form.is_valid():\n form.save()\n # 更新一条数据库记录\n model = Materialreport.objects\n model_id_update(request, model, cid)\n return redirect('/web/materialreport/list/(.+)')\n return render(request, 'web/form_submit.html', context=locals())\n \ndef materialreport_del(request, cid):\n \"\"\"删除\"\"\"\n Materialreport.objects.filter(id=cid).delete()\n return redirect('/web/materialreport/list/(.+)')\n\ndef convertxlsx(data_list, filePath, ids): #??\n ret = True\n try:\n # 数据库字段值,转化为电子表格值。 电子表格标题栏。1、与数据库字段保持一致。\n headings = [\"序号\", \"采购日期\", \"客户名称\", \"送货单号码\",\"摘要\", \\\n \"送货数量\", \"单价\", \"金额\", \"收款\", \"余额\", \"备注\", \\\n \"日期1\", \"发票号码\", \"金额1\", \"欠客户票\",\"经办人\"]\n \n k = [\"date\",\"name\",\"receipt\",\"abstract\",\"number\",\\\n \"univalence\",\"money\",\"collection\",\"balance\",\"note\",\\\n \"date1\",\"Invoice_number\",\"money1\",\"owe_ticket\",\"operator\"] \n \n date = [str(i.date + datetime.timedelta(hours=8)).split('+')[0] for i in data_list] #日期+时差 \n name = [i.name for i in data_list] \n receipt = [i.receipt for i in data_list] \n abstract = [i.abstract for i in data_list ] \n number = [i.number for i in data_list ] 
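The `materialreport_list` view above computes its seven money totals by pulling each column into Python with `values_list` and summing it there. Django can return all of the totals in one query through its aggregation API; the following is a sketch of that alternative, not a drop-in patch, reusing the view's `datas` queryset and field names with `Sum` from `django.db.models`:

```python
# One aggregation query instead of seven values_list() round-trips.
from django.db.models import Sum

totals = datas.aggregate(
    lastmonth=Sum('lastmonth_money'),
    income=Sum('income_money'),
    weighting=Sum('weighting_money'),
    production=Sum('production_expenditure_money'),
    material=Sum('material_expenditure_money'),
    sale=Sum('sale_money'),
    thismonth=Sum('thismonth_money'),
)
# each value is None on an empty queryset, hence the `or 0`
income_money = round(totals['income'] or 0, 2)
```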
\n univalence = [i.univalence for i in data_list ] \n money = [i.money for i in data_list ] \n collection = [i.collection for i in data_list ] \n balance = [i.balance for i in data_list ] \n note = [i.note for i in data_list ] \n date1 = [str(i.date1 + datetime.timedelta(hours=8)).split('+')[0] for i in data_list] #日期+时差 \n Invoice_number = [i.Invoice_number for i in data_list ] \n money1 = [i.money1 for i in data_list ] \n owe_ticket = [i.owe_ticket for i in data_list ] \n operator = [i.operator for i in data_list ] \n \n data = [ids, date,name,receipt,abstract,number,univalence,\\\n money,collection,balance,note,date1,Invoice_number,money1,owe_ticket,operator ]\n if not list_to_xlsx(data, headings, filePath): # 保存为电子表格\n ret = False\n except Exception as _e:\n print('err: %s' %_e)\n ret = False\n return ret\n\n#单页保存Excel down_file\ndef materialreport_makexlsx_page(request, page):\n datas, tempFilePath, fileName = get_post_data(request, Materialreport.objects)\n datas, pageList, num_pages, page = djangoPage(datas, page, PAGE_NUM) #调用分页函数\n ids = [i+PAGE_NUM * (page - 1) for i in range(1,PAGE_NUM+1) ] #序号\n if convertxlsx(datas, tempFilePath, ids):\n return down_file(tempFilePath, fileName)\n return HttpResponseRedirect(r'/web/materialreport/list/%s' % (page))\n\n#全部保存Excel\ndef materialreport_makexlsx_all(request, page):\n datas, tempFilePath, fileName = get_post_data(request, Materialreport.objects)\n ids = [i for i in range(1,len(datas)+1) ] #序号\n if convertxlsx(datas, tempFilePath, ids):\n return down_file(tempFilePath, fileName)\n return HttpResponseRedirect(r'/web/materialreport/list/%s' % (page))\n\n\n", "sub_path": "mysite/web/views/materialreport.py", "file_name": "materialreport.py", "file_ext": "py", "file_size_in_byte": 14440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "os.path.splitext", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 60, "usage_type": "call"}, {"api_name": "myAPI.excelAPI.get_date", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 149, "usage_type": "call"}, {"api_name": "web.models.Materialreport", "line_number": 159, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 162, "usage_type": "call"}, {"api_name": "myAPI.downfileAPI.down_file", "line_number": 167, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 172, "usage_type": "name"}, {"api_name": "myAPI.modelAPI.get_model_data", "line_number": 178, "usage_type": "call"}, {"api_name": "myAPI.pageAPI.djangoPage", "line_number": 187, "usage_type": "call"}, {"api_name": "myAPI.pageAPI.PAGE_NUM", "line_number": 187, "usage_type": "argument"}, {"api_name": "myAPI.pageAPI.PAGE_NUM", "line_number": 188, "usage_type": "name"}, {"api_name": "myAPI.listAPI.pinyin", "line_number": 189, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects.values_list", "line_number": 189, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 189, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 189, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 191, "usage_type": "call"}, {"api_name": 
"web.forms.materialreport.MaterialreportForm", "line_number": 197, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 198, "usage_type": "call"}, {"api_name": "web.forms.materialreport.MaterialreportForm", "line_number": 199, "usage_type": "call"}, {"api_name": "myAPI.modelAPI.get_model_last_id", "line_number": 204, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 205, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 206, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects.get", "line_number": 211, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 211, "usage_type": "name"}, {"api_name": "web.forms.materialreport.MaterialreportForm", "line_number": 213, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 214, "usage_type": "call"}, {"api_name": "web.forms.materialreport.MaterialreportForm", "line_number": 215, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 219, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 219, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 221, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 222, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects.filter", "line_number": 226, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 226, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 241, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 251, "usage_type": "call"}, {"api_name": "myAPI.excelAPI.list_to_xlsx", "line_number": 259, "usage_type": "call"}, {"api_name": "myAPI.modelAPI.get_post_data", "line_number": 268, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 268, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 268, "usage_type": "name"}, {"api_name": "myAPI.pageAPI.djangoPage", "line_number": 269, "usage_type": "call"}, {"api_name": "myAPI.pageAPI.PAGE_NUM", "line_number": 269, "usage_type": "argument"}, {"api_name": "myAPI.pageAPI.PAGE_NUM", "line_number": 270, "usage_type": "name"}, {"api_name": "myAPI.downfileAPI.down_file", "line_number": 272, "usage_type": "call"}, {"api_name": "myAPI.modelAPI.get_post_data", "line_number": 277, "usage_type": "call"}, {"api_name": "web.models.Materialreport.objects", "line_number": 277, "usage_type": "attribute"}, {"api_name": "web.models.Materialreport", "line_number": 277, "usage_type": "name"}, {"api_name": "myAPI.downfileAPI.down_file", "line_number": 280, "usage_type": "call"}]} +{"seq_id": "595917651", "text": "\n# -*- coding: utf-8 -*-\n\nfrom copy import deepcopy\nfrom scipy.stats import norm\nimport numpy as np\nfrom sklearn.model_selection import StratifiedKFold, cross_val_predict, KFold\nfrom sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_auc_score\nfrom base import RANDOM_STATE\n\n\n###############################################################################\nACCURACY = 'accuracy'\nF1 = 'f1_score'\nROC_AUC = 'roc_auc_score'\nCONFUSION_MATRIX = 
'confusion_matrix'\nVALUES_TRUE = 'y_valid'\nVALUES_PRED = 'y_pred'\nTEST_PREDICTIONS = 'y_test'\nHYPERPARAMS = 'hyperparams'\nFEATURES = 'features'\nOBJECTS = 'objects'\n\nALL_METRICS = [\n ACCURACY, F1,\n ROC_AUC,\n CONFUSION_MATRIX,\n VALUES_TRUE,\n VALUES_PRED,\n TEST_PREDICTIONS,\n HYPERPARAMS,\n FEATURES,\n OBJECTS\n]\n\nALL_Y_TRUE_Y_PRED_BASED_METRICS = [\n ACCURACY, F1,\n ROC_AUC,\n CONFUSION_MATRIX,\n VALUES_TRUE,\n VALUES_PRED\n]\n\nPLOT_METRICS = [\n ACCURACY, F1,\n ROC_AUC\n]\n\n\n###############################################################################\nclass AccuracyLossGetter:\n \"\"\"Calculate loss function.\"\"\"\n def __call__(self, metrics):\n return 1.0 - metrics[ROC_AUC]\n\n\nclass MetricsGetter:\n \"\"\"Calculate metrics.\"\"\"\n\n def __init__(self, metrics, loss_func, n_folds):\n self._metrics = metrics\n self._loss_func = loss_func\n self._n_folds = n_folds\n\n def __call__(self, model, X, y, features, objects, X_test=None):\n model = deepcopy(model)\n metrics = self.get_cv_metrics(\n model,\n X,\n y,\n features,\n objects,\n self._metrics,\n self._n_folds,\n X_test=X_test,\n )\n loss = self._loss_func(metrics)\n\n return metrics, loss\n\n def set_folds_count(self, n_folds):\n self._n_folds = n_folds\n\n def get_cv_metrics(self, model, X, y, features, objects, metrics, n_folds, X_test=None):\n \"\"\"Calculate metrics for the model on (X, y) dataset using cross-validation.\"\"\"\n y_pred = cross_val_predict(\n model,\n X,\n y,\n cv=KFold(\n n_splits=n_folds,\n shuffle=True,\n random_state=RANDOM_STATE\n )\n )\n # get metrics from training set\n result = self.get_y_true_y_pred_based_metrics(y, y_pred, metrics)\n # fit model to get features and predictions\n model.fit(X, y)\n\n if HYPERPARAMS in metrics:\n result[HYPERPARAMS] = model.get_hyperparams()\n if FEATURES in metrics:\n result[FEATURES] =features[model.get_support(as_indices=True)]\n if OBJECTS in metrics:\n result[OBJECTS] = objects\n if TEST_PREDICTIONS in metrics:\n # predictions for X_test\n result[TEST_PREDICTIONS] = model.predict(X_test)\n\n return result\n\n def get_y_true_y_pred_based_metrics(self, y_true, y_pred, metrics):\n \"\"\"Calculate metrics for y_pred, y_true arrays.\"\"\"\n result = dict()\n if ACCURACY in metrics:\n result[ACCURACY] = accuracy_score(y_true, y_pred)\n if F1 in metrics:\n result[F1] = f1_score(y_true, y_pred)\n if ROC_AUC in metrics:\n result[ROC_AUC] = roc_auc_score(y_true, y_pred)\n if CONFUSION_MATRIX in metrics:\n result[CONFUSION_MATRIX] = confusion_matrix(y_true, y_pred)\n if VALUES_TRUE in metrics:\n result[VALUES_TRUE] = y_true\n if VALUES_PRED in metrics:\n result[VALUES_PRED] = y_pred\n\n return result\n\n # .. 
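The `get_cv_metrics` method above leans on `cross_val_predict`: each sample's prediction comes from the fold that did not train on it, so ordinary metrics computed over the full arrays are honest out-of-fold scores. A toy end-to-end sketch of that flow; the dataset and classifier below are illustrative, not part of this project:

```python
# Out-of-fold predictions: every sample is scored by a model that never saw it.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import KFold, cross_val_predict

X, y = make_classification(n_samples=200, random_state=0)
cv = KFold(n_splits=5, shuffle=True, random_state=0)
y_pred = cross_val_predict(LogisticRegression(max_iter=1000), X, y, cv=cv)

print(accuracy_score(y, y_pred), roc_auc_score(y, y_pred))
```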
compare predictions ..\n\n def results_differ_p_value(self, y_true, y1, y2):\n y1 = (np.array(y1) == np.array(y_true)).astype(np.float64)\n y2 = (np.array(y2) == np.array(y_true)).astype(np.float64)\n diff = y1 - y2\n norm_stat = diff.mean() / diff.std() * np.sqrt(diff.shape[0])\n quantile = norm.cdf(norm_stat)\n\n return min(quantile, 1.0 - quantile)\n", "sub_path": "metrics_getter.py", "file_name": "metrics_getter.py", "file_ext": "py", "file_size_in_byte": 4000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "copy.deepcopy", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_predict", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 90, "usage_type": "call"}, {"api_name": "base.RANDOM_STATE", "line_number": 93, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 138, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "368834505", "text": "from matplotlib.pyplot import *\nimport scipy.special as sp\n\ndef plt_3d(x, y, z):\n fig = figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(x, y, z, label='title',cmap='viridis', edgecolor='none')\n show()\n\ndef integral(n, step_f, rs_f, ys_f, rs_F):\n # first dimension - x\n r_2d = np.broadcast_to(rs_f[:, np.newaxis], (n, n))\n\n # second dimension - u\n u_2d = np.broadcast_to(rs_F[np.newaxis, :], (n, n))\n\n # J0(kr) * r\n A = sp.j0(u_2d * r_2d) * r_2d\n\n # scale rows by f(x)\n A = A * np.broadcast_to(ys_f[:, np.newaxis], (n, n))\n\n int_weights = np.ones(n)\n int_weights[0] = 1 / 2\n int_weights[-1] = 1 / 2\n int_weights *= step_f\n\n # scale rows by int_weights\n A = A * np.broadcast_to(int_weights[:, np.newaxis], (n, n))\n\n ys_F = np.sum(A, axis=0)\n\n return ys_F\n\ndef draw_2d(sp_n, sp_m, sp_c, xs, ys, s):\n extent = [xs[0], xs[-1], xs[0], xs[-1]]\n subplot(sp_n, sp_m, sp_c)\n imshow(np.abs(ys), extent=extent)\n colorbar()\n title(f'$\\\\left|{s}\\\\right|$')\n\ndef get_2d(F, shape, dtype):\n F2d = np.zeros(shape, dtype=dtype)\n for i in range(shape[0]):\n for j in range(shape[1]):\n F2d[i][j] = F[j + i * shape[1]]\n return F2d\n\nn = 50\nalpha = 6.0\nbeta = 6.0\nx = np.linspace(-np.pi, np.pi, n)\ny = np.linspace(-np.pi, np.pi, n)\n\nx2d, y2d = np.meshgrid(x, y)\nf = lambda r: np.exp((-r ** 2) / beta) * (np.sin(alpha * r) ** 2)\nr2d = np.sqrt(np.sqrt(x2d ** 2 + y2d ** 2))\nf2d = f(r2d)\n\nfigure(figsize=(8, 6))\n#plt_3d(x2d, y2d, np.abs(f2d))\ndraw_2d(2, 2, 1, x, f2d, 'sourse f')\n\nr = r2d.ravel()\nF = integral(r.shape[0], abs(r[1] - r[0]), r, f2d.ravel(), r)\nF2d = get_2d(F, x2d.shape, np.complex128)\n\n#plt_3d(x2d, y2d, np.abs(F2d))\ndraw_2d(2, 2, 2, x, F2d, 
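The `integral` function above discretizes the order-zero Hankel transform, F(u) = ∫ f(r) J0(u r) r dr, as a weighted sum with trapezoid end weights (half weight at the first and last sample). The same quadrature reads more directly as a single kernel-matrix product; a compact, numerically equivalent sketch with illustrative grids:

```python
# F[i] = sum_j J0(u_i * r_j) * r_j * f(r_j) * w_j, with trapezoid weights w.
import numpy as np
import scipy.special as sp

def hankel0(f_vals, rs, us):
    w = np.full(rs.size, rs[1] - rs[0])    # assumes a uniform r grid
    w[0] *= 0.5
    w[-1] *= 0.5                           # trapezoid end weights
    kernel = sp.j0(np.outer(us, rs)) * rs  # J0(u r) * r, shape (n_u, n_r)
    return kernel @ (f_vals * w)

rs = np.linspace(0.0, 5.0, 256)
F = hankel0(np.exp(-rs**2), rs, rs)        # transform of a Gaussian profile
```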
'my Hankel')\n\nshow()", "sub_path": "lab2package/main3.py", "file_name": "main3.py", "file_ext": "py", "file_size_in_byte": 1776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scipy.special.j0", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "305755709", "text": "#!/usr/bin/python3\n#Imports\nimport argparse\nimport os\n\nimport pyvisgraph as vg\nimport svggen\nimport minkowski\n\ndef ensure_dir(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef numtotime(num):\n num = round(abs(num))\n hours = num // 3600\n minutes = (num % 3600) // 60\n seconds = (num % 3600) % 60\n return hours, minutes, seconds\n\n#Commandline-Argumente parsen\nparser = argparse.ArgumentParser(description=\"Lösung zu Lisa rennt, Aufgabe 1, Runde 2, 37. BwInf von Lukas Rost\")\n\nparser.add_argument('-i', action=\"store\",dest=\"input\",default=\"lisarennt1.txt\",help=\"Eingabedatei\")\nparser.add_argument('-o',action=\"store\",dest=\"output\",default=\"lisarennt1_output.txt\",help=\"Ausgabedatei\")\nparser.add_argument('-so', action=\"store\",dest=\"svg\",default=\"lisarennt1_svg.svg\",help=\"SVG-Ausgabedatei\")\nparser.add_argument('-d',action=\"store_true\",default=False,dest=\"debug\",help=\"Debug-Ausgaben aktivieren\")\nparser.add_argument('-vlisa',action=\"store\",dest=\"velocity_lisa\",default=15,type=float,help=\"Erweiterung Geschwindigkeiten: Lisa in km/h\")\nparser.add_argument('-vbus',action=\"store\",dest=\"velocity_bus\",default=30,type=float,help=\"Erweiterung Geschwindigkeiten: Bus in km/h\")\nparser.add_argument('-minkowski',action=\"store\",default=None,help=\"Erweiterung Minkowski-Summe: Eingabedatei (1 Polygon im gleichen Format wie in der normalen Eingabe)\")\n\nargs = parser.parse_args()\n\n#Geschwindigkeiten in m/s umrechnen\nreal_v_lisa = round(args.velocity_lisa / 3.6 ,3)\nreal_v_bus = round(args.velocity_bus / 3.6 ,3)\n\n#Maximale x und y für Darstellung\nmaxx = 0\nmaxy = 0\n\n# Polygone einlesen\ninfile = open(args.input,'r')\nnumpoly = int(infile.readline())\npolylist = []\n\nfor i in range(numpoly):\n pointlist = []\n line = infile.readline().split(\" \")\n line = [float(x) for x in line]\n index = 1\n for j in range(int(line[0])):\n maxx = max(maxx,line[index])\n maxy = max(maxy,line[index+1])\n pointlist.append(vg.Point(line[index],line[index+1],polygon_id=(\"P\" + str(i+1))))\n index += 2\n polylist.append(pointlist)\n\n#Lisas Position einlesen\npos = infile.readline().split(\" \")\npos = [float(x) for x in pos]\nlisa = vg.Point(pos[0],pos[1],polygon_id=\"L\")\ninfile.close()\n\nmaxx = max(maxx,pos[0])\nmaxy = max(maxy,pos[1])\n\n#Erweiterung Minkowski-Summe\nif args.minkowski is not None:\n minfile = open(args.minkowski,'r')\n lisa_poly = []\n line = minfile.readline().split(\" \")\n minfile.close()\n line = [float(x) for x in line]\n index = 1\n for j in range(int(line[0])):\n lisa_poly.append(vg.Point(-line[index],-line[index+1]))\n index += 2\n polylist = minkowski.minkowski_sum_list(polylist,lisa_poly)\n\n#Graph erstellen und Algorithmus ausführen\ngraph = vg.VisGraph(real_v_lisa,real_v_bus,lisa)\ngraph.build(polylist)\npath,mintime,min_bus_time, minpoint,dist_minpoint = graph.shortest_path()\n\n#Debug-Ausgaben\nif args.debug:\n outpath = os.path.dirname(args.input) + \"/out/debug/\" + os.path.basename(args.input).split(\".\")[0]\n ensure_dir(outpath)\n 
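The route solver above drives a project-specific fork of pyvisgraph whose `VisGraph` constructor takes the two speeds and Lisa's start point. In the upstream library the flow is the same, just without those extras: build the visibility graph over the polygon obstacles, then query a shortest path. A sketch against the standard pyvisgraph API, where the triangle obstacle and the endpoints are made up:

```python
# Upstream pyvisgraph: visibility graph over polygon obstacles,
# then a shortest-path query between two points in free space.
import pyvisgraph as vg

obstacles = [[vg.Point(1.0, 1.0), vg.Point(4.0, 1.0), vg.Point(2.5, 4.0)]]
graph = vg.VisGraph()
graph.build(obstacles)  # computes mutually visible vertex pairs
path = graph.shortest_path(vg.Point(0.0, 0.0), vg.Point(5.0, 5.0))
print(path)  # straight segments routed around the triangle's corners
```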
svgfile = open(outpath + \"-visgraph.svg\",\"w\")\n svgfile.write(svggen.gen_vis_svg(graph.get_visgraph(),polylist,lisa,maxx+200,maxy+500))\n svgfile.close()\n \n\n#Ausgabe SVG\nsvgfile = open(args.svg,\"w\")\nsvgfile.write(svggen.gen_output_svg(path,polylist,lisa,maxx+200,maxy+500))\nsvgfile.close()\n\n#Ausgabe Text\nouttext = \"\"\nhours, minutes, seconds = numtotime(mintime)\n# Normalfall: Startzeit vor 7.30\nif mintime < 0:\n hours = 7 - hours\n minutes = 30 - minutes\n if seconds != 0:\n minutes -= 1\n seconds = 60 - seconds\n# Wenn Startzeit nach 7.30\nelse:\n hours = 7 + hours\n minutes = 30 + minutes\nbhours, bminutes, bseconds = numtotime(min_bus_time)\nbhours = 7 + bhours\nbminutes = 30 + bminutes\nouttext += \"Lisa startet um {:02d}:{:02d}:{:02d} und erreicht den Bus um {:02d}:{:02d}:{:02d}.\\n\".format(int(round(hours)), int(round(minutes)), int(round(seconds)),int(round(bhours)), int(round(bminutes)), int(round(bseconds)))\nouttext += \"Sie trifft bei der y-Koordinate {} auf den Bus.\\n\".format(minpoint.y)\nouttext += \"Die Route dauert {:0.2f} Minuten und ist {:0.2f} Meter lang.\\n\".format(dist_minpoint/(real_v_lisa*60),dist_minpoint)\nouttext += \"Die Route besteht aus folgenden Punkten:\\n\"\nfor point in path:\n outtext += \"{} {} {}\\n\".format(point.x,point.y,point.real_polygon_id)\n\noutfile = open(args.output,\"w\")\noutfile.write(outtext)\noutfile.close()", "sub_path": "Aufgabe1-Implementierung/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4478, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "pyvisgraph.Point", "line_number": 56, "usage_type": "call"}, {"api_name": "pyvisgraph.Point", "line_number": 63, "usage_type": "call"}, {"api_name": "pyvisgraph.Point", "line_number": 78, "usage_type": "call"}, {"api_name": "minkowski.minkowski_sum_list", "line_number": 80, "usage_type": "call"}, {"api_name": "pyvisgraph.VisGraph", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 89, "usage_type": "call"}, {"api_name": "svggen.gen_vis_svg", "line_number": 92, "usage_type": "call"}, {"api_name": "svggen.gen_output_svg", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "46971736", "text": "from tfmodel.model import PFNet, Transformer, DummyNet\nimport tensorflow as tf\nimport tensorflow_probability\nimport tensorflow_addons as tfa\nimport pickle\nimport numpy as np\nimport os\nfrom sklearn.model_selection import train_test_split\nimport sys\nimport glob\nimport io\nimport os\nimport yaml\nimport uuid\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sklearn\nimport kerastuner as kt\nfrom argparse import Namespace\nimport time\nimport json\nimport random\n\nclass PFNetLoss:\n def __init__(self, num_input_classes, num_output_classes, classification_loss_coef=1.0, charge_loss_coef=1e-3, momentum_loss_coef=1.0, momentum_loss_coefs=[1.0, 1.0, 1.0]):\n 
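The `PFNetLoss` class that begins here packs everything the network predicts for one particle into a single last axis: `num_output_classes` ID logits first, then one charge column, then the momentum components. Its `separate_prediction`/`separate_truth` helpers below just slice that axis apart; a small shape check of the layout, with all sizes illustrative:

```python
# Packed per-particle layout: [ id logits (N) | charge (1) | momentum (rest) ]
import tensorflow as tf

N = 6                                    # stands in for num_output_classes
y_pred = tf.zeros((2, 128, N + 1 + 3))   # (batch, elements, N + 1 + 3)

pred_id_logits = y_pred[:, :, :N]        # (2, 128, 6)
pred_charge = y_pred[:, :, N:N + 1]      # (2, 128, 1)
pred_momentum = y_pred[:, :, N + 1:]     # (2, 128, 3)
print(pred_id_logits.shape, pred_charge.shape, pred_momentum.shape)
```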
self.num_input_classes = num_input_classes\n self.num_output_classes = num_output_classes\n self.momentum_loss_coef = momentum_loss_coef\n self.momentum_loss_coefs = tf.constant(momentum_loss_coefs)\n self.charge_loss_coef = charge_loss_coef\n self.classification_loss_coef = classification_loss_coef\n self.gamma = 10.0\n\n def mse_unreduced(self, true, pred):\n return tf.math.pow(true-pred,2)\n\n def separate_prediction(self, y_pred):\n N = self.num_output_classes\n pred_id_logits = y_pred[:, :, :N]\n pred_charge = y_pred[:, :, N:N+1]\n pred_momentum = y_pred[:, :, N+1:]\n return pred_id_logits, pred_charge, pred_momentum\n\n def separate_truth(self, y_true):\n true_id = tf.cast(y_true[:, :, :1], tf.int32)\n true_charge = y_true[:, :, 1:2]\n true_momentum = y_true[:, :, 2:]\n return true_id, true_charge, true_momentum\n\n def loss_components(self, y_true, y_pred):\n pred_id_logits, pred_charge, pred_momentum = self.separate_prediction(y_pred)\n pred_id = tf.cast(tf.argmax(pred_id_logits, axis=-1), tf.int32)\n true_id, true_charge, true_momentum = self.separate_truth(y_true)\n true_id_onehot = tf.one_hot(tf.cast(true_id, tf.int32), depth=self.num_output_classes)\n\n #l1 = tf.nn.softmax_cross_entropy_with_logits(true_id_onehot, pred_id_logits)*self.classification_loss_coef\n l1 = tfa.losses.sigmoid_focal_crossentropy(tf.squeeze(true_id_onehot, [2]), pred_id_logits, from_logits=False, gamma=self.gamma)*self.classification_loss_coef\n l2 = self.mse_unreduced(true_momentum, pred_momentum) * self.momentum_loss_coef * self.momentum_loss_coefs\n l2s = tf.reduce_sum(l2, axis=-1)\n\n l3 = self.charge_loss_coef*self.mse_unreduced(true_charge, pred_charge)[:, :, 0]\n\n return l1, l2s, l3, l2\n\n def my_loss_full(self, y_true, y_pred):\n l1, l2, l3, _ = self.loss_components(y_true, y_pred)\n loss = l1 + l2 + l3\n\n return loss\n\n def my_loss_cls(self, y_true, y_pred):\n l1, l2, l3, _ = self.loss_components(y_true, y_pred)\n loss = l1\n\n return loss\n\n def my_loss_reg(self, y_true, y_pred):\n l1, l2, l3, _ = self.loss_components(y_true, y_pred)\n loss = l3\n\n return loss\n\ndef plot_confusion_matrix(cm):\n fig = plt.figure(figsize=(5,5))\n plt.imshow(cm, cmap=\"Blues\")\n plt.title(\"Reconstructed PID (normed to gen)\")\n plt.xlabel(\"MLPF PID\")\n plt.ylabel(\"Gen PID\")\n plt.colorbar()\n plt.tight_layout()\n return fig\n\ndef plot_regression(val_x, val_y, var_name, rng):\n fig = plt.figure(figsize=(5,5))\n plt.hist2d(\n val_x,\n val_y,\n bins=(rng, rng),\n cmap=\"Blues\",\n #norm=matplotlib.colors.LogNorm()\n );\n plt.xlabel(\"Gen {}\".format(var_name))\n plt.ylabel(\"MLPF {}\".format(var_name))\n return fig\n\ndef plot_multiplicity(num_pred, num_true):\n fig = plt.figure(figsize=(5,5))\n xs = np.arange(len(num_pred))\n plt.bar(xs, num_true, alpha=0.8)\n plt.bar(xs, num_pred, alpha=0.8)\n plt.xticks(xs)\n return fig\n\ndef plot_num_particle(num_pred, num_true, pid):\n fig = plt.figure(figsize=(5,5))\n plt.scatter(num_true, num_pred)\n plt.title(\"particle id {}\".format(pid))\n plt.xlabel(\"num true\")\n plt.ylabel(\"num pred\")\n a = min(np.min(num_true), np.min(num_pred))\n b = max(np.max(num_true), np.max(num_pred))\n plt.xlim(a, b)\n plt.ylim(a, b)\n return fig\n\ndef plot_to_image(figure):\n \"\"\"\n Converts the matplotlib plot specified by 'figure' to a PNG image and\n returns it. 
The supplied figure is closed and inaccessible after this call.\n \"\"\"\n \n buf = io.BytesIO()\n \n # Use plt.savefig to save the plot to a PNG in memory.\n plt.savefig(buf, format='png')\n plt.close(figure)\n buf.seek(0)\n \n image = tf.image.decode_png(buf.getvalue(), channels=4)\n image = tf.expand_dims(image, 0)\n \n return image\n\ndef plot_distributions(val_x, val_y, var_name, rng):\n fig = plt.figure(figsize=(5,5))\n plt.hist(val_x, bins=rng, density=True, histtype=\"step\", lw=2, label=\"gen\");\n plt.hist(val_y, bins=rng, density=True, histtype=\"step\", lw=2, label=\"MLPF\");\n plt.xlabel(var_name)\n plt.legend(loc=\"best\", frameon=False)\n plt.ylim(0,1.5)\n return fig\n\ndef plot_particles(y_pred, y_true, pid=1):\n #Ground truth vs model prediction particles\n fig = plt.figure(figsize=(10,10))\n\n ev = y_true[0, :]\n msk = ev[:, 0] == pid\n plt.scatter(ev[msk, 3], np.arctan2(ev[msk, 4], ev[msk, 5]), s=2*ev[msk, 2], marker=\"o\", alpha=0.5)\n\n ev = y_pred[0, :]\n msk = ev[:, 0] == pid\n plt.scatter(ev[msk, 3], np.arctan2(ev[msk, 4], ev[msk, 5]), s=2*ev[msk, 2], marker=\"s\", alpha=0.5)\n\n plt.xlabel(\"eta\")\n plt.ylabel(\"phi\")\n plt.xlim(-5,5)\n plt.ylim(-4,4)\n\n return fig\n\nclass ConfusionMatrixValidation:\n def __init__(self, X_test, y_test, loss_cls, outdir, model, num_input_classes, num_output_classes, file_writer_cm):\n self.X_test = X_test\n self.y_test = y_test\n self.loss_cls = loss_cls\n self.outdir = outdir\n self.model = model\n self.num_input_classes = num_input_classes\n self.num_output_classes = num_output_classes\n self.file_writer_cm = file_writer_cm\n\n def log_confusion_matrix(self, epoch, logs):\n \n outdir = self.outdir\n model = self.model\n X_test = self.X_test\n y_test = self.y_test\n\n test_pred = model.predict(X_test, batch_size=5)\n msk = X_test[:, :, 0] != 0\n\n if isinstance(test_pred, tuple):\n test_pred = tf.concat(list(test_pred), axis=-1)\n\n l1, l2, l3, l2_r = self.loss_cls.loss_components(y_test, test_pred)\n\n logs[\"epoch\"] = int(epoch)\n logs[\"l1\"] = float(tf.reduce_mean(l1).numpy())\n logs[\"l2\"] = float(tf.reduce_mean(l2).numpy())\n logs[\"l2_split\"] = [float(x) for x in tf.reduce_mean(l2_r, axis=[0,1])]\n logs[\"l3\"] = float(tf.reduce_mean(l3).numpy())\n\n with open(\"{}/logs_{}.json\".format(outdir, epoch), \"w\") as fi:\n json.dump(logs, fi)\n\n test_pred_id = np.argmax(test_pred[:, :, :self.num_output_classes], axis=-1)\n \n counts_pred = np.unique(test_pred_id, return_counts=True)\n\n test_pred = np.concatenate([np.expand_dims(test_pred_id, axis=-1), test_pred[:, :, self.num_output_classes:]], axis=-1)\n\n cm = sklearn.metrics.confusion_matrix(\n y_test[msk][:, 0].astype(np.int64).flatten(),\n test_pred[msk][:, 0].flatten(), labels=list(range(self.num_output_classes)))\n cm_normed = sklearn.metrics.confusion_matrix(\n y_test[msk][:, 0].astype(np.int64).flatten(),\n test_pred[msk][:, 0].flatten(), labels=list(range(self.num_output_classes)), normalize=\"true\")\n\n num_pred = np.sum(cm, axis=0)\n num_true = np.sum(cm, axis=1)\n\n figure = plot_confusion_matrix(cm)\n cm_image = plot_to_image(figure)\n\n figure = plot_confusion_matrix(cm_normed)\n cm_image_normed = plot_to_image(figure)\n\n msk = (test_pred[:, :, 0]!=0) & (y_test[:, :, 0]!=0)\n\n ch_true = y_test[msk, 1].flatten()\n ch_pred = test_pred[msk, 1].flatten()\n\n figure = plot_regression(ch_true, ch_pred, \"charge\", np.linspace(-2, 2, 100))\n ch_image = plot_to_image(figure)\n\n figure = plot_multiplicity(num_pred, num_true)\n n_image = 
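The `plot_to_image` helper used throughout this callback is the standard recipe for logging matplotlib figures to TensorBoard: render the figure to an in-memory PNG, decode the bytes into an image tensor, add a batch axis, and hand it to `tf.summary.image`. The recipe in isolation, with an illustrative log directory:

```python
# Matplotlib figure -> PNG bytes -> image tensor -> TensorBoard summary.
import io
import matplotlib.pyplot as plt
import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/tb_demo')  # illustrative path
fig = plt.figure()
plt.plot([0, 1], [0, 1])

buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(fig)  # free the figure; only the PNG bytes are kept
buf.seek(0)
image = tf.expand_dims(tf.image.decode_png(buf.getvalue(), channels=4), 0)

with writer.as_default():
    tf.summary.image('demo_figure', image, step=0)
```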
plot_to_image(figure)\n\n images_mult = []\n for icls in range(self.num_output_classes):\n n_pred = np.sum(test_pred[:, :, 0]==icls, axis=1)\n n_true = np.sum(y_test[:, :, 0]==icls, axis=1)\n figure = plot_num_particle(n_pred, n_true, icls)\n images_mult.append(plot_to_image(figure))\n\n images = {}\n for ireg in range(l2_r.shape[-1]):\n reg_true = y_test[msk, 2+ireg].flatten()\n reg_pred = test_pred[msk, 2+ireg].flatten()\n\n figure = plot_regression(reg_true, reg_pred, \"reg {}\".format(ireg), np.linspace(np.mean(reg_true) - 3*np.std(reg_true), np.mean(reg_true) + 3*np.std(reg_true), 100))\n images[ireg] = plot_to_image(figure)\n\n with self.file_writer_cm.as_default():\n tf.summary.image(\"Confusion Matrix\", cm_image, step=epoch)\n tf.summary.image(\"Confusion Matrix Normed\", cm_image_normed, step=epoch)\n tf.summary.image(\"Confusion Matrix Normed\", cm_image_normed, step=epoch)\n tf.summary.image(\"charge regression\", ch_image, step=epoch)\n tf.summary.image(\"particle multiplicity\", n_image, step=epoch)\n\n for icls, img in enumerate(images_mult):\n tf.summary.image(\"npart {}\".format(icls), img, step=epoch)\n\n for ireg in images.keys():\n tf.summary.image(\"regression {}\".format(ireg), images[ireg], step=epoch)\n\n tf.summary.scalar(\"loss_cls\", tf.reduce_mean(l1), step=epoch)\n for i in range(l2_r.shape[-1]):\n tf.summary.scalar(\"loss_reg_{}\".format(i), tf.reduce_mean(l2_r[:, :, i]), step=epoch)\n\n for i in range(cm_normed.shape[0]):\n tf.summary.scalar(\"acc_cls_{}\".format(i), cm_normed[i, i], step=epoch)\n \n tf.summary.scalar(\"loss_chg\", tf.reduce_mean(l3), step=epoch)\n\ndef prepare_callbacks(model, outdir):\n callbacks = []\n tb = tf.keras.callbacks.TensorBoard(\n log_dir=outdir, histogram_freq=1, write_graph=False, write_images=False,\n update_freq='epoch',\n #profile_batch=(10,90),\n profile_batch=0,\n )\n tb.set_model(model)\n callbacks += [tb]\n\n terminate_cb = tf.keras.callbacks.TerminateOnNaN()\n callbacks += [terminate_cb]\n\n cp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=outdir + \"/weights-{epoch:02d}-{val_loss:.6f}.hdf5\",\n save_weights_only=True,\n verbose=0\n )\n cp_callback.set_model(model)\n callbacks += [cp_callback]\n\n return callbacks\n\ndef get_rundir(base='experiments'):\n if not os.path.exists(base):\n os.makedirs(base)\n\n previous_runs = os.listdir(base)\n if len(previous_runs) == 0:\n run_number = 1\n else:\n run_number = max([int(s.split('run_')[1]) for s in previous_runs]) + 1\n\n logdir = 'run_%02d' % run_number\n return '{}/{}'.format(base, logdir)\n\ndef compute_weights_invsqrt(X, y, w):\n wn = tf.cast(tf.shape(w)[-1], tf.float32)/tf.sqrt(w)\n wn *= tf.cast(X[:, 0]!=0, tf.float32)\n #wn /= tf.reduce_sum(wn)\n return X, y, wn\n\ndef compute_weights_none(X, y, w):\n wn = tf.ones_like(w)\n wn *= tf.cast(X[:, 0]!=0, tf.float32)\n return X, y, wn\n\nweight_functions = {\n \"inverse_sqrt\": compute_weights_invsqrt,\n \"none\": compute_weights_none,\n}\n\ndef scale_outputs(X,y,w):\n ynew = y-out_m\n ynew = ynew/out_s\n return X, ynew, w\n\ndef targets_multi_output(num_output_classes):\n def func(X, y, w):\n return X, {\n \"cls\": tf.one_hot(tf.cast(y[:, :, 0], tf.int32), num_output_classes), \n \"charge\": y[:, :, 1:2],\n \"pt\": y[:, :, 2:3],\n \"eta\": y[:, :, 3:4],\n \"sin_phi\": y[:, :, 4:5],\n \"cos_phi\": y[:, :, 5:6],\n \"energy\": y[:, :, 6:7],\n }, w\n return func\n\ndef make_model(config, dtype):\n model = config['parameters']['model']\n if model == 'gnn':\n return make_gnn(config, dtype)\n elif model == 
'transformer':\n return make_transformer(config, dtype)\n elif model == 'dense':\n return make_dense(config, dtype)\n raise KeyError(\"Unknown model type {}\".format(model))\n\ndef make_gnn(config, dtype):\n activation = getattr(tf.nn, config['parameters']['activation'])\n\n parameters = [\n 'bin_size',\n 'num_convs_id',\n 'num_convs_reg',\n 'num_hidden_id_enc',\n 'num_hidden_id_dec',\n 'num_hidden_reg_enc',\n 'num_hidden_reg_dec',\n 'num_neighbors',\n 'hidden_dim_id',\n 'hidden_dim_reg',\n 'dist_mult',\n 'distance_dim',\n 'dropout',\n 'skip_connection'\n ]\n kwargs = {par: config['parameters'][par] for par in parameters}\n\n model = PFNet(\n multi_output=config[\"setup\"][\"multi_output\"],\n num_input_classes=config[\"dataset\"][\"num_input_classes\"],\n num_output_classes=config[\"dataset\"][\"num_output_classes\"],\n num_momentum_outputs=config[\"dataset\"][\"num_momentum_outputs\"],\n activation=activation,\n **kwargs\n )\n\n return model\n\ndef make_transformer(config, dtype):\n parameters = [\n 'num_layers', 'd_model', 'num_heads', 'dff', 'support', 'dropout'\n ]\n kwargs = {par: config['parameters'][par] for par in parameters}\n\n model = Transformer(\n multi_output=config[\"setup\"][\"multi_output\"],\n num_input_classes=config[\"dataset\"][\"num_input_classes\"],\n num_output_classes=config[\"dataset\"][\"num_output_classes\"],\n num_momentum_outputs=config[\"dataset\"][\"num_momentum_outputs\"],\n dtype=dtype,\n **kwargs\n )\n return model\n\ndef make_dense(config, dtype):\n model = DummyNet(\n num_input_classes=config[\"dataset\"][\"num_input_classes\"],\n num_output_classes=config[\"dataset\"][\"num_output_classes\"],\n num_momentum_outputs=config[\"dataset\"][\"num_momentum_outputs\"],\n )\n return model\n\ndef eval_model(X, ygen, ycand, model, config, outdir, global_batch_size):\n import scipy\n y_pred = model.predict(X, batch_size=global_batch_size)\n y_pred_raw_ids = y_pred[:, :, :config[\"dataset\"][\"num_output_classes\"]]\n \n #softmax score must be over a threshold 0.6 to call it a particle (prefer low fake rate to high efficiency)\n # y_pred_id_sm = scipy.special.softmax(y_pred_raw_ids, axis=-1)\n # y_pred_id_sm[y_pred_id_sm < 0.] 
= 0.0\n\n msk = np.ones(y_pred_raw_ids.shape, dtype=np.bool)\n\n #Use thresholds for charged and neutral hadrons based on matching the DelphesPF fake rate\n # msk[y_pred_id_sm[:, :, 1] < 0.8, 1] = 0\n # msk[y_pred_id_sm[:, :, 2] < 0.025, 2] = 0\n y_pred_raw_ids = y_pred_raw_ids*msk\n\n y_pred_id = np.argmax(y_pred_raw_ids, axis=-1)\n\n y_pred_id = np.concatenate([np.expand_dims(y_pred_id, axis=-1), y_pred[:, :, config[\"dataset\"][\"num_output_classes\"]:]], axis=-1)\n np_outfile = \"{}/pred.npz\".format(outdir)\n print(\"saving output to {}\".format(np_outfile))\n np.savez(np_outfile, X=X, ygen=ygen, ycand=ycand, ypred=y_pred_id, ypred_raw=y_pred_raw_ids)\n\ndef freeze_model(model, config, outdir):\n full_model = tf.function(lambda x: model(x, training=False))\n full_model = full_model.get_concrete_function(\n tf.TensorSpec((None, None, config[\"dataset\"][\"num_input_features\"]), tf.float32))\n from tensorflow.python.framework import convert_to_constants\n frozen_func = convert_to_constants.convert_variables_to_constants_v2(full_model)\n graph = tf.compat.v1.graph_util.remove_training_nodes(frozen_func.graph.as_graph_def())\n \n tf.io.write_graph(graph_or_graph_def=graph,\n logdir=\"{}/model_frozen\".format(outdir),\n name=\"frozen_graph.pb\",\n as_text=False)\n tf.io.write_graph(graph_or_graph_def=graph,\n logdir=\"{}/model_frozen\".format(outdir),\n name=\"frozen_graph.pbtxt\",\n as_text=True)\n\nclass FlattenedCategoricalAccuracy(tf.keras.metrics.CategoricalAccuracy):\n def __init__(self, use_weights=False, **kwargs):\n super(FlattenedCategoricalAccuracy, self).__init__(**kwargs)\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n #flatten the batch dimension\n _y_true = tf.reshape(y_true, (tf.shape(y_true)[0]*tf.shape(y_true)[1], tf.shape(y_true)[2]))\n _y_pred = tf.reshape(y_pred, (tf.shape(y_pred)[0]*tf.shape(y_pred)[1], tf.shape(y_pred)[2]))\n super(FlattenedCategoricalAccuracy, self).update_state(_y_true, _y_pred, None)\n\nclass FlattenedMeanIoU(tf.keras.metrics.MeanIoU):\n def __init__(self, use_weights=False, **kwargs):\n super(FlattenedMeanIoU, self).__init__(**kwargs)\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n #flatten the batch dimension\n _y_true = tf.reshape(y_true, (tf.shape(y_true)[0]*tf.shape(y_true)[1], tf.shape(y_true)[2]))\n _y_pred = tf.reshape(y_pred, (tf.shape(y_pred)[0]*tf.shape(y_pred)[1], tf.shape(y_pred)[2]))\n super(FlattenedMeanIoU, self).update_state(_y_true, _y_pred, None)\n\nclass LearningRateLoggingCallback(tf.keras.callbacks.Callback):\n # def __init__(self, opt, **kwargs):\n # super(LearningRateLoggingCallback, self).__init__(**kwargs)\n # self.opt = opt\n def on_epoch_end(self, epoch, numpy_logs):\n lr = self.model.optimizer._decayed_lr(tf.float32).numpy()\n tf.summary.scalar('learning rate', data=lr, step=epoch)\n\ndef main(args, yaml_path, config):\n\n #Switch off multi-output for the evaluation for backwards compatibility\n multi_output = True\n if args.action == \"eval\":\n multi_output = False\n\n tf.config.run_functions_eagerly(config['tensorflow']['eager'])\n\n from tfmodel.data import Dataset\n cds = config[\"dataset\"]\n\n dataset_def = Dataset(\n num_input_features=int(cds[\"num_input_features\"]),\n num_output_features=int(cds[\"num_output_features\"]),\n padded_num_elem_size=int(cds[\"padded_num_elem_size\"]),\n raw_path=cds.get(\"raw_path\", None),\n raw_files=cds.get(\"raw_files\", None),\n processed_path=cds[\"processed_path\"],\n validation_file_path=cds[\"validation_file_path\"],\n 
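The `Flattened*` metric wrappers above exist because stock Keras metrics expect `(samples, classes)`, while this model emits `(batch, elements, classes)`; merging the first two axes makes every element, rather than every event, count as one sample. The reshape they perform, in isolation and with illustrative shapes:

```python
# (batch, elements, classes) -> (batch * elements, classes) before scoring.
import tensorflow as tf

y = tf.random.uniform((4, 128, 6))
flat = tf.reshape(y, (tf.shape(y)[0] * tf.shape(y)[1], tf.shape(y)[2]))
print(flat.shape)  # (512, 6)
```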
schema=cds[\"schema\"]\n )\n\n if args.action == \"data\":\n dataset_def.process(\n config[\"dataset\"][\"num_files_per_chunk\"]\n )\n return\n\n global_batch_size = config['setup']['batch_size']\n config['setup']['multi_output'] = multi_output\n\n model_name = os.path.splitext(os.path.basename(yaml_path))[0] + \"-\" + str(uuid.uuid4())[:8]\n print(\"model_name=\", model_name)\n\n tfr_files = sorted(glob.glob(dataset_def.processed_path))\n if len(tfr_files) == 0:\n raise Exception(\"Could not find any files in {}\".format(dataset_def.processed_path))\n\n random.shuffle(tfr_files)\n dataset = tf.data.TFRecordDataset(tfr_files).map(dataset_def.parse_tfr_element, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n num_events = 0\n for i in dataset:\n num_events += 1\n print(\"dataset loaded, len={}\".format(num_events))\n\n n_train = config['setup']['num_events_train']\n n_test = config['setup']['num_events_test']\n n_epochs = config['setup']['num_epochs']\n weight_func = weight_functions[config['setup']['sample_weights']]\n assert(n_train + n_test <= num_events)\n\n ps = (\n tf.TensorShape([dataset_def.padded_num_elem_size, dataset_def.num_input_features]),\n tf.TensorShape([dataset_def.padded_num_elem_size, dataset_def.num_output_features]),\n tf.TensorShape([dataset_def.padded_num_elem_size, ])\n )\n\n ds_train = dataset.take(n_train).map(weight_func).padded_batch(global_batch_size, padded_shapes=ps)\n ds_test = dataset.skip(n_train).take(n_test).map(weight_func).padded_batch(global_batch_size, padded_shapes=ps)\n\n if multi_output:\n ds_train = ds_train.map(targets_multi_output(config['dataset']['num_output_classes']))\n ds_test = ds_test.map(targets_multi_output(config['dataset']['num_output_classes']))\n\n ds_train_r = ds_train.repeat(n_epochs)\n ds_test_r = ds_test.repeat(n_epochs)\n\n #small test dataset used in the callback for making monitoring plots\n #X_test = np.concatenate(list(ds_test.take(100).map(lambda x,y,w: x).as_numpy_iterator()))\n #y_test = np.concatenate(list(ds_test.take(100).map(lambda x,y,w: tf.concat(y, axis=-1)).as_numpy_iterator()))\n\n weights = config['setup']['weights']\n if args.weights:\n weights = args.weights\n if weights is None:\n outdir = 'experiments/{}'.format(model_name)\n if os.path.isdir(outdir):\n print(\"Output directory exists: {}\".format(outdir), file=sys.stderr)\n sys.exit(1)\n else:\n outdir = os.path.dirname(weights)\n\n try:\n gpus = [int(x) for x in os.environ.get(\"CUDA_VISIBLE_DEVICES\", \"0\").split(\",\")]\n num_gpus = len(gpus)\n print(\"num_gpus=\", num_gpus)\n if num_gpus > 1:\n strategy = tf.distribute.MirroredStrategy()\n global_batch_size = num_gpus * global_batch_size\n else:\n strategy = tf.distribute.OneDeviceStrategy(\"gpu:0\")\n except Exception as e:\n print(\"fallback to CPU\", e)\n strategy = tf.distribute.OneDeviceStrategy(\"cpu\")\n num_gpus = 0\n\n actual_lr = global_batch_size*float(config['setup']['lr'])\n \n Xs = []\n ygens = []\n ycands = []\n #for faster loading \n if args.action == \"train\":\n dataset_def.val_filelist = dataset_def.val_filelist[:1]\n\n for fi in dataset_def.val_filelist[:10]:\n print(fi)\n X, ygen, ycand = dataset_def.prepare_data(fi)\n\n Xs.append(np.concatenate(X))\n ygens.append(np.concatenate(ygen))\n ycands.append(np.concatenate(ycand))\n\n X_val = np.concatenate(Xs)\n ygen_val = np.concatenate(ygens)\n ycand_val = np.concatenate(ycands)\n\n with strategy.scope():\n if config['setup']['dtype'] == 'float16':\n if multi_output:\n raise Exception(\"float16 and multi_output are not supported 
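The input pipeline above pads every event to `padded_num_elem_size` via `padded_batch`, with one explicit `TensorShape` per tuple component (inputs, targets, weights). A reduced sketch of the same mechanism for a single component; the event sizes and pad length below are invented:

```python
# padded_batch pads each variable-length event to a fixed element count.
import tensorflow as tf

def gen():
    yield tf.ones((2, 3))  # event with 2 elements, 3 features
    yield tf.ones((5, 3))  # event with 5 elements, 3 features

ds = tf.data.Dataset.from_generator(
    gen, output_signature=tf.TensorSpec((None, 3), tf.float32))
ds = ds.padded_batch(2, padded_shapes=tf.TensorShape([8, 3]))

for batch in ds:
    print(batch.shape)  # (2, 8, 3); short events are zero-padded
```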
at the same time\")\n\n            model_dtype = tf.dtypes.float16\n            from tensorflow.keras.mixed_precision import experimental as mixed_precision\n            policy = mixed_precision.Policy('mixed_float16')\n            mixed_precision.set_policy(policy)\n\n            # lr_schedule must exist before the optimizer is wrapped (mirrors the float32 branch below)\n            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n                actual_lr,\n                decay_steps=1000,\n                decay_rate=0.99,\n                staircase=True\n            )\n            opt = mixed_precision.LossScaleOptimizer(\n                tf.keras.optimizers.Adam(learning_rate=lr_schedule),\n                loss_scale=\"dynamic\"\n            )\n        else:\n            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n                actual_lr,\n                decay_steps=1000,\n                decay_rate=0.99,\n                staircase=True\n            )\n\n            model_dtype = tf.dtypes.float32\n            opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\n\n        #if config['setup']['multi_output']:\n        #    from tfmodel.PCGrad_tf import PCGrad\n        #    opt = PCGrad(tf.compat.v1.train.AdamOptimizer(actual_lr))\n\n        if args.action==\"train\" or args.action==\"eval\":\n            model = make_model(config, model_dtype)\n\n            model.compile(\n                loss={\n                    \"cls\": tf.keras.losses.CategoricalCrossentropy(from_logits=False),\n                    \"charge\": tf.keras.losses.MeanSquaredError(),\n                    \"pt\": tf.keras.losses.MeanSquaredLogarithmicError(),\n                    \"eta\": tf.keras.losses.MeanSquaredError(),\n                    \"sin_phi\": tf.keras.losses.MeanSquaredError(),\n                    \"cos_phi\": tf.keras.losses.MeanSquaredError(),\n                    \"energy\": tf.keras.losses.MeanSquaredLogarithmicError(),\n                },\n                optimizer=opt,\n                sample_weight_mode='temporal',\n                loss_weights={\n                    \"cls\": config[\"dataset\"][\"classification_loss_coef\"],\n                    \"charge\": config[\"dataset\"][\"charge_loss_coef\"],\n                    \"pt\": config[\"dataset\"][\"pt_loss_coef\"],\n                    \"eta\": config[\"dataset\"][\"eta_loss_coef\"],\n                    \"sin_phi\": config[\"dataset\"][\"sin_phi_loss_coef\"],\n                    \"cos_phi\": config[\"dataset\"][\"cos_phi_loss_coef\"],\n                    \"energy\": config[\"dataset\"][\"energy_loss_coef\"],\n                },\n                metrics={\n                    \"cls\": [\n                        FlattenedCategoricalAccuracy(name=\"acc_unweighted\", dtype=tf.float64),\n                    ]\n                }\n            )\n\n            #Evaluate model once to build the layers\n            print(X_val.shape)\n            model(tf.cast(X_val[:5], model_dtype))\n            model.summary()\n            #import pdb;pdb.set_trace()\n\n            initial_epoch = 0\n            if weights:\n                model.load_weights(weights)\n                initial_epoch = int(weights.split(\"/\")[-1].split(\"-\")[1])\n\n        if args.action==\"train\":\n            #file_writer_cm = tf.summary.create_file_writer(outdir + '/val_extra')\n            callbacks = prepare_callbacks(\n                model, outdir\n            )\n            callbacks.append(LearningRateLoggingCallback())\n\n            #callbacks = []\n\n            fit_result = model.fit(\n                ds_train_r, validation_data=ds_test_r, epochs=initial_epoch+n_epochs, callbacks=callbacks,\n                steps_per_epoch=n_train//global_batch_size, validation_steps=n_test//global_batch_size,\n                initial_epoch=initial_epoch\n            )\n            with open(\"{}/history_{}.json\".format(outdir, initial_epoch), \"w\") as fi:\n                json.dump(fit_result.history, fi)\n            model.save(outdir + \"/model_full\", save_format=\"tf\")\n        \n        if args.action==\"eval\":\n            eval_model(X_val, ygen_val, ycand_val, model, config, outdir, global_batch_size)\n            freeze_model(model, config, outdir)\n\n        if args.action==\"time\":\n            synthetic_timing_data = []\n            for iteration in range(config[\"timing\"][\"num_iter\"]):\n                numev = config[\"timing\"][\"num_ev\"]\n                for evsize in [128*10, 128*20, 128*30, 128*40, 128*50, 128*60, 128*70, 128*80, 128*90, 128*100]:\n                    for batch_size in [1,2,3,4]:\n                        x = np.random.randn(batch_size, evsize, config[\"dataset\"][\"num_input_features\"]).astype(np.float32)\n\n                        model = make_model(config, model_dtype)\n                        model(x)\n\n                        if weights:\n                            model.load_weights(weights)\n\n                        t0 = time.time()\n                        for i in range(numev//batch_size):\n                            model(x)\n                        t1 = time.time()\n                        dt = t1 - t0\n\n                        time_per_event = 1000.0*(dt / 
numev)\n synthetic_timing_data.append(\n [{\"iteration\": iteration, \"batch_size\": batch_size, \"event_size\": evsize, \"time_per_event\": time_per_event}])\n print(\"Synthetic random data: batch_size={} event_size={}, time={:.2f} ms/ev\".format(batch_size, evsize, time_per_event))\n with open(\"{}/synthetic_timing.json\".format(outdir), \"w\") as fi:\n json.dump(synthetic_timing_data, fi)\n", "sub_path": "mlpf/tfmodel/model_setup.py", "file_name": "model_setup.py", "file_ext": "py", "file_size_in_byte": 27341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tensorflow.constant", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.math.pow", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.losses.sigmoid_focal_crossentropy", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow_addons.losses", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist2d", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "tensorflow.image.decode_png", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 150, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "numpy.arctan2", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "numpy.arctan2", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "tensorflow.concat", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 202, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 211, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 213, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 214, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 216, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 252, "usage_type": "call"}, {"api_name": "tensorflow.summary.image", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 256, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 257, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 257, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", 
"line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 258, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 259, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 260, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 260, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 263, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 266, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 268, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 268, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 268, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 273, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 275, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 275, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 275, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.TensorBoard", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 279, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.TerminateOnNaN", "line_number": 288, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 288, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 291, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 303, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 305, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 315, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 315, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 315, "usage_type": "attribute"}, {"api_name": "tensorflow.sqrt", "line_number": 315, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 316, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 316, "usage_type": "attribute"}, {"api_name": "tensorflow.ones_like", "line_number": 321, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 322, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 322, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 338, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 359, "usage_type": 
"attribute"}, {"api_name": "tfmodel.model.PFNet", "line_number": 379, "usage_type": "call"}, {"api_name": "tfmodel.model.Transformer", "line_number": 396, "usage_type": "call"}, {"api_name": "tfmodel.model.DummyNet", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 423, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 432, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 432, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 435, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 438, "usage_type": "call"}, {"api_name": "tensorflow.TensorSpec", "line_number": 440, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 440, "usage_type": "attribute"}, {"api_name": "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2", "line_number": 442, "usage_type": "call"}, {"api_name": "tensorflow.python.framework.convert_to_constants", "line_number": 442, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.graph_util.remove_training_nodes", "line_number": 443, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 443, "usage_type": "attribute"}, {"api_name": "tensorflow.io.write_graph", "line_number": 445, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 445, "usage_type": "attribute"}, {"api_name": "tensorflow.io.write_graph", "line_number": 449, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 449, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 454, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 460, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 460, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 461, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 461, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 464, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 470, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 470, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 471, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 471, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 474, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 479, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 480, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 480, "usage_type": "attribute"}, {"api_name": "tensorflow.config.run_functions_eagerly", "line_number": 489, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 489, "usage_type": "attribute"}, {"api_name": "tfmodel.data.Dataset", "line_number": 494, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 514, "usage_type": "call"}, {"api_name": "os.path", "line_number": 514, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 514, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 514, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 517, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 521, "usage_type": "call"}, {"api_name": 
"tensorflow.data.TFRecordDataset", "line_number": 522, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 522, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorShape", "line_number": 536, "usage_type": "call"}, {"api_name": "tensorflow.TensorShape", "line_number": 537, "usage_type": "call"}, {"api_name": "tensorflow.TensorShape", "line_number": 538, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 560, "usage_type": "call"}, {"api_name": "os.path", "line_number": 560, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 561, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 562, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 564, "usage_type": "call"}, {"api_name": "os.path", "line_number": 564, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 567, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 567, "usage_type": "attribute"}, {"api_name": "tensorflow.distribute.MirroredStrategy", "line_number": 571, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 571, "usage_type": "attribute"}, {"api_name": "tensorflow.distribute.OneDeviceStrategy", "line_number": 574, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 574, "usage_type": "attribute"}, {"api_name": "tensorflow.distribute.OneDeviceStrategy", "line_number": 577, "usage_type": "call"}, {"api_name": "tensorflow.distribute", "line_number": 577, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 594, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 597, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 598, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 599, "usage_type": "call"}, {"api_name": "tensorflow.dtypes", "line_number": 606, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.mixed_precision.experimental.Policy", "line_number": 608, "usage_type": "call"}, {"api_name": "tensorflow.keras.mixed_precision.experimental", "line_number": 608, "usage_type": "name"}, {"api_name": "tensorflow.keras.mixed_precision.experimental.set_policy", "line_number": 609, "usage_type": "call"}, {"api_name": "tensorflow.keras.mixed_precision.experimental", "line_number": 609, "usage_type": "name"}, {"api_name": "tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer", "line_number": 611, "usage_type": "call"}, {"api_name": "tensorflow.keras.mixed_precision.experimental", "line_number": 611, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 612, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 612, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.schedules.ExponentialDecay", "line_number": 616, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 616, "usage_type": "attribute"}, {"api_name": "tensorflow.dtypes", "line_number": 623, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 624, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 624, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 635, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 635, "usage_type": 
"attribute"}, {"api_name": "tensorflow.keras.losses.MeanSquaredError", "line_number": 636, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 636, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.MeanSquaredLogarithmicError", "line_number": 637, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 637, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.MeanSquaredError", "line_number": 638, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 638, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.MeanSquaredError", "line_number": 639, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 639, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.MeanSquaredError", "line_number": 640, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 640, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.MeanSquaredLogarithmicError", "line_number": 641, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 641, "usage_type": "attribute"}, {"api_name": "tensorflow.float64", "line_number": 656, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 663, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 687, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 700, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 700, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 708, "usage_type": "call"}, {"api_name": "time.time", "line_number": 711, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 719, "usage_type": "call"}]} +{"seq_id": "190995763", "text": "import dash\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.graph_objs as go\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv('data.csv')\r\n\r\napp = dash.Dash()\r\n\r\nyear_options = []\r\nfor year in df['year'].unique():\r\n year_options.append({'label':str(year), 'value':year})\r\n\r\napp.layout = html.Div(\r\n [\r\n dcc.Graph(\r\n id = 'graph'\r\n ),\r\n dcc.Dropdown(\r\n id='year-picker',\r\n options=year_options,\r\n value=df['year'].min()\r\n )\r\n ]\r\n)\r\n\r\n@app.callback(\r\n Output('graph','figure'),\r\n [Input('year-picker','value')]\r\n)\r\ndef update_figure(selected_year):\r\n \r\n filtered_df = df[df['year'] == selected_year]\r\n\r\n traces = []\r\n for continent_name in filtered_df['continent'].unique():\r\n df_by_continent = filtered_df[filtered_df['continent'] == continent_name]\r\n traces.append(\r\n go.Scatter(\r\n x = df_by_continent['gdpPercap'],\r\n y = df_by_continent['lifeExp'],\r\n text=df_by_continent['country'],\r\n mode='markers',\r\n marker={'size': 15},\r\n opacity=0.7\r\n )\r\n )\r\n return {\r\n 'data': traces,\r\n 'layout': go.Layout(\r\n title='Scatter Plot',\r\n xaxis={\r\n 'title': 'GDP Per Capita',\r\n 'type': 'log'\r\n },\r\n yaxis={\r\n 'title': 'Life Expectency'\r\n }\r\n )\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n\r\n", "sub_path": "dash_basic/4_dash_with_realdata.py", "file_name": "4_dash_with_realdata.py", "file_ext": "py", "file_size_in_byte": 1638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, 
{"api_name": "dash.Dash", "line_number": 10, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 16, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 21, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 41, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 41, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 52, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 52, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 30, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "359484083", "text": "# coding=utf-8\nfrom __future__ import absolute_import\n\nfrom .routes import urlpatterns\nfrom utils.verify import verify_token\nfrom errors.base_errors import APIError\nfrom flask import Blueprint, request, current_app\nfrom utils.base_utils import make_json_response, route_inject\n\n\nbp_name = \"user\"\n\nuser_api_endpoints = [\n \"{}.delete_token\".format(bp_name),\n \"{}.set_alias\".format(bp_name),\n \"{}.get_alias\".format(bp_name)\n]\n\nblueprint = Blueprint(bp_name, __name__)\n\nroute_inject(blueprint, urlpatterns)\n\n\n@blueprint.before_request\ndef before_request():\n if request.endpoint in user_api_endpoints:\n verify_token(current_app.config.get(\"DEBUG\"))\n\n\n@blueprint.errorhandler(APIError)\ndef blueprint_api_err(err):\n return make_json_response(err)\n", "sub_path": "server/blueprints/user/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Blueprint", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.base_utils.route_inject", "line_number": 21, "usage_type": "call"}, {"api_name": "routes.urlpatterns", "line_number": 21, "usage_type": "argument"}, {"api_name": "flask.request.endpoint", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.verify.verify_token", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.current_app.config.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 27, "usage_type": "name"}, {"api_name": "utils.base_utils.make_json_response", "line_number": 32, "usage_type": "call"}, {"api_name": "errors.base_errors.APIError", "line_number": 30, "usage_type": "argument"}]} +{"seq_id": "536054091", "text": "from typing import Tuple, List, Dict\nfrom environments.environment_abstract import Environment, State\nimport random; random.seed(0)\n\n\ndef policy_evaluation_step(env: Environment, states: List[State], state_vals: Dict[State, float],\n policy: Dict[State, List[float]], discount: float) -> Tuple[float, Dict[State, float]]:\n change: float = 0.0\n\n for s in states:\n v = state_vals[s]\n\n new_v = 0\n for a in env.get_actions():\n r, next_states, t_probs = env.state_action_dynamics(s, a)\n new_v += policy[s][a] * (r + discount * sum(p * state_vals[s_pr] for s_pr, p in zip(next_states, t_probs)))\n state_vals[s] = new_v\n\n change = max(change, abs(v - state_vals[s]))\n\n return change, state_vals\n\n\ndef q_learning_step(env: 
Environment, state: State, action_vals: Dict[State, List[float]], epsilon: float,\n                    learning_rate: float, discount: float):\n    if random.random() < epsilon:\n        action = random.choice(env.get_actions())\n    else:\n        action = max(zip(env.get_actions(), action_vals[state]), key=lambda x: x[1])[0]\n\n    state_next, r = env.sample_transition(state, action)\n\n    action_vals[state][action] += learning_rate * (r + discount * max(\n        action_vals[state_next][a] for a in env.get_actions()) - action_vals[state][action])\n\n    return state_next, action_vals\n", "sub_path": "assignments_code/assignment2.py", "file_name": "assignment2.py", "file_ext": "py", "file_size_in_byte": 1389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "random.seed", "line_number": 3, "usage_type": "call"}, {"api_name": "environments.environment_abstract.Environment", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "environments.environment_abstract.State", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 7, "usage_type": "name"}, {"api_name": "environments.environment_abstract.State", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 7, "usage_type": "name"}, {"api_name": "environments.environment_abstract.Environment", "line_number": 24, "usage_type": "name"}, {"api_name": "environments.environment_abstract.State", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "random.random", "line_number": 26, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "69936569", "text": "# -*- coding: utf-8 -*-\n# Copyright 2018 Mobicage NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nimport base64\nimport datetime\nimport json\nimport logging\nfrom babel.dates import format_datetime, get_timezone\nfrom types import NoneType\n\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import db, ndb\nfrom google.appengine.ext.deferred import deferred\n\nfrom mcfw.consts import MISSING\nfrom mcfw.properties import azzert\nfrom mcfw.rpc import arguments, returns\nfrom rogerthat.bizz.app import get_app\nfrom rogerthat.bizz.service import re_index\nfrom rogerthat.consts import SCHEDULED_QUEUE\nfrom rogerthat.dal import parent_ndb_key\nfrom rogerthat.dal.service import get_service_identity, get_default_service_identity\nfrom rogerthat.models import App, Image\nfrom rogerthat.models.news import NewsItem, NewsItemImage\nfrom rogerthat.rpc import users\nfrom rogerthat.rpc.service import BusinessException\nfrom rogerthat.rpc.users import get_current_session\nfrom 
rogerthat.service.api import app, news\nfrom rogerthat.to.news import NewsActionButtonTO, NewsTargetAudienceTO, NewsFeedNameTO\nfrom rogerthat.to.service import UserDetailsTO\nfrom rogerthat.utils import now, channel\nfrom rogerthat.utils.service import get_service_identity_tuple, get_service_user_from_service_identity_user\nfrom rogerthat.utils.transactions import run_in_xg_transaction\nfrom shop.bizz import update_regiomanager_statistic, get_payed\nfrom shop.business.legal_entities import get_vat_pct\nfrom shop.constants import STORE_MANAGER\nfrom shop.dal import get_customer\nfrom shop.exceptions import NoCreditCardException, AppNotFoundException\nfrom shop.models import Contact, Product, RegioManagerTeam, Order, OrderNumber, OrderItem, Charge\nfrom shop.to import OrderItemTO\nfrom solutions import translate as common_translate\nfrom solutions.common import SOLUTION_COMMON\nfrom solutions.common.bizz import SolutionModule, OrganizationType, facebook, twitter\nfrom solutions.common.bizz.cityapp import get_apps_in_country_count\nfrom solutions.common.bizz.service import get_inbox_message_sender_details, new_inbox_message, \\\n send_inbox_message_update, send_message_updates\nfrom solutions.common.dal import get_solution_settings\nfrom solutions.common.dal.cityapp import get_cityapp_profile, get_service_user_for_city\nfrom solutions.common.models import SolutionInboxMessage, SolutionScheduledBroadcast\nfrom solutions.common.models.budget import Budget\nfrom solutions.common.models.news import NewsCoupon, SolutionNewsItem, NewsSettings, NewsSettingsTags, NewsReview\nfrom solutions.common.restapi.store import generate_and_put_order_pdf_and_send_mail\nfrom solutions.common.to.news import SponsoredNewsItemCount, NewsBroadcastItemTO, NewsBroadcastItemListTO, \\\n NewsStatsTO, NewsAppTO\nfrom solutions.flex import SOLUTION_FLEX\n\nFREE_SPONSORED_ITEMS_PER_APP = 5\nSPONSOR_DAYS = 7\n\n\nclass AllNewsSentToReviewWarning(BusinessException):\n pass\n\n\n@returns(NewsBroadcastItemListTO)\n@arguments(cursor=unicode, service_identity=unicode, tag=unicode)\ndef get_news(cursor=None, service_identity=None, tag=None):\n if not tag or tag is MISSING:\n tag = u'news'\n news_list = news.list_news(cursor, 5, service_identity, tag=tag)\n result = NewsBroadcastItemListTO()\n result.result = []\n result.cursor = news_list.cursor\n\n for news_item in news_list.result:\n scheduled_item = get_scheduled_broadcast(news_item.id)\n if scheduled_item:\n on_facebook = scheduled_item.broadcast_on_facebook\n on_twitter = scheduled_item.broadcast_on_twitter\n result_item = NewsBroadcastItemTO.from_news_item_to(news_item, on_facebook, on_twitter)\n else:\n result_item = NewsBroadcastItemTO.from_news_item_to(news_item)\n result.result.append(result_item)\n\n return result\n\n\n@returns(NewsStatsTO)\n@arguments(news_id=(int, long), service_identity=unicode)\ndef get_news_statistics(news_id, service_identity=None):\n news_item = news.get(news_id, service_identity, True)\n apps_rpc = db.get([App.create_key(s.app_id) for s in news_item.statistics])\n result = NewsStatsTO(news_item=NewsBroadcastItemTO.from_news_item_to(news_item))\n result.apps = [NewsAppTO.from_model(model) for model in apps_rpc]\n return result\n\n\ndef _save_coupon_news_id(news_item_id, coupon):\n \"\"\"\n Args:\n news_item_id (int)\n coupon (NewsCoupon)\n \"\"\"\n coupon.news_id = news_item_id\n coupon.put()\n\n\ndef _app_uses_custom_organization_types(language):\n \"\"\"Check if the app has any translated organization type\"\"\"\n translations = {\n 
translation.key: translation.value for translation in app.get_translations(language)\n }\n\n if translations:\n for translation_key in OrganizationType.get_translation_keys().values():\n if translations.get(translation_key):\n return True\n\n return False\n\n\ndef get_regional_apps_of_item(news_item, default_app_id):\n \"\"\"Returns a list of regional apps of a news item if found\"\"\"\n regional_apps = []\n for app_id in news_item.app_ids:\n if app_id in (App.APP_ID_OSA_LOYALTY, App.APP_ID_ROGERTHAT, default_app_id):\n continue\n regional_apps.append(app_id)\n return regional_apps\n\n\n@ndb.transactional()\ndef create_regional_news_item(news_item, regional_apps, service_user, service_identity, paid=False):\n # type: (NewsItem, list[unicode], users.User, unicode, bool) -> SolutionNewsItem\n sln_item_key = SolutionNewsItem.create_key(news_item.id, service_user)\n settings_key = NewsSettings.create_key(service_user, service_identity)\n sln_item, news_settings = ndb.get_multi([sln_item_key, settings_key]) # type: (SolutionNewsItem, NewsSettings)\n if not sln_item:\n sln_item = SolutionNewsItem(key=sln_item_key)\n\n if news_item.scheduled_at:\n publish_time = news_item.scheduled_at\n else:\n publish_time = news_item.timestamp\n\n sln_item.publish_time = publish_time\n sln_item.app_ids = regional_apps\n sln_item.service_identity = service_identity\n if paid or news_settings and NewsSettingsTags.FREE_REGIONAL_NEWS in news_settings.tags:\n sln_item.paid = True\n sln_item.put()\n return sln_item\n\n\ndef check_budget(service_user, service_identity):\n keys = [Budget.create_key(service_user), NewsSettings.create_key(service_user, service_identity)]\n budget, news_settings = ndb.get_multi(keys) # type: (Budget, NewsSettings)\n if not news_settings or NewsSettingsTags.FREE_REGIONAL_NEWS not in news_settings.tags:\n if not budget or budget.balance <= 0:\n raise BusinessException('insufficient_budget')\n\n\ndef publish_item(service_identity_user, app_id, host, is_free_regional_news, order_items, coupon,\n should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token, **kwargs):\n service_user, identity = get_service_identity_tuple(service_identity_user)\n news_id = kwargs.get('news_id')\n sticky = kwargs.pop('sticky', False)\n if news_id:\n news_type = kwargs.pop('news_type')\n else:\n news_type = kwargs.get('news_type')\n qr_code_caption = kwargs.get('qr_code_caption')\n scheduled_at = kwargs.get('scheduled_at')\n\n def trans():\n news_item = news.publish(accept_missing=True, sticky=sticky, **kwargs)\n if should_save_coupon:\n _save_coupon_news_id(news_item.id, coupon)\n elif news_type == NewsItem.TYPE_QR_CODE and qr_code_caption is not MISSING and qr_code_caption and news_id:\n news_coupon = NewsCoupon.get_by_news_id(service_identity_user, news_id)\n if news_coupon:\n news_coupon.content = qr_code_caption\n news_coupon.put()\n else:\n logging.warn('Not updating qr_code_caption for non-existing coupon for news with id %d',\n news_id)\n if order_items:\n create_and_pay_news_order(service_user, news_item.id, order_items)\n regional_apps = get_regional_apps_of_item(news_item, app_id)\n if regional_apps:\n if not news_id and not is_free_regional_news:\n # check for budget on creation only\n check_budget(service_user, identity)\n deferred.defer(create_regional_news_item, news_item, regional_apps, service_user, identity,\n paid=is_free_regional_news, _transactional=True)\n return news_item\n\n try:\n news_item = run_in_xg_transaction(trans)\n if broadcast_on_facebook or 
broadcast_on_twitter:\n if scheduled_at is not MISSING and scheduled_at > 0:\n schedule_post_to_social_media(service_user, host, broadcast_on_facebook,\n broadcast_on_twitter, facebook_access_token,\n news_item.id, scheduled_at)\n else:\n post_to_social_media(service_user, broadcast_on_facebook,\n broadcast_on_twitter, facebook_access_token,\n news_item.id)\n\n return NewsBroadcastItemTO.from_news_item_to(news_item, broadcast_on_facebook, broadcast_on_twitter)\n except:\n if should_save_coupon:\n db.delete_async(coupon)\n raise\n\n\ndef get_news_review_message(lang, timezone, header=None, **data):\n def trans(term, *args, **kwargs):\n return common_translate(lang, SOLUTION_COMMON, unicode(term), *args, **kwargs)\n\n message = u'{}\\n\\n'.format(header or trans('news_review_requested'))\n message += u'{}: {}\\n'.format(trans('message-title'), data['title'])\n message += u'{}: {}\\n'.format(trans('inbox-message'), data['message'])\n\n action_buttons = [\n '{}'.format(button.caption) for button in data['action_buttons']\n ]\n message += u'{}: {}\\n'.format(trans('action_button'), ','.join(action_buttons))\n\n scheduled_at = data.get('scheduled_at')\n if scheduled_at:\n d = datetime.datetime.utcfromtimestamp(scheduled_at)\n date_str = format_datetime(d, locale=lang, tzinfo=get_timezone(timezone))\n message += u'{}\\n'.format(trans('scheduled_for_datetime', datetime=date_str))\n return message\n\n\ndef store_image(image_data):\n _, content = image_data.split(',')\n image = Image(blob=base64.b64decode(content))\n image.put()\n return image\n\n\ndef send_news_review_message(sln_settings, sender_service, review_key, image_url, **data):\n msg = get_news_review_message(sln_settings.main_language, sln_settings.timezone, **data)\n sender_user_details = get_inbox_message_sender_details(sender_service)\n picture_urls = []\n if image_url:\n picture_urls.append(image_url)\n\n message = new_inbox_message(\n sln_settings, msg, service_identity=None,\n category=SolutionInboxMessage.CATEGORY_NEWS_REVIEW,\n category_key=review_key,\n user_details=sender_user_details,\n picture_urls=picture_urls)\n\n send_message_updates(sln_settings, u'solutions.common.news.review.update', message)\n return unicode(message.key())\n\n\ndef send_news_for_review(city_service, service_identity_user, app_id, host, is_free_regional_news, order_items, coupon,\n should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token,\n **kwargs):\n\n key = NewsReview.create_key(city_service)\n review = key.get() or NewsReview(key=key)\n review.service_identity_user = service_identity_user\n review.app_id = app_id\n review.host = host\n review.is_free_regional_news = is_free_regional_news\n review.order_items = order_items\n review.coupon_id = coupon and coupon.id\n review.broadcast_on_facebook = broadcast_on_facebook\n review.broadcast_on_twitter = broadcast_on_twitter\n review.facebook_access_token = facebook_access_token\n review.data = kwargs\n\n image_url = None\n if kwargs['image']:\n image = store_image(kwargs['image'])\n review.image_id = image.id\n image_url = u'/unauthenticated/image/%d' % review.image_id\n\n sln_settings = get_solution_settings(city_service)\n sender_service, _ = get_service_identity_tuple(service_identity_user)\n review.inbox_message_key = send_news_review_message(\n sln_settings, sender_service, unicode(key), image_url, **kwargs)\n review.put()\n\n\n@returns()\n@arguments(review_key=unicode, reason=unicode)\ndef send_news_review_reply(review_key, reason):\n review = 
ndb.Key(urlsafe=review_key).get()\n if review:\n service_user, identity = get_service_identity_tuple(review.service_identity_user)\n sln_settings = get_solution_settings(service_user)\n review_msg = get_news_review_message(sln_settings.main_language, sln_settings.timezone, reason, **review.data)\n sender_user_details = get_inbox_message_sender_details(review.parent_service_user)\n message = new_inbox_message(sln_settings, review_msg, service_identity=identity,\n user_details=sender_user_details)\n send_inbox_message_update(sln_settings, message, service_identity=identity)\n\n\n@returns(NewsBroadcastItemTO)\n@arguments(review_key=unicode)\ndef publish_item_from_review(review_key):\n review = ndb.Key(urlsafe=review_key).get()\n if not review:\n raise BusinessException('review item is not found!')\n\n coupon = review.coupon_id and NewsCoupon.get_by_id(review.coupon_id)\n should_save_coupon = bool(coupon)\n\n service_user, _ = get_service_identity_tuple(review.service_identity_user)\n with users.set_user(service_user):\n item = publish_item(\n review.service_identity_user, review.app_id, review.host, review.is_free_regional_news,\n review.order_items, coupon, should_save_coupon, review.broadcast_on_facebook, review.broadcast_on_twitter,\n review.facebook_access_token, **review.data)\n\n inbox_message = SolutionInboxMessage.get(review.inbox_message_key)\n if inbox_message:\n inbox_message.read = True\n inbox_message.trashed = True\n inbox_message.put()\n sln_settings = get_solution_settings(review.parent_service_user)\n send_inbox_message_update(sln_settings, inbox_message)\n\n if review.image_id:\n Image.get_by_id(review.image_id).key.delete()\n\n review.key.delete()\n return item\n\n\n@returns(NewsBroadcastItemTO)\n@arguments(service_identity_user=users.User, title=unicode, message=unicode, broadcast_type=unicode, sponsored=bool,\n image=unicode, action_button=(NoneType, NewsActionButtonTO), order_items=(NoneType, [OrderItemTO]),\n news_type=(int, long), qr_code_caption=unicode, app_ids=[unicode], scheduled_at=(int, long),\n news_id=(NoneType, int, long), broadcast_on_facebook=bool, broadcast_on_twitter=bool,\n facebook_access_token=unicode, target_audience=NewsTargetAudienceTO, role_ids=[(int, long)], host=unicode,\n tag=unicode)\ndef put_news_item(service_identity_user, title, message, broadcast_type, sponsored, image, action_button, order_items,\n news_type, qr_code_caption, app_ids, scheduled_at, news_id=None, broadcast_on_facebook=False,\n broadcast_on_twitter=False, facebook_access_token=None, target_audience=None, role_ids=None,\n host=None, tag=None):\n \"\"\"\n Creates a news item first then processes the payment if necessary (not necessary for non-promoted posts).\n If the payment was unsuccessful it will be retried in a deferred task.\n\n Args:\n service_identity_user (users.User)\n title (unicode)\n message (unicode)\n broadcast_type (unicode)\n sponsored (bool)\n image (unicode)\n action_button (NewsActionButtonTO)\n order_items (list of OrderItemTO)\n news_type (int)\n qr_code_caption (unicode)\n app_ids (list of unicode)\n scheduled_at (long)\n news_id (long): id of the news item to update. 
When not provided a new news item will be created.\n broadcast_on_facebook (bool)\n broadcast_on_twitter (bool)\n facebook_access_token (unicode): user or page access token\n target_audience (NewsTargetAudienceTO)\n role_ids (list of long) the list of role ids to filter sending the news to their members\n host (unicode): host of the api request (used for social media apps)\n tag(unicode)\n\n Returns:\n news_item (NewsBroadcastItemTO)\n \"\"\"\n NEWS_TAG = u'news'\n if not order_items or order_items is MISSING:\n order_items = []\n if not tag or tag is MISSING:\n tag = NEWS_TAG\n if news_type == NewsItem.TYPE_QR_CODE:\n sln_settings = get_solution_settings(get_service_user_from_service_identity_user(service_identity_user))\n azzert(SolutionModule.LOYALTY in sln_settings.modules)\n qr_code_caption = MISSING.default(qr_code_caption, title)\n sponsored_until = None\n should_save_coupon = news_type == NewsItem.TYPE_QR_CODE and not news_id\n sponsored_app_ids = set()\n si = get_service_identity(service_identity_user)\n for order_item in reversed(order_items):\n if order_item.product == Product.PRODUCT_NEWS_PROMOTION and sponsored:\n azzert(order_item.app_id)\n azzert(order_item.app_id not in sponsored_app_ids)\n sponsored_app_ids.add(order_item.app_id)\n order_item.count = get_sponsored_news_count_in_app(service_identity_user, order_item.app_id).count\n else:\n raise BusinessException('Invalid product %s' % order_item.product)\n\n if not news_id and not app_ids:\n raise BusinessException('Please select at least one app to publish this news in')\n if sponsored:\n sponsored_until_date = datetime.datetime.utcnow() + datetime.timedelta(days=SPONSOR_DAYS)\n sponsored_until = long(sponsored_until_date.strftime('%s'))\n # for sponsored news that is free in certain apps no order item is given, so add it here\n sponsored_counts = get_sponsored_news_count(service_identity_user, app_ids)\n for sponsored_count in sponsored_counts:\n if sponsored_count.remaining_free != 0 and sponsored_count.app_id in app_ids:\n sponsored_app_ids.add(sponsored_count.app_id)\n app_ids = list(sponsored_app_ids)\n\n service_user, identity = get_service_identity_tuple(service_identity_user)\n default_app = get_app(si.defaultAppId)\n if App.APP_ID_ROGERTHAT in si.appIds and App.APP_ID_ROGERTHAT not in app_ids:\n app_ids.append(App.APP_ID_ROGERTHAT)\n if default_app.demo and App.APP_ID_ROGERTHAT in app_ids:\n app_ids.remove(App.APP_ID_ROGERTHAT)\n\n feed_names = {}\n if is_regional_news_enabled(default_app):\n if tag == NEWS_TAG:\n if default_app.demo:\n # For demo apps the following rules count\n # Extra apps selected --> post in REGIONAL NEWS in the demo app\n # No extra apps selected --> post in LOCAL NEWS in the demo app\n if len(app_ids) == 1 and app_ids[0] == default_app.app_id:\n pass # LOCAL NEWS\n else:\n feed_names[default_app.app_id] = NewsFeedNameTO(\n default_app.app_id, u'regional_news') # REGIONAL NEWS\n app_ids = [default_app.app_id]\n else:\n for app_id in app_ids:\n if app_id not in (si.app_id, App.APP_ID_ROGERTHAT):\n feed_names[app_id] = NewsFeedNameTO(app_id, u'regional_news')\n else:\n if default_app.demo:\n feed_names[default_app.app_id] = NewsFeedNameTO(default_app.app_id, tag)\n else:\n for app_id in app_ids:\n feed_names[app_id] = NewsFeedNameTO(app_id, tag)\n\n kwargs = {\n 'sticky_until': sponsored_until,\n 'message': message,\n 'broadcast_type': broadcast_type,\n 'service_identity': identity,\n 'news_id': news_id,\n 'news_type': news_type,\n 'image': image,\n 'scheduled_at': scheduled_at,\n 
'target_audience': target_audience,\n 'role_ids': role_ids,\n 'tags': [tag],\n }\n\n if news_type == NewsItem.TYPE_QR_CODE:\n if should_save_coupon:\n def trans():\n coupon = NewsCoupon(\n parent=NewsCoupon.create_parent_key(service_identity_user),\n content=qr_code_caption\n )\n coupon.put()\n return coupon\n coupon = db.run_in_transaction(trans)\n kwargs['qr_code_content'] = u'%s' % json.dumps({'c': coupon.id})\n kwargs['qr_code_caption'] = qr_code_caption\n elif news_type == NewsItem.TYPE_NORMAL:\n kwargs.update({\n 'action_buttons': [action_button] if action_button else [],\n 'title': title\n })\n else:\n raise BusinessException('Invalid news type')\n for key, value in kwargs.items():\n if value is MISSING:\n del kwargs[key]\n\n current_session = get_current_session()\n is_free_regional_news = (current_session and current_session.shop) or default_app.demo\n\n if sponsored:\n sticky = True\n else:\n customer = get_customer(service_user)\n if customer and customer.organization_type == OrganizationType.CITY and \\\n not _app_uses_custom_organization_types(customer.language):\n sticky = True\n if kwargs['sticky_until'] is None:\n kwargs['sticky_until'] = now()\n else:\n sticky = False\n kwargs['sticky'] = sticky\n\n if not should_save_coupon:\n coupon = None\n\n new_app_ids = list(app_ids)\n if not news_id:\n # check for city-enabled news review\n for app_id in app_ids:\n city_service = get_service_user_for_city(app_id)\n if city_service and city_service != service_user:\n city_app_profile = get_cityapp_profile(city_service)\n if city_app_profile.review_news:\n # create a city review for this app\n city_kwargs = kwargs.copy()\n city_kwargs['app_ids'] = [app_id]\n city_kwargs['feed_names'] = feed_names.get(app_id, [])\n send_news_for_review(\n city_service, service_identity_user, app_id, host, is_free_regional_news, order_items,\n coupon, should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token,\n **city_kwargs)\n # remove from current feed\n new_app_ids.remove(app_id)\n if feed_names and app_id in feed_names:\n del feed_names[app_id]\n\n if new_app_ids == [App.APP_ID_ROGERTHAT] or (not new_app_ids and len(app_ids) > 0):\n raise AllNewsSentToReviewWarning(u'news_review_all_sent_to_review')\n\n # for the rest\n kwargs['feed_names'] = feed_names.values()\n kwargs['app_ids'] = new_app_ids\n\n with users.set_user(service_user):\n return publish_item(\n service_identity_user, si.app_id, host, is_free_regional_news, order_items,\n coupon, should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token, **kwargs)\n\n\n@returns()\n@arguments(service_user=users.User, on_facebook=bool, on_twitter=bool,\n facebook_access_token=unicode, news_id=(int, long))\ndef post_to_social_media(service_user, on_facebook, on_twitter,\n facebook_access_token, news_id):\n news_item = NewsItem.get_by_id(news_id)\n if not news_item:\n logging.warn('Cannot post to social media, news item does not exist')\n return\n\n if news_item.type == NewsItem.TYPE_QR_CODE:\n logging.warn('Cannot post to social media for a coupon news type')\n return\n\n message = news_item.title + '\\n' + news_item.message\n image_content = None\n if news_item.image_id:\n news_item_image = NewsItemImage.get_by_id(news_item.image_id)\n if news_item_image:\n image_content = news_item_image.image\n\n if on_facebook and facebook_access_token:\n facebook.post_to_facebook(facebook_access_token, message, image_content)\n\n if on_twitter:\n media = []\n if image_content:\n media.append(image_content)\n 
twitter.update_twitter_status(service_user, message, media)\n\n\ndef post_to_social_media_scheduled(str_key):\n scheduled_broadcast = SolutionScheduledBroadcast.get(str_key)\n if not scheduled_broadcast or scheduled_broadcast.deleted:\n return\n\n news_id = scheduled_broadcast.news_id\n on_facebook = scheduled_broadcast.broadcast_on_facebook\n on_twitter = scheduled_broadcast.broadcast_on_twitter\n facebook_access_token = scheduled_broadcast.facebook_access_token\n\n service_user = scheduled_broadcast.service_user\n with users.set_user(service_user):\n post_to_social_media(service_user, on_facebook, on_twitter,\n facebook_access_token, news_id)\n scheduled_broadcast.delete()\n\n\ndef get_scheduled_broadcast(news_item_id, service_user=None, create=False):\n if service_user is None:\n service_user = users.get_current_user()\n\n key = SolutionScheduledBroadcast.create_key(news_item_id,\n service_user,\n SOLUTION_FLEX)\n scheduled_broadcast = db.get(key)\n if not scheduled_broadcast and create:\n scheduled_broadcast = SolutionScheduledBroadcast(key=key)\n\n return scheduled_broadcast\n\n\ndef schedule_post_to_social_media(service_user, host, on_facebook, on_twitter,\n facebook_access_token, news_id, scheduled_at):\n if scheduled_at < 1:\n return\n\n scheduled_broadcast = get_scheduled_broadcast(news_id, service_user, create=True)\n if scheduled_broadcast.timestamp == scheduled_at:\n return\n\n if on_facebook:\n if not facebook_access_token:\n if scheduled_broadcast.facebook_access_token:\n facebook_access_token = scheduled_broadcast.facebook_access_token\n else:\n raise ValueError('facebook access token is not provided, %s, news id: %d' % (service_user, news_id))\n\n # try to extend facebook access token first\n try:\n facebook_access_token = facebook.extend_access_token(host, facebook_access_token)\n except:\n logging.error('Cannot get an extended facebook access token', exc_info=True)\n\n if scheduled_broadcast.scheduled_task_name:\n # remove the old scheduled task\n task_name = str(scheduled_broadcast.scheduled_task_name)\n taskqueue.Queue(SCHEDULED_QUEUE).delete_tasks_by_name(task_name)\n\n scheduled_broadcast.timestamp = scheduled_at\n scheduled_broadcast.broadcast_on_facebook = on_facebook\n scheduled_broadcast.broadcast_on_twitter = on_twitter\n scheduled_broadcast.facebook_access_token = facebook_access_token\n scheduled_broadcast.news_id = news_id\n\n task = deferred.defer(post_to_social_media_scheduled,\n scheduled_broadcast.key_str,\n _countdown=scheduled_at - now(),\n _queue=SCHEDULED_QUEUE,\n _transactional=db.is_in_transaction())\n\n scheduled_broadcast.scheduled_task_name = task.name\n scheduled_broadcast.put()\n\n\n@returns()\n@arguments(service_user=users.User, news_item_id=(int, long), order_items_to=[OrderItemTO])\ndef create_and_pay_news_order(service_user, news_item_id, order_items_to):\n \"\"\"\n Creates an order, orderitems, charge and executes the charge. 
Should be executed in a transaction.\n    Args:\n        service_user (users.User)\n        news_item_id (long)\n        order_items_to (list of OrderItemTO)\n\n    Raises:\n        NoCreditCardException\n        ProductNotFoundException\n    \"\"\"\n\n    @db.non_transactional\n    def _get_customer():\n        return get_customer(service_user)\n\n    @db.non_transactional\n    def _get_contact():\n        return Contact.get_one(customer)\n\n    customer = _get_customer()\n    azzert(customer)\n    contact = _get_contact()\n    azzert(contact)\n    if not customer.stripe_valid:\n        raise NoCreditCardException(customer)\n    news_product_key = Product.create_key(Product.PRODUCT_NEWS_PROMOTION)\n    rmt_key = RegioManagerTeam.create_key(customer.team_id)\n    news_promotion_product, team = db.get((news_product_key, rmt_key))\n    azzert(news_promotion_product)\n    azzert(team)\n    new_order_key = Order.create_key(customer.id, OrderNumber.next(team.legal_entity_key))\n    vat_pct = get_vat_pct(customer, team)\n\n    total_amount = 0\n    for order_item in order_items_to:\n        if order_item.product == Product.PRODUCT_NEWS_PROMOTION:\n            total_amount += news_promotion_product.price * order_item.count\n            order_item.price = news_promotion_product.price\n        else:\n            raise BusinessException('Invalid product \\'%s\\'' % order_item.product)\n\n    vat = int(round(vat_pct * total_amount / 100))\n    total_amount_vat_incl = int(round(total_amount + vat))\n    now_ = now()\n    to_put = []\n    order = Order(\n        key=new_order_key,\n        date=now_,\n        amount=total_amount,\n        vat_pct=vat_pct,\n        vat=vat,\n        total_amount=total_amount_vat_incl,\n        contact_id=contact.id,\n        status=Order.STATUS_SIGNED,\n        is_subscription_order=False,\n        is_subscription_extension_order=False,\n        date_signed=now_,\n        manager=STORE_MANAGER,\n        team_id=team.id\n    )\n    to_put.append(order)\n    azzert(order.total_amount >= 0)\n\n    for item in order_items_to:\n        order_item = OrderItem(\n            parent=new_order_key,\n            number=item.number,\n            product_code=item.product,\n            count=item.count,\n            comment=item.comment,\n            price=item.price\n        )\n        order_item.app_id = item.app_id\n        if order_item.product_code == Product.PRODUCT_NEWS_PROMOTION:\n            order_item.news_item_id = news_item_id\n        to_put.append(order_item)\n\n    db.put(to_put)\n\n    # Not sure if this is necessary\n    deferred.defer(generate_and_put_order_pdf_and_send_mail, customer, new_order_key, service_user,\n                   _transactional=True)\n\n    # No need for signing here, immediately create a charge.\n    charge = Charge(parent=new_order_key)\n    charge.date = now()\n    charge.type = Charge.TYPE_ORDER_DELIVERY\n    charge.amount = order.amount\n    charge.vat_pct = order.vat_pct\n    charge.vat = order.vat\n    charge.total_amount = order.total_amount\n    charge.manager = order.manager\n    charge.team_id = order.team_id\n    charge.status = Charge.STATUS_PENDING\n    charge.date_executed = now()\n    charge.currency_code = team.legal_entity.currency_code\n    charge.put()\n\n    # Update the regiomanager statistics so these kind of orders show up in the monthly statistics\n    deferred.defer(update_regiomanager_statistic, gained_value=order.amount / 100,\n                   manager=order.manager, _transactional=True)\n\n    # charge the credit card\n    if charge.total_amount > 0:\n        get_payed(customer.id, order, charge)\n    else:\n        charge.status = Charge.STATUS_EXECUTED\n        charge.date_executed = now()\n        charge.put()\n    channel.send_message(service_user, 'common.billing.orders.update')\n\n\ndef delete_news(news_id):\n    news.delete(news_id)\n\n\n@returns(SponsoredNewsItemCount)\n@arguments(service_identity_user=users.User, app_id=unicode)\ndef get_sponsored_news_count_in_app(service_identity_user, app_id):\n    \"\"\"\n    Args:\n        
service_identity_user (users.User)\n app_id (unicode)\n \"\"\"\n news_items = NewsItem.list_sticky_by_sender_in_app(service_identity_user, app_id).fetch(\n FREE_SPONSORED_ITEMS_PER_APP)\n count = 0\n if len(news_items) == FREE_SPONSORED_ITEMS_PER_APP:\n for news_item in news_items:\n item_stats = news_item.statistics[app_id]\n if item_stats:\n count += item_stats.reached_total\n remaining_free_items = FREE_SPONSORED_ITEMS_PER_APP - len(news_items)\n return SponsoredNewsItemCount(app_id, count, remaining_free_items)\n\n\n@returns([SponsoredNewsItemCount])\n@arguments(service_identity_user=users.User, app_ids=[unicode])\ndef get_sponsored_news_count(service_identity_user, app_ids):\n \"\"\"\n Calculate price for a news in every app, based on the average reach of the last five news items.\n First five news items in an app should be free.\n Args:\n service_identity_user (users.User)\n app_ids (list of unicode)\n Returns:\n things (list of SponsoredNewsItemCount)\n \"\"\"\n price_per_apps = []\n for app_id in app_ids:\n news_items = NewsItem.list_sticky_by_sender_in_app(service_identity_user, app_id).fetch(\n FREE_SPONSORED_ITEMS_PER_APP)\n count = 0\n if len(news_items) == FREE_SPONSORED_ITEMS_PER_APP:\n for news_item in news_items:\n item_stats = news_item.statistics[app_id]\n if item_stats:\n count += item_stats.reached_total\n remaining_free_items = FREE_SPONSORED_ITEMS_PER_APP - len(news_items)\n price_per_apps.append(SponsoredNewsItemCount(app_id, int(count / 5), remaining_free_items))\n return price_per_apps\n\n\ndef is_regional_news_enabled(app_model):\n # type: (App) -> bool\n if app_model.app_id.startswith('osa-'):\n return True\n country_code = app_model.app_id.split('-')[0].lower()\n return app_model.type == App.APP_TYPE_CITY_APP and get_apps_in_country_count(country_code) > 1\n\n\ndef get_news_reviews(service_user):\n parent_key = parent_ndb_key(service_user, SOLUTION_COMMON)\n return NewsReview.query(ancestor=parent_key)\n", "sub_path": "src/solutions/common/bizz/news.py", "file_name": "news.py", "file_ext": "py", "file_size_in_byte": 34441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 75, "usage_type": "name"}, {"api_name": "mcfw.consts.MISSING", "line_number": 82, "usage_type": "name"}, {"api_name": "rogerthat.service.api.news.list_news", "line_number": 84, "usage_type": "call"}, {"api_name": "rogerthat.service.api.news", "line_number": 84, "usage_type": "name"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemListTO", "line_number": 85, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO.from_news_item_to", "line_number": 94, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO", "line_number": 94, "usage_type": "name"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO.from_news_item_to", "line_number": 96, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO", "line_number": 96, "usage_type": "name"}, {"api_name": "mcfw.rpc.returns", "line_number": 79, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemListTO", "line_number": 79, "usage_type": "argument"}, {"api_name": "mcfw.rpc.arguments", "line_number": 80, "usage_type": "call"}, {"api_name": "rogerthat.service.api.news.get", "line_number": 105, "usage_type": "call"}, {"api_name": "rogerthat.service.api.news", "line_number": 105, "usage_type": "name"}, 
{"api_name": "google.appengine.ext.db.get", "line_number": 106, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 106, "usage_type": "name"}, {"api_name": "rogerthat.models.App.create_key", "line_number": 106, "usage_type": "call"}, {"api_name": "rogerthat.models.App", "line_number": 106, "usage_type": "name"}, {"api_name": "solutions.common.to.news.NewsStatsTO", "line_number": 107, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO.from_news_item_to", "line_number": 107, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO", "line_number": 107, "usage_type": "name"}, {"api_name": "solutions.common.to.news.NewsAppTO.from_model", "line_number": 108, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsAppTO", "line_number": 108, "usage_type": "name"}, {"api_name": "mcfw.rpc.returns", "line_number": 102, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsStatsTO", "line_number": 102, "usage_type": "argument"}, {"api_name": "mcfw.rpc.arguments", "line_number": 103, "usage_type": "call"}, {"api_name": "rogerthat.service.api.app.get_translations", "line_number": 125, "usage_type": "call"}, {"api_name": "rogerthat.service.api.app", "line_number": 125, "usage_type": "name"}, {"api_name": "solutions.common.bizz.OrganizationType.get_translation_keys", "line_number": 129, "usage_type": "call"}, {"api_name": "solutions.common.bizz.OrganizationType", "line_number": 129, "usage_type": "name"}, {"api_name": "rogerthat.models.App.APP_ID_OSA_LOYALTY", "line_number": 140, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 140, "usage_type": "name"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 140, "usage_type": "attribute"}, {"api_name": "solutions.common.models.news.SolutionNewsItem.create_key", "line_number": 149, "usage_type": "call"}, {"api_name": "solutions.common.models.news.SolutionNewsItem", "line_number": 149, "usage_type": "name"}, {"api_name": "solutions.common.models.news.NewsSettings.create_key", "line_number": 150, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsSettings", "line_number": 150, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.get_multi", "line_number": 151, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 151, "usage_type": "name"}, {"api_name": "solutions.common.models.news.SolutionNewsItem", "line_number": 153, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsSettingsTags.FREE_REGIONAL_NEWS", "line_number": 163, "usage_type": "attribute"}, {"api_name": "solutions.common.models.news.NewsSettingsTags", "line_number": 163, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 146, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 146, "usage_type": "name"}, {"api_name": "solutions.common.models.budget.Budget.create_key", "line_number": 170, "usage_type": "call"}, {"api_name": "solutions.common.models.budget.Budget", "line_number": 170, "usage_type": "name"}, {"api_name": "solutions.common.models.news.NewsSettings.create_key", "line_number": 170, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsSettings", "line_number": 170, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.get_multi", "line_number": 171, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 171, "usage_type": "name"}, {"api_name": 
"solutions.common.models.news.NewsSettingsTags.FREE_REGIONAL_NEWS", "line_number": 172, "usage_type": "attribute"}, {"api_name": "solutions.common.models.news.NewsSettingsTags", "line_number": 172, "usage_type": "name"}, {"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 174, "usage_type": "call"}, {"api_name": "rogerthat.utils.service.get_service_identity_tuple", "line_number": 179, "usage_type": "call"}, {"api_name": "rogerthat.service.api.news.publish", "line_number": 190, "usage_type": "call"}, {"api_name": "rogerthat.service.api.news", "line_number": 190, "usage_type": "name"}, {"api_name": "rogerthat.models.news.NewsItem.TYPE_QR_CODE", "line_number": 193, "usage_type": "attribute"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 193, "usage_type": "name"}, {"api_name": "mcfw.consts.MISSING", "line_number": 193, "usage_type": "name"}, {"api_name": "solutions.common.models.news.NewsCoupon.get_by_news_id", "line_number": 194, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsCoupon", "line_number": 194, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 199, "usage_type": "call"}, {"api_name": "google.appengine.ext.deferred.deferred.defer", "line_number": 208, "usage_type": "call"}, {"api_name": "google.appengine.ext.deferred.deferred", "line_number": 208, "usage_type": "name"}, {"api_name": "rogerthat.utils.transactions.run_in_xg_transaction", "line_number": 213, "usage_type": "call"}, {"api_name": "mcfw.consts.MISSING", "line_number": 215, "usage_type": "name"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO.from_news_item_to", "line_number": 224, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO", "line_number": 224, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.delete_async", "line_number": 227, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 227, "usage_type": "name"}, {"api_name": "solutions.translate", "line_number": 233, "usage_type": "call"}, {"api_name": "solutions.common.SOLUTION_COMMON", "line_number": 233, "usage_type": "argument"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 246, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 246, "usage_type": "attribute"}, {"api_name": "babel.dates.format_datetime", "line_number": 247, "usage_type": "call"}, {"api_name": "babel.dates.get_timezone", "line_number": 247, "usage_type": "call"}, {"api_name": "rogerthat.models.Image", "line_number": 254, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 254, "usage_type": "call"}, {"api_name": "solutions.common.bizz.service.get_inbox_message_sender_details", "line_number": 261, "usage_type": "call"}, {"api_name": "solutions.common.bizz.service.new_inbox_message", "line_number": 266, "usage_type": "call"}, {"api_name": "solutions.common.models.SolutionInboxMessage.CATEGORY_NEWS_REVIEW", "line_number": 268, "usage_type": "attribute"}, {"api_name": "solutions.common.models.SolutionInboxMessage", "line_number": 268, "usage_type": "name"}, {"api_name": "solutions.common.bizz.service.send_message_updates", "line_number": 273, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsReview.create_key", "line_number": 281, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsReview", "line_number": 281, "usage_type": "name"}, {"api_name": "solutions.common.models.news.NewsReview", "line_number": 282, "usage_type": "call"}, {"api_name": 
"solutions.common.dal.get_solution_settings", "line_number": 300, "usage_type": "call"}, {"api_name": "rogerthat.utils.service.get_service_identity_tuple", "line_number": 301, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 310, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 310, "usage_type": "name"}, {"api_name": "rogerthat.utils.service.get_service_identity_tuple", "line_number": 312, "usage_type": "call"}, {"api_name": "solutions.common.dal.get_solution_settings", "line_number": 313, "usage_type": "call"}, {"api_name": "solutions.common.bizz.service.get_inbox_message_sender_details", "line_number": 315, "usage_type": "call"}, {"api_name": "solutions.common.bizz.service.new_inbox_message", "line_number": 316, "usage_type": "call"}, {"api_name": "solutions.common.bizz.service.send_inbox_message_update", "line_number": 318, "usage_type": "call"}, {"api_name": "mcfw.rpc.returns", "line_number": 307, "usage_type": "call"}, {"api_name": "mcfw.rpc.arguments", "line_number": 308, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 324, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 324, "usage_type": "name"}, {"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 326, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsCoupon.get_by_id", "line_number": 328, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsCoupon", "line_number": 328, "usage_type": "name"}, {"api_name": "rogerthat.utils.service.get_service_identity_tuple", "line_number": 331, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users.set_user", "line_number": 332, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users", "line_number": 332, "usage_type": "name"}, {"api_name": "solutions.common.models.SolutionInboxMessage.get", "line_number": 338, "usage_type": "call"}, {"api_name": "solutions.common.models.SolutionInboxMessage", "line_number": 338, "usage_type": "name"}, {"api_name": "solutions.common.dal.get_solution_settings", "line_number": 343, "usage_type": "call"}, {"api_name": "solutions.common.bizz.service.send_inbox_message_update", "line_number": 344, "usage_type": "call"}, {"api_name": "rogerthat.models.Image.get_by_id", "line_number": 347, "usage_type": "call"}, {"api_name": "rogerthat.models.Image", "line_number": 347, "usage_type": "name"}, {"api_name": "mcfw.rpc.returns", "line_number": 321, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO", "line_number": 321, "usage_type": "argument"}, {"api_name": "mcfw.rpc.arguments", "line_number": 322, "usage_type": "call"}, {"api_name": "mcfw.consts.MISSING", "line_number": 394, "usage_type": "name"}, {"api_name": "mcfw.consts.MISSING", "line_number": 396, "usage_type": "name"}, {"api_name": "rogerthat.models.news.NewsItem.TYPE_QR_CODE", "line_number": 398, "usage_type": "attribute"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 398, "usage_type": "name"}, {"api_name": "solutions.common.dal.get_solution_settings", "line_number": 399, "usage_type": "call"}, {"api_name": "rogerthat.utils.service.get_service_user_from_service_identity_user", "line_number": 399, "usage_type": "call"}, {"api_name": "mcfw.properties.azzert", "line_number": 400, "usage_type": "call"}, {"api_name": "solutions.common.bizz.SolutionModule.LOYALTY", "line_number": 400, "usage_type": "attribute"}, {"api_name": "solutions.common.bizz.SolutionModule", "line_number": 400, 
"usage_type": "name"}, {"api_name": "mcfw.consts.MISSING.default", "line_number": 401, "usage_type": "call"}, {"api_name": "mcfw.consts.MISSING", "line_number": 401, "usage_type": "name"}, {"api_name": "rogerthat.models.news.NewsItem.TYPE_QR_CODE", "line_number": 403, "usage_type": "attribute"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 403, "usage_type": "name"}, {"api_name": "rogerthat.dal.service.get_service_identity", "line_number": 405, "usage_type": "call"}, {"api_name": "shop.models.Product.PRODUCT_NEWS_PROMOTION", "line_number": 407, "usage_type": "attribute"}, {"api_name": "shop.models.Product", "line_number": 407, "usage_type": "name"}, {"api_name": "mcfw.properties.azzert", "line_number": 408, "usage_type": "call"}, {"api_name": "mcfw.properties.azzert", "line_number": 409, "usage_type": "call"}, {"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 413, "usage_type": "call"}, {"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 416, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 418, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 418, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 418, "usage_type": "call"}, {"api_name": "rogerthat.utils.service.get_service_identity_tuple", "line_number": 427, "usage_type": "call"}, {"api_name": "rogerthat.bizz.app.get_app", "line_number": 428, "usage_type": "call"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 429, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 429, "usage_type": "name"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 430, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 430, "usage_type": "name"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 431, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 431, "usage_type": "name"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 432, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 432, "usage_type": "name"}, {"api_name": "rogerthat.to.news.NewsFeedNameTO", "line_number": 444, "usage_type": "call"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 449, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 449, "usage_type": "name"}, {"api_name": "rogerthat.to.news.NewsFeedNameTO", "line_number": 450, "usage_type": "call"}, {"api_name": "rogerthat.to.news.NewsFeedNameTO", "line_number": 453, "usage_type": "call"}, {"api_name": "rogerthat.to.news.NewsFeedNameTO", "line_number": 456, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItem.TYPE_QR_CODE", "line_number": 472, "usage_type": "attribute"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 472, "usage_type": "name"}, {"api_name": "solutions.common.models.news.NewsCoupon", "line_number": 475, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsCoupon.create_parent_key", "line_number": 476, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsCoupon", "line_number": 476, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.run_in_transaction", "line_number": 481, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 481, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 482, "usage_type": "call"}, {"api_name": 
"rogerthat.models.news.NewsItem.TYPE_NORMAL", "line_number": 484, "usage_type": "attribute"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 484, "usage_type": "name"}, {"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 490, "usage_type": "call"}, {"api_name": "mcfw.consts.MISSING", "line_number": 492, "usage_type": "name"}, {"api_name": "rogerthat.rpc.users.get_current_session", "line_number": 495, "usage_type": "call"}, {"api_name": "shop.dal.get_customer", "line_number": 501, "usage_type": "call"}, {"api_name": "solutions.common.bizz.OrganizationType.CITY", "line_number": 502, "usage_type": "attribute"}, {"api_name": "solutions.common.bizz.OrganizationType", "line_number": 502, "usage_type": "name"}, {"api_name": "rogerthat.utils.now", "line_number": 506, "usage_type": "call"}, {"api_name": "solutions.common.dal.cityapp.get_service_user_for_city", "line_number": 518, "usage_type": "call"}, {"api_name": "solutions.common.dal.cityapp.get_cityapp_profile", "line_number": 520, "usage_type": "call"}, {"api_name": "rogerthat.models.App.APP_ID_ROGERTHAT", "line_number": 535, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 535, "usage_type": "name"}, {"api_name": "rogerthat.rpc.users.set_user", "line_number": 542, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users", "line_number": 542, "usage_type": "name"}, {"api_name": "mcfw.rpc.returns", "line_number": 353, "usage_type": "call"}, {"api_name": "solutions.common.to.news.NewsBroadcastItemTO", "line_number": 353, "usage_type": "argument"}, {"api_name": "mcfw.rpc.arguments", "line_number": 354, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users.User", "line_number": 354, "usage_type": "attribute"}, {"api_name": "rogerthat.rpc.users", "line_number": 354, "usage_type": "name"}, {"api_name": "types.NoneType", "line_number": 355, "usage_type": "name"}, {"api_name": "rogerthat.to.news.NewsActionButtonTO", "line_number": 355, "usage_type": "name"}, {"api_name": "shop.to.OrderItemTO", "line_number": 355, "usage_type": "name"}, {"api_name": "types.NoneType", "line_number": 357, "usage_type": "name"}, {"api_name": "rogerthat.to.news.NewsTargetAudienceTO", "line_number": 358, "usage_type": "name"}, {"api_name": "rogerthat.models.news.NewsItem.get_by_id", "line_number": 553, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 553, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 555, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItem.TYPE_QR_CODE", "line_number": 558, "usage_type": "attribute"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 558, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 559, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItemImage.get_by_id", "line_number": 565, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItemImage", "line_number": 565, "usage_type": "name"}, {"api_name": "solutions.common.bizz.facebook.post_to_facebook", "line_number": 570, "usage_type": "call"}, {"api_name": "solutions.common.bizz.facebook", "line_number": 570, "usage_type": "name"}, {"api_name": "solutions.common.bizz.twitter.update_twitter_status", "line_number": 576, "usage_type": "call"}, {"api_name": "solutions.common.bizz.twitter", "line_number": 576, "usage_type": "name"}, {"api_name": "mcfw.rpc.returns", "line_number": 548, "usage_type": "call"}, {"api_name": "mcfw.rpc.arguments", "line_number": 549, "usage_type": "call"}, {"api_name": 
"rogerthat.rpc.users.User", "line_number": 549, "usage_type": "attribute"}, {"api_name": "rogerthat.rpc.users", "line_number": 549, "usage_type": "name"}, {"api_name": "solutions.common.models.SolutionScheduledBroadcast.get", "line_number": 580, "usage_type": "call"}, {"api_name": "solutions.common.models.SolutionScheduledBroadcast", "line_number": 580, "usage_type": "name"}, {"api_name": "rogerthat.rpc.users.set_user", "line_number": 590, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users", "line_number": 590, "usage_type": "name"}, {"api_name": "rogerthat.rpc.users.get_current_user", "line_number": 598, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users", "line_number": 598, "usage_type": "name"}, {"api_name": "solutions.common.models.SolutionScheduledBroadcast.create_key", "line_number": 600, "usage_type": "call"}, {"api_name": "solutions.flex.SOLUTION_FLEX", "line_number": 602, "usage_type": "argument"}, {"api_name": "solutions.common.models.SolutionScheduledBroadcast", "line_number": 600, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.get", "line_number": 603, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 603, "usage_type": "name"}, {"api_name": "solutions.common.models.SolutionScheduledBroadcast", "line_number": 605, "usage_type": "call"}, {"api_name": "solutions.common.bizz.facebook.extend_access_token", "line_number": 628, "usage_type": "call"}, {"api_name": "solutions.common.bizz.facebook", "line_number": 628, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 630, "usage_type": "call"}, {"api_name": "google.appengine.api.taskqueue.Queue", "line_number": 635, "usage_type": "call"}, {"api_name": "rogerthat.consts.SCHEDULED_QUEUE", "line_number": 635, "usage_type": "argument"}, {"api_name": "google.appengine.api.taskqueue", "line_number": 635, "usage_type": "name"}, {"api_name": "google.appengine.ext.deferred.deferred.defer", "line_number": 643, "usage_type": "call"}, {"api_name": "google.appengine.ext.deferred.deferred", "line_number": 643, "usage_type": "name"}, {"api_name": "rogerthat.utils.now", "line_number": 645, "usage_type": "call"}, {"api_name": "rogerthat.consts.SCHEDULED_QUEUE", "line_number": 646, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.is_in_transaction", "line_number": 647, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 647, "usage_type": "name"}, {"api_name": "shop.dal.get_customer", "line_number": 670, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.non_transactional", "line_number": 668, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 668, "usage_type": "name"}, {"api_name": "shop.models.Contact.get_one", "line_number": 674, "usage_type": "call"}, {"api_name": "shop.models.Contact", "line_number": 674, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.non_transactional", "line_number": 672, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 672, "usage_type": "name"}, {"api_name": "mcfw.properties.azzert", "line_number": 677, "usage_type": "call"}, {"api_name": "mcfw.properties.azzert", "line_number": 679, "usage_type": "call"}, {"api_name": "shop.exceptions.NoCreditCardException", "line_number": 681, "usage_type": "call"}, {"api_name": "shop.models.Product.create_key", "line_number": 682, "usage_type": "call"}, {"api_name": "shop.models.Product", "line_number": 682, "usage_type": "name"}, {"api_name": "shop.models.Product.PRODUCT_NEWS_PROMOTION", 
"line_number": 682, "usage_type": "attribute"}, {"api_name": "shop.models.RegioManagerTeam.create_key", "line_number": 683, "usage_type": "call"}, {"api_name": "shop.models.RegioManagerTeam", "line_number": 683, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.get", "line_number": 684, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 684, "usage_type": "name"}, {"api_name": "mcfw.properties.azzert", "line_number": 685, "usage_type": "call"}, {"api_name": "mcfw.properties.azzert", "line_number": 686, "usage_type": "call"}, {"api_name": "shop.models.Order.create_key", "line_number": 687, "usage_type": "call"}, {"api_name": "shop.models.Order", "line_number": 687, "usage_type": "name"}, {"api_name": "shop.models.OrderNumber.next", "line_number": 687, "usage_type": "call"}, {"api_name": "shop.models.OrderNumber", "line_number": 687, "usage_type": "name"}, {"api_name": "shop.business.legal_entities.get_vat_pct", "line_number": 688, "usage_type": "call"}, {"api_name": "shop.models.Product.PRODUCT_NEWS_PROMOTION", "line_number": 692, "usage_type": "attribute"}, {"api_name": "shop.models.Product", "line_number": 692, "usage_type": "name"}, {"api_name": "rogerthat.rpc.service.BusinessException", "line_number": 696, "usage_type": "call"}, {"api_name": "rogerthat.utils.now", "line_number": 700, "usage_type": "call"}, {"api_name": "shop.models.Order", "line_number": 702, "usage_type": "call"}, {"api_name": "shop.models.Order.STATUS_SIGNED", "line_number": 710, "usage_type": "attribute"}, {"api_name": "shop.models.Order", "line_number": 710, "usage_type": "name"}, {"api_name": "shop.constants.STORE_MANAGER", "line_number": 714, "usage_type": "name"}, {"api_name": "mcfw.properties.azzert", "line_number": 718, "usage_type": "call"}, {"api_name": "shop.models.OrderItem", "line_number": 721, "usage_type": "call"}, {"api_name": "shop.models.Product.PRODUCT_NEWS_PROMOTION", "line_number": 730, "usage_type": "attribute"}, {"api_name": "shop.models.Product", "line_number": 730, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.put", "line_number": 734, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 734, "usage_type": "name"}, {"api_name": "google.appengine.ext.deferred.deferred.defer", "line_number": 737, "usage_type": "call"}, {"api_name": "solutions.common.restapi.store.generate_and_put_order_pdf_and_send_mail", "line_number": 737, "usage_type": "argument"}, {"api_name": "google.appengine.ext.deferred.deferred", "line_number": 737, "usage_type": "name"}, {"api_name": "shop.models.Charge", "line_number": 741, "usage_type": "call"}, {"api_name": "rogerthat.utils.now", "line_number": 742, "usage_type": "call"}, {"api_name": "shop.models.Charge.TYPE_ORDER_DELIVERY", "line_number": 743, "usage_type": "attribute"}, {"api_name": "shop.models.Charge", "line_number": 743, "usage_type": "name"}, {"api_name": "shop.models.Charge.STATUS_PENDING", "line_number": 750, "usage_type": "attribute"}, {"api_name": "shop.models.Charge", "line_number": 750, "usage_type": "name"}, {"api_name": "rogerthat.utils.now", "line_number": 751, "usage_type": "call"}, {"api_name": "google.appengine.ext.deferred.deferred.defer", "line_number": 756, "usage_type": "call"}, {"api_name": "shop.bizz.update_regiomanager_statistic", "line_number": 756, "usage_type": "argument"}, {"api_name": "google.appengine.ext.deferred.deferred", "line_number": 756, "usage_type": "name"}, {"api_name": "shop.bizz.get_payed", "line_number": 761, "usage_type": "call"}, {"api_name": 
"shop.models.Charge.STATUS_EXECUTED", "line_number": 763, "usage_type": "attribute"}, {"api_name": "shop.models.Charge", "line_number": 763, "usage_type": "name"}, {"api_name": "rogerthat.utils.now", "line_number": 764, "usage_type": "call"}, {"api_name": "rogerthat.utils.channel.send_message", "line_number": 766, "usage_type": "call"}, {"api_name": "rogerthat.utils.channel", "line_number": 766, "usage_type": "name"}, {"api_name": "mcfw.rpc.returns", "line_number": 653, "usage_type": "call"}, {"api_name": "mcfw.rpc.arguments", "line_number": 654, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users.User", "line_number": 654, "usage_type": "attribute"}, {"api_name": "rogerthat.rpc.users", "line_number": 654, "usage_type": "name"}, {"api_name": "shop.to.OrderItemTO", "line_number": 654, "usage_type": "name"}, {"api_name": "rogerthat.service.api.news.delete", "line_number": 770, "usage_type": "call"}, {"api_name": "rogerthat.service.api.news", "line_number": 770, "usage_type": "name"}, {"api_name": "rogerthat.models.news.NewsItem.list_sticky_by_sender_in_app", "line_number": 781, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 781, "usage_type": "name"}, {"api_name": "solutions.common.to.news.SponsoredNewsItemCount", "line_number": 790, "usage_type": "call"}, {"api_name": "mcfw.rpc.returns", "line_number": 773, "usage_type": "call"}, {"api_name": "solutions.common.to.news.SponsoredNewsItemCount", "line_number": 773, "usage_type": "argument"}, {"api_name": "mcfw.rpc.arguments", "line_number": 774, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users.User", "line_number": 774, "usage_type": "attribute"}, {"api_name": "rogerthat.rpc.users", "line_number": 774, "usage_type": "name"}, {"api_name": "rogerthat.models.news.NewsItem.list_sticky_by_sender_in_app", "line_number": 807, "usage_type": "call"}, {"api_name": "rogerthat.models.news.NewsItem", "line_number": 807, "usage_type": "name"}, {"api_name": "solutions.common.to.news.SponsoredNewsItemCount", "line_number": 816, "usage_type": "call"}, {"api_name": "mcfw.rpc.returns", "line_number": 793, "usage_type": "call"}, {"api_name": "solutions.common.to.news.SponsoredNewsItemCount", "line_number": 793, "usage_type": "name"}, {"api_name": "mcfw.rpc.arguments", "line_number": 794, "usage_type": "call"}, {"api_name": "rogerthat.rpc.users.User", "line_number": 794, "usage_type": "attribute"}, {"api_name": "rogerthat.rpc.users", "line_number": 794, "usage_type": "name"}, {"api_name": "rogerthat.models.App.APP_TYPE_CITY_APP", "line_number": 825, "usage_type": "attribute"}, {"api_name": "rogerthat.models.App", "line_number": 825, "usage_type": "name"}, {"api_name": "solutions.common.bizz.cityapp.get_apps_in_country_count", "line_number": 825, "usage_type": "call"}, {"api_name": "rogerthat.dal.parent_ndb_key", "line_number": 829, "usage_type": "call"}, {"api_name": "solutions.common.SOLUTION_COMMON", "line_number": 829, "usage_type": "argument"}, {"api_name": "solutions.common.models.news.NewsReview.query", "line_number": 830, "usage_type": "call"}, {"api_name": "solutions.common.models.news.NewsReview", "line_number": 830, "usage_type": "name"}]} +{"seq_id": "394416571", "text": "from django.conf.urls import include, url\nfrom myapp.api.views import User1View,User1DetailView,User1LoginView\n\napp_name ='myapp'\n\nurlpatterns=[\n\n\n url(r'^$',User1View.as_view(),name='user'),\n# url(r'^upload/',views.upload,name='upload'),\n url(r'^login/',User1LoginView.as_view(), name='login'),\n# 
url(r'^logout/$',views.logout1, name='logout'),\n# url(r'^registeration/', core_views.signup, name='registeration'),\n # url(r'^(?P.+)$',User1DetailView.as_view(), name='download'),\n]\n\n", "sub_path": "myapp/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "myapp.api.views.User1View.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "myapp.api.views.User1View", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "myapp.api.views.User1LoginView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "myapp.api.views.User1LoginView", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "5341423", "text": "import numpy\nimport matplotlib.pyplot as plt\nimport torch\n\n\ndef plot_head_map(mma, target_labels, source_labels):\n fig, ax = plt.subplots()\n heatmap = ax.pcolor(mma, cmap=plt.cm.Blues)\n\n # put the major ticks at the middle of each cell\n ax.set_xticks(numpy.arange(mma.shape[1]) + 0.5, minor=False)\n ax.set_yticks(numpy.arange(mma.shape[0]) + 0.5, minor=False)\n\n # without this I get some extra columns rows\n # http://stackoverflow.com/questions/31601351/why-does-this-matplotlib-heatmap-have-an-extra-blank-column\n ax.set_xlim(0, int(mma.shape[1]))\n ax.set_ylim(0, int(mma.shape[0]))\n\n # want a more natural, table-like display\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n\n # source words -> column labels\n ax.set_xticklabels(source_labels, minor=False)\n # target words -> row labels\n ax.set_yticklabels(target_labels, minor=False)\n\n plt.xticks(rotation=45)\n\n # plt.tight_layout()\n plt.show()\n\n\n# column labels -> target words\n# row labels -> source words\n\nattns = torch.load('tools/alignment_train.pkl')\n\nwith open('data/rotowire/roto-sent-data.train.src', encoding='utf-8') as src_f, \\\n open('data/rotowire/roto-sent-data.train.tgt', encoding='utf-8') as tgt_f:\n for idx, (line_src, line_tgt, attn) in enumerate(zip(src_f, tgt_f, attns)):\n srcs = line_src.strip().split()\n tgts = line_tgt.strip().split() + ['']\n plot_head_map(attn.cpu().numpy(), tgts, srcs)\n if idx >= 5:\n break\n", "sub_path": "tools/visualize_attention.py", "file_name": "visualize_attention.py", "file_ext": "py", "file_size_in_byte": 1488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "70092208", "text": "import matplotlib.pyplot as plt\nimport numpy as 
np\nimport matplotlib as mpl\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}',r'\\usepackage{siunitx}'] #\nfrom scipy.optimize import curve_fit\n\nx = np.array([17.869,15.306,13.840,12.707,11.889,11.181,10.575,10.041,9.598,9.190])\ny = np.array([1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0])\nx = 1/x**2\nplt.xlabel(r'$\\frac{1}{T^2}\\:/\\:\\si{\\per\\square\\second}$')\nplt.ylabel(r'$B\\:/\\:\\si{\\tesla}$')\n\nplt.plot(x, y,\"b.\",label=\"Messdaten\")\n\ndef g(x,m,b):\n return b+m*x\npopt, pcov = curve_fit(g, x, y)\nprint(\"Steigung =\",popt[0],\"Abschnitt =\", popt[1])\nperr = np.sqrt(np.diag(pcov))\nprint(\"Fehler =\", perr)\ndef f(x):\n return popt[0]*x+popt[1]\nplt.plot(x, f(x), 'r-', label=r'Fit')\n\nplt.legend(loc='best')\nplt.tight_layout()\nplt.savefig('plot.pdf')\n", "sub_path": "102/Werte/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.rcParams", "line_number": 4, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "550881477", "text": "\nimport pytest\n\n\nfrom swarm64_tpc_toolkit import stats\n\n\n@pytest.fixture\ndef stats_fixture():\n netdata_url = 'http://fake-netdata:19999'\n disk = 'some_disk'\n return stats.Stats(netdata_url, disk)\n\n\ndef test_make_columns():\n metrics = ['foo', 'bar']\n\n columns_expected = [*stats.BASE_COLUMNS]\n columns_expected.extend([f'{metric}_bar' for metric in stats.STATS_METRICS])\n columns_expected.extend([f'{metric}_foo' for metric in stats.STATS_METRICS])\n\n columns = stats.Stats.make_columns(metrics)\n assert sorted(columns) == sorted(columns_expected)\n\n\n# def query_netdata(self, start, end):\ndef test_query_netdata(mocker, stats_fixture):\n start = 123\n end = 456\n\n response_value = 'some fancy response'\n\n def get_return_json():\n return response_value\n\n requests_get_mock = 
mocker.patch('requests.get')\n requests_get_mock.return_value.json = get_return_json\n\n netdata_data = stats_fixture.query_netdata(start, end)\n\n for idx, chart_key in enumerate(stats_fixture.charts.keys()):\n _, _, kwargs = requests_get_mock.mock_calls[idx]\n assert chart_key == kwargs['params']['chart']\n assert chart_key in netdata_data\n assert netdata_data[chart_key] == response_value\n\n\ndef test_transform(stats_fixture):\n data = {chart_id: {\n 'labels': ['foo', 'bar'],\n 'data': [[1, 2]],\n } for chart_id in stats_fixture.chart_ids}\n\n data = stats_fixture.transform(data)\n\n header_expected = []\n for chart_id in stats_fixture.chart_ids:\n header_expected.append(chart_id + '.foo')\n header_expected.append(chart_id + '.bar')\n\n assert data[0] == header_expected\n assert data[1] == [1, 2] * 4\n", "sub_path": "tests/test_stats.py", "file_name": "test_stats.py", "file_ext": "py", "file_size_in_byte": 1694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "swarm64_tpc_toolkit.stats.Stats", "line_number": 12, "usage_type": "call"}, {"api_name": "swarm64_tpc_toolkit.stats", "line_number": 12, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 8, "usage_type": "attribute"}, {"api_name": "swarm64_tpc_toolkit.stats.BASE_COLUMNS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "swarm64_tpc_toolkit.stats", "line_number": 18, "usage_type": "name"}, {"api_name": "swarm64_tpc_toolkit.stats.STATS_METRICS", "line_number": 19, "usage_type": "attribute"}, {"api_name": "swarm64_tpc_toolkit.stats", "line_number": 19, "usage_type": "name"}, {"api_name": "swarm64_tpc_toolkit.stats.STATS_METRICS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "swarm64_tpc_toolkit.stats", "line_number": 20, "usage_type": "name"}, {"api_name": "swarm64_tpc_toolkit.stats.Stats.make_columns", "line_number": 22, "usage_type": "call"}, {"api_name": "swarm64_tpc_toolkit.stats.Stats", "line_number": 22, "usage_type": "attribute"}, {"api_name": "swarm64_tpc_toolkit.stats", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "43097129", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport inspect\nimport subprocess\nimport asyncio\nimport datetime\nimport json\nimport re\nfrom functools import partial\nfrom operator import is_not\n\n\nclass XCodeBuildArgs(object):\n name = None\n scheme = None\n device = None\n config = 'Debug'\n udid = None\n simulator = None\n\n def __init__(self, *args, **kwargs):\n for key in kwargs:\n if key == 'name':\n self.name = kwargs[key]\n elif key == 'scheme':\n self.scheme = kwargs[key]\n elif key == 'device':\n self.device = kwargs[key]\n elif key == 'config':\n self.config = kwargs[key]\n elif key == 'simulator':\n self.simulator = kwargs[key]\n if self.scheme == None:\n self.scheme = self.name\n\n\nclass XCodeProject(object):\n name = None\n isWorkspace = False\n\n def __init__(self, *args, **kwargs):\n for key in kwargs:\n if key == 'name':\n self.name = kwargs[key]\n elif key == 'isWorkspace':\n self.isWorkspace = kwargs[key]\n\n @property\n def projectType(self):\n return 'workspace' if self.isWorkspace else 'project'\n\n\nclass Device(object):\n name = None\n udid = None\n version = None\n\n def __init__(self, args, **kwargs):\n for key in args:\n if key == 'udid':\n self.udid = args[key]\n elif key == 'name':\n self.name = args[key]\n elif key == 'version':\n self.version = args[key]\n\n\n\ndef getBuildPath(configuration, appName, isDevice):\n device 
= 'iphoneos' if isDevice else 'iphonesimulator'\n return 'build/Build/Products/{}-{}/{}.app'.format(configuration or 'Debug', 'iphonesimulator', appName)\n\ndef findMatchingSimulator(simulators, simulatorName=None):\n if simulators['devices'] is None:\n return None\n devices = simulators['devices']\n match = None\n for version in devices:\n if not version.startswith('iOS'):\n continue\n for simulator in devices[version]:\n if simulator['availability'] != '(available)':\n continue\n simulator['version'] = version\n if simulator['state'] == 'Booted':\n if simulatorName != None:\n print(\"We couldn't boot your defined simulator due to an already booted simulator. We are limited to one simulator launched at a time.\")\n return Device(simulator)\n if simulator['name'] == simulatorName:\n return Device(simulator)\n\n # Keeps track of the first available simulator for use if we can't find one above.\n if simulatorName == None and match is None:\n print('find simulator', simulator)\n match = Device(simulator)\n if match:\n return match\n return None\n\n\ndef findXcodeProject(files):\n sortedFiles = sorted(files)\n for (index, fileName) in enumerate(reversed(sortedFiles)):\n name, ext = os.path.splitext(fileName)\n if ext == '.xcworkspace':\n return XCodeProject(name=fileName, isWorkspace=True)\n if ext == '.xcodeproj':\n return XCodeProject(name=fileName, isWorkspace=False)\n return None\n\n\ndef parseIOSDevicesList(text):\n def parseLine(line):\n device = re.match('(.*?) \\((.*?)\\) \\[(.*?)\\]', line)\n noSimulator = re.match('(.*?) \\((.*?)\\) \\[(.*?)\\] \\((.*?)\\)', line)\n if device and noSimulator and noSimulator.groups().count != 4:\n return Device({\n 'name' : device.groups()[0],\n 'version' : device.groups()[1],\n 'udid' : device.groups()[2],\n })\n print(text)\n devices = [parseLine(line) for line in text.split('\\n')]\n return filter(partial(is_not, None), devices)\n\n\ndef runIOS(args):\n files = os.listdir(os.path.dirname(os.path.abspath(__file__)))\n xcodeproject = findXcodeProject(files)\n if xcodeproject == None:\n raise 'Could not find Xcode project files in ios folder'\n\n inferredSchemeName, ext = os.path.splitext(xcodeproject.name)\n scheme = args.scheme or inferredSchemeName\n\n print('Found Xcode {} {}'.format(xcodeproject.projectType, xcodeproject.name))\n\n cmd = \"xcrun instruments -s\"\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n try:\n output, error = process.communicate()\n devices = parseIOSDevicesList(output.decode('utf8'))\n except Exception as e:\n raise e\n\n if args.device != None:\n selectedDevice = matchingDevice(devices, args.device)\n return\n return selectedDeviceOperation(selectedDevice, args, scheme, xcodeproject, devices)\n elif args.udid != None:\n return runOnDeviceByUdid(args, scheme, xcodeproject, devices)\n else:\n co = runOnSimulator(xcodeproject, args, inferredSchemeName, scheme)\n try:\n co.send(None)\n except StopIteration:\n print('runIOS FAILED')\n\n\ndef runOnDeviceByUdid(args, scheme, xcodeproject, devices):\n selectedDevice = matchingDeviceByUdid(devices, args.udid)\n selectedDeviceOperation(selectedDevice, args, scheme, xcodeproject, devices)\n\ndef selectedDeviceOperation(selectedDevice, args, scheme, xcodeproject, devices):\n if selectedDevice:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(runOnDevice(selectedDevice, scheme, xcodeproject, args.config))\n loop.close()\n else:\n if devices:\n print('Could not find device with the name: \"' + args.device + '\".')\n print('Choose one of the following:')\n 
printFoundDevices(devices)\n else:\n print('No iOS devices connected.')\n\nasync def runOnSimulator(xcodeproject, args, inferredSchemeName, scheme):\n try:\n cmd = 'xcrun simctl list --json devices'\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n simulators = json.loads(output.decode('utf8'))\n except Exception as e:\n print('Could not parse the simulator list output')\n raise e\n\n selectedSimulator = findMatchingSimulator(simulators, args.simulator)\n # selectedSimulator = findMatchingSimulator(simulators)\n\n if selectedSimulator == None:\n raise 'Could not find {} simulator'.format(args.simulator)\n\n simulatorFulName = formattedDeviceName(selectedSimulator)\n print('launching {}, UDID: {}...'.format(simulatorFulName, selectedSimulator.udid))\n\n try:\n cmd = 'xcrun instruments -w {}'.format(selectedSimulator.udid)\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n error = process.communicate()\n except Exception as e:\n # do nothing:\n # instruments always fail with 255 because it expects more arguments,\n # but we want it to only launch the simulator\n print('')\n\n appName = buildProject(xcodeproject, selectedSimulator.udid, scheme, args.config)\n if appName is None:\n appName = inferredSchemeName\n appPath = getBuildPath(args.config, appName, False)\n\n print('Installing {}'.format(appPath))\n try:\n cmd = 'xcrun simctl install booted {}'.format(appPath)\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n error = process.communicate()\n except Exception as e:\n raise e\n\n try:\n cmd = '/usr/libexec/PlistBuddy -c Print:CFBundleIdentifier {}'.format(os.path.join(appPath, 'Info.plist'))\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n bundleID = output.decode('utf8').rstrip()\n except Exception as e:\n raise e\n\n print('launching ' + bundleID)\n try:\n cmd = 'xcrun simctl launch booted {}'.format(bundleID)\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n except Exception as e:\n raise e\n\nasync def runOnDevice(selectedDevice, scheme, xcodeproject, configuration):\n appName = buildProject(xcodeproject, selectedDevice.udid, scheme, configuration)\n if appName is None:\n appName = scheme\n\n iosDeployInstallArgs = [\n '--bundle', getBuildPath(configuration, appName, True),\n '--id' , selectedDevice.udid,\n '--justlaunch'\n ]\n try:\n iosDeployInstallArgs.insert(0, 'ios-deploy')\n print(' '.join(iosDeployInstallArgs))\n process = subprocess.Popen(iosDeployInstallArgs, stdout=subprocess.PIPE, universal_newlines=True)\n buildOutput = ''\n for stdout_line in iter(process.stdout.readline, \"\"):\n print(stdout_line)\n buildOutput += stdout_line\n process.stdout.close()\n return_code = process.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, ' '.join(iosDeployInstallArgs))\n except Exception as e:\n raise e\n\n if error:\n print('')\n print('** INSTALLATION FAILED **')\n print('Make sure you have ios-deploy installed globally.')\n print('(e.g \"npm install -g ios-deploy\")')\n else:\n print('** INSTALLATION SUCCEEDED **')\n\n\ndef buildProject(xcodeproject, udid, scheme, configuration = 'Debug'):\n xcodebuildArgs = [\n '-workspace' if xcodeproject.isWorkspace else '-project', xcodeproject.name,\n '-configuration', configuration,\n '-scheme', scheme,\n '-destination', 'id={}'.format(udid),\n '-derivedDataPath', 'build'\n ]\n print('Building using 
\"xcodebuild {}\"'.format(' '.join(map(str, xcodebuildArgs))))\n try:\n xcodebuildArgs.insert(0, 'xcodebuild')\n process = subprocess.Popen(xcodebuildArgs, stdout=subprocess.PIPE, universal_newlines=True)\n buildOutput = ''\n for stdout_line in iter(process.stdout.readline, \"\"):\n print(stdout_line)\n buildOutput += stdout_line\n process.stdout.close()\n return_code = process.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, ' '.join(xcodebuildArgs))\n\n # FULL_PRODUCT_NAME is the actual file name of the app, which actually comes from the Product Name in the build config, which does not necessary match a scheme name, example output line: export FULL_PRODUCT_NAME=\"Super App Dev.app\"\n p = re.compile('export FULL_PRODUCT_NAME=\"?(.+).app')\n productNameMatch = p.findall(buildOutput)\n if productNameMatch and len(productNameMatch) > 1:\n #0 is the full match, 1 is the app name\n return productNameMatch[1]\n return ('' if error is None else error)\n except Exception as e:\n raise e\n\ndef matchingDevice(devices, deviceName):\n if deviceName == True and devices.length == 1:\n print('Using first available device {} due to lack of name supplied.'.format(devices[0].name))\n return devices[0]\n\n for device in devices:\n print(device.name, device.udid, device.version)\n if (device.name == deviceName or formattedDeviceName(device) == deviceName):\n return device\n\ndef matchingDeviceByUdid(devices, udid):\n return [device for device in devices if device['udid'] is udid]\n\ndef formattedDeviceName(simulator):\n return '{} ({})'.format(simulator.name, simulator.version);\n\ndef printFoundDevices(devices):\n for device in devices:\n print(\"{} udid: {}\".format(device.name, device.udid))\n\n# simulator='iPhone 7',\nargs = XCodeBuildArgs(name = 'SnowFund', device='iPhone 6s')\nrunIOS(args)\n", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 11643, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.splitext", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 116, "usage_type": "call"}, {"api_name": "re.match", "line_number": 117, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 126, "usage_type": "call"}, {"api_name": "operator.is_not", "line_number": 126, "usage_type": "argument"}, {"api_name": "os.listdir", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 141, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 141, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 168, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 182, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 182, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 184, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 200, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 200, "usage_type": "attribute"}, {"api_name": 
"subprocess.Popen", "line_number": 216, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 223, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 223, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 232, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 232, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 250, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 250, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 258, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 282, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 282, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 290, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 293, "usage_type": "call"}]} +{"seq_id": "564267520", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import division\nimport os\nimport pytest\nfrom astropy.table import Table\nimport numpy as np\nfrom mica.archive import aca_l0, asp_l1\nfrom Ska.Numpy import interpolate\n\nhas_l0_2012_archive = os.path.exists(os.path.join(aca_l0.CONFIG['data_root'], '2012'))\n\n\n@pytest.mark.skipif('not has_l0_2012_archive', reason='Test requires 2012 L0 archive')\ndef test_l0_images_meta():\n \"\"\"\n Confirm meta values match reference/regress values\n \"\"\"\n imgs = aca_l0.get_l0_images(467055635, 467055639, slot=7)\n assert imgs[0].meta == {'BGDAVG': 253,\n 'IMGCOL0': 7,\n 'IMGFUNC1': 2,\n 'IMGROW0': -12,\n 'IMGSIZE': 8,\n 'IMGSTAT': 0,\n 'IMGSCALE': 1025,\n 'INTEG': np.float32(1.696),\n 'TIME': np.float64(467055637.49031752)}\n\nhas_l0_2007_archive = os.path.exists(os.path.join(aca_l0.CONFIG['data_root'], '2007'))\nhas_asp_l1 = os.path.exists(os.path.join(asp_l1.CONFIG['data_root']))\n\n@pytest.mark.skipif('not has_l0_2007_archive or not has_asp_l1', reason='Test requires 2007 L0 archive')\ndef test_get_l0_images():\n \"\"\"\n Do a validation test of get_l0_images:\n - Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)\n - Do first moment centroids in row and col\n - Compare to aspect pipeline FM centroids for same slot data\n\n This is a deep test that all the signs are right. 
If not then everything\n breaks badly because the star image doesn't move in sync with row0, col0.\n \"\"\"\n start = '2007:002:06:00:00'\n stop = '2007:002:06:20:00'\n\n imgs = aca_l0.get_l0_images(start, stop, slot=6)\n\n files = asp_l1.get_files(8008, content=['ACACENT'])\n acen = Table.read(files[0])\n # Pick FM centroids for slot 6\n ok = (acen['alg'] == 1) & (acen['slot'] == 6)\n acen = acen[ok]\n\n # Row and col centroids\n rcs = []\n ccs = []\n times = [img.TIME for img in imgs]\n\n # Easy way to do FM centroids with mgrid\n rw, cw = np.mgrid[0:6, 0:6]\n # rw = [[0, 0, 0, 0, 0, 0],\n # [1, 1, 1, 1, 1, 1],\n # [2, 2, 2, 2, 2, 2],\n # [3, 3, 3, 3, 3, 3],\n # [4, 4, 4, 4, 4, 4],\n # [5, 5, 5, 5, 5, 5]]\n\n for img in imgs:\n norm = np.sum(img)\n rcs.append(np.sum(img * rw) / norm + img.row0)\n ccs.append(np.sum(img * cw) / norm + img.col0)\n\n rcen = interpolate(acen['cent_i'], acen['time'], times)\n ccen = interpolate(acen['cent_j'], acen['time'], times)\n\n assert np.all(np.abs(rcen - rcs) < 0.05)\n assert np.all(np.abs(ccen - ccs) < 0.05)\n\n\n@pytest.mark.skipif('not has_l0_2007_archive or not has_asp_l1', reason='Test requires 2007 L0 archive')\ndef test_get_slot_data_8x8():\n \"\"\"\n Do a validation test of get_l0_images:\n - Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)\n - Do first moment centroids in row and col\n - Compare to aspect pipeline FM centroids for same slot data\n\n This is a deep test that all the signs are right. If not then everything\n breaks badly because the star image doesn't move in sync with row0, col0.\n \"\"\"\n start = '2007:002:06:00:00'\n stop = '2007:002:06:20:00'\n\n slot_data = aca_l0.get_slot_data(start, stop, slot=6, centered_8x8=True)\n\n files = asp_l1.get_files(8008, content=['ACACENT'])\n acen = Table.read(files[0])\n # Pick FM centroids for slot 6\n ok = (acen['alg'] == 1) & (acen['slot'] == 6)\n acen = acen[ok]\n\n # Row and col centroids\n times = slot_data['TIME']\n\n # Easy way to do FM centroids with mgrid\n rw, cw = np.mgrid[0:8, 0:8]\n\n img_raw = slot_data['IMGRAW'] # np.round(slot_data['IMGRAW']).astype(int)\n norm = np.sum(img_raw, axis=(1, 2))\n rcs = np.sum(img_raw * rw, axis=(1, 2)) / norm + slot_data['IMGROW0'] - 1\n ccs = np.sum(img_raw * cw, axis=(1, 2)) / norm + slot_data['IMGCOL0'] - 1\n\n rcen = interpolate(acen['cent_i'], acen['time'], times)\n ccen = interpolate(acen['cent_j'], acen['time'], times)\n\n assert np.all(np.abs(rcen - rcs) < 0.05)\n assert np.all(np.abs(ccen - ccs) < 0.05)\n", "sub_path": "mica/archive/tests/test_aca_l0.py", "file_name": "test_aca_l0.py", "file_ext": "py", "file_size_in_byte": 4259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "mica.archive.aca_l0.CONFIG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "mica.archive.aca_l0", "line_number": 10, "usage_type": "name"}, {"api_name": "mica.archive.aca_l0.get_l0_images", "line_number": 18, "usage_type": "call"}, {"api_name": "mica.archive.aca_l0", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 13, "usage_type": "call"}, {"api_name": 
"pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "mica.archive.aca_l0.CONFIG", "line_number": 29, "usage_type": "attribute"}, {"api_name": "mica.archive.aca_l0", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "mica.archive.asp_l1.CONFIG", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mica.archive.asp_l1", "line_number": 30, "usage_type": "name"}, {"api_name": "mica.archive.aca_l0.get_l0_images", "line_number": 46, "usage_type": "call"}, {"api_name": "mica.archive.aca_l0", "line_number": 46, "usage_type": "name"}, {"api_name": "mica.archive.asp_l1.get_files", "line_number": 48, "usage_type": "call"}, {"api_name": "mica.archive.asp_l1", "line_number": 48, "usage_type": "name"}, {"api_name": "astropy.table.Table.read", "line_number": 49, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.mgrid", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 71, "usage_type": "call"}, {"api_name": "Ska.Numpy.interpolate", "line_number": 73, "usage_type": "call"}, {"api_name": "Ska.Numpy.interpolate", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mica.archive.aca_l0.get_slot_data", "line_number": 94, "usage_type": "call"}, {"api_name": "mica.archive.aca_l0", "line_number": 94, "usage_type": "name"}, {"api_name": "mica.archive.asp_l1.get_files", "line_number": 96, "usage_type": "call"}, {"api_name": "mica.archive.asp_l1", "line_number": 96, "usage_type": "name"}, {"api_name": "astropy.table.Table.read", "line_number": 97, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.mgrid", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 111, "usage_type": "call"}, {"api_name": "Ska.Numpy.interpolate", "line_number": 113, "usage_type": "call"}, {"api_name": "Ska.Numpy.interpolate", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 117, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 80, "usage_type": 
"attribute"}]} +{"seq_id": "297549485", "text": "from flask import render_template, redirect, url_for, request, Blueprint, flash\nfrom app import *\nimport psycopg2\n\npersonal_b = Blueprint('personal_b', __name__, template_folder=\"templates\")\n\n@personal_b.route(\"/personal/add\", methods=[\"GET\", \"POST\"])\ndef personal_add():\n\tif request.method == \"GET\":\n\t\treturn render_template(\"personal/add.html\")\n\telif request.method == \"POST\":\n\t\tcolumns = \"\"\n\t\tplaceholders = \"\"\n\t\tvalues = ()\n\t\tfor a in request.form:\n\t\t\tcolumns += a + \",\"\n\t\t\tplaceholders += \"%s,\"\n\t\t\tvalues += (request.form.get(a), )\n\t\tconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n\t\tcursor = conn.cursor()\n\t\tcursor.execute(\"INSERT INTO personal(\" + columns[:-1] + \") VALUES(\" + placeholders[:-1] + \") RETURNING id;\", values)\n\t\tnew_id = cursor.fetchone()[0]\n\t\tconn.commit()\n\t\tconn.close()\n\t\treturn redirect(\"/oneshot/add/\" + str(new_id))\n\n@personal_b.route(\"/personal/search\", methods=[\"GET\", \"POST\"])\ndef personal_search():\n\tif request.method == \"GET\":\n\t\treturn render_template(\"personal/search.html\")\n\telif request.method == \"POST\":\n\t\tconditions = \"\"\n\t\tvalues = ()\n\n\t\tfor a in request.form:\n\t\t\tif request.form.get(a) != \"\":\n\t\t\t\tconditions += a + \" ILIKE %s||'%%' AND \"\n\t\t\t\tvalues += (request.form.get(a), )\n\t\tconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n\t\tcursor = conn.cursor()\n\t\tif (conditions == \"\"):\n\t\t\tcursor.execute(\"SELECT * FROM personal;\")\t\t\t\n\t\telse:\n\t\t\tcursor.execute(\"SELECT * FROM personal WHERE \" + conditions[:-5] + \";\", values)\n\t\tresults = [x[:12] for x in cursor.fetchall()]\n\t\tconn.close()\n\n\t\tif len(results) == 0:\n\t\t\tflash(\"No matches found\")\n\t\t\treturn redirect(url_for(\"personal_b.details_search\"))\n\t\telse:\n\t\t\treturn render_template(\"/personal/results.html\", results=results)\n\n# @app.route(\"/search\", methods=[\"GET\", \"POST\"])\n# def search():\n# \tif request.method == \"GET\":\n# \t\treturn render_template(\"search.html\")\n# \telif request.method == \"POST\":\n# \t\tconditions = \"\"\n# \t\tvalues = ()\n# \t\tfor a in request.form:\n# \t\t\tif request.form.get(a) != \"\":\n# \t\t\t\tconditions += a + \" ILIKE %s||'%%' AND \"\n# \t\t\t\tvalues += (request.form.get(a), )\n# \t\tconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n# \t\tcursor = conn.cursor()\n# \t\tif (conditions == \"\"):\n# \t\t\tcursor.execute(\"SELECT * FROM students;\")\t\t\t\n# \t\telse:\n# \t\t\tcursor.execute(\"SELECT * FROM students WHERE \" + conditions[:-5] + \";\", values)\n# \t\tresults = cursor.fetchall()\n# \t\tresults = [x[:15] for x in results]\n# \t\tif (results):\n# \t\t\tconn.close()\n# \t\t\treturn render_template(\"results.html\", results=results)\t\n# \t\telse:\n# \t\t\tflash(\"No matches found\")\n# \t\t\treturn redirect(url_for(\"search\"))\n\n# @app.route(\"/edit/\", methods=[\"GET\", \"POST\"])\n# def edit(id):\n# \tif request.method == \"GET\":\n# \t\tconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n# \t\tcursor = conn.cursor()\n# \t\tcursor.execute(\"SELECT * FROM students WHERE id=%s\", (id, ))\n# \t\tstudent = cursor.fetchall()[0]\n# \t\tprint(student)\n# \t\tconn.close()\n# \t\treturn render_template(\"edit.html\", student=student)\n# \telse:\n# \t\tcolumns = \"\"\n# 
\t\tvalues = ()\n# \t\tfor a in request.form:\n# \t\t\tcolumns += a + \"=%s,\"\n# \t\t\tvalues += (request.form.get(a), )\n# \t\tconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n# \t\tcursor = conn.cursor()\n# \t\tcursor.execute(\"UPDATE students SET \" + columns[:-1] + \" WHERE id=%s;\", values + (id, ))\n# \t\tconn.commit()\n# \t\tconn.close()\n# \t\treturn redirect(url_for(\"search\"))\n\n# @app.route(\"/delete/\", methods=[\"GET\"])\n# def delete(id):\n# \tconn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n# \tcursor = conn.cursor()\n# \tcursor.execute(\"DELETE FROM students WHERE id=%s\", (id, ))\n# \tconn.commit()\n# \tconn.close()\n# \treturn redirect(url_for(\"search\"))", "sub_path": "blueprints/personal.py", "file_name": "personal.py", "file_ext": "py", "file_size_in_byte": 3857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "568869424", "text": "# ctypes: \tis a foreign function library for Python. 
It provides C compatible data types,\n# and allows calling functions in DLLs or shared libraries.\n# \t\t\tIt can be used to wrap these libraries in pure Python.\n\nimport os\nimport ctypes\nfrom scipy import integrate\nfrom scipy import LowLevelCallable\nimport numpy as np\n\n\n# os.path.abspath(path):\tReturn a normalized absolutized version of the pathname path.\n# ctypes.CDLL(): load dynamic link libraries (DLL), on Linux CDLL, on Windows WinDLL or OleDLL\nlib = ctypes.CDLL(os.path.abspath('p_b_lib_test.so'))\n\n# ctypes.c_double: ctype data type, in C data type: double, in python data type: float\n# restype: specifies the return type -> in this case a C double/ python float\nlib.f.restype = ctypes.c_double\n\n# argtypes: It is possible to specify the required argument types of functions exported from DLLs by setting\n# the argtypes attribute (here the first argument of the function is an integer and the second\n# argument is a pointer to a double)\n# IMPORTANT: a void function must not have a return value!\nlib.f.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double))\n\n# ctypes.cast(obj, type): This function is similar to the cast operator in C.\n# It returns a new instance of type which points to the same memory block as 'obj'.\n# 'type' must be a pointer type, and 'obj' must be an object that can be interpreted as a pointer.\n# user_data = ctypes.cast(ctypes.pointer(c), ctypes.c_void_p)\n\nfunc = LowLevelCallable(lib.f)\n\nf_dsnb = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.3, 0.15, 0.05, 0, 0])\nf_ccatmo = np.array([0.1, 0.2, 0.35, 0.45, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9])\nf_reactor = np.array([5, 2, 1, 0, 0, 0, 0, 0, 0, 0])\nf_data = np.array([1, 0, 0, 0, 1, 0, 0, 1, 0, 0])\n\nfraction = np.array([f_dsnb, f_ccatmo, f_reactor, f_data], dtype='float')\n\n\n# integrate the function\nintegral = integrate.nquad(func, [[0.5, 1.5], [0.5, 1.5], [0.5, 1.5], [0, 0.1], [1, 1.1], [1, 1.1], [2, 2.1], [2, 2.1],\n                                  [3, 3.1], [3, 3.1], [4, 4.1]])\n# print the integral value:\nprint(integral)\n", "sub_path": "source/test_python_in_c/p_b_lib.py", "file_name": "p_b_lib.py", "file_ext": "py", "file_size_in_byte": 2076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "ctypes.CDLL", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ctypes.c_double", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 24, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 24, "usage_type": "call"}, {"api_name": "ctypes.c_double", "line_number": 24, "usage_type": "attribute"}, {"api_name": "scipy.LowLevelCallable", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.integrate.nquad", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "269328851", "text": "from typing import List, Generator, Any\n\nimport luigi\nfrom exasol_integration_test_docker_environment.lib.base.flavor_task import FlavorBaseTask\nfrom 
exasol_integration_test_docker_environment.lib.base.json_pickle_target import JsonPickleTarget\nfrom exasol_integration_test_docker_environment.lib.data.database_credentials import DatabaseCredentialsParameter\n\nfrom exaslct_src.exaslct.lib.tasks.test.run_db_test import RunDBTest\nfrom exaslct_src.exaslct.lib.tasks.test.run_db_test_result import RunDBTestDirectoryResult, RunDBTestResult\nfrom exaslct_src.exaslct.lib.tasks.test.run_db_tests_parameter import RunDBTestParameter\n\n\nclass RunDBTestsInDirectory(FlavorBaseTask,\n RunDBTestParameter,\n DatabaseCredentialsParameter):\n directory = luigi.Parameter()\n\n def extend_output_path(self):\n return self.caller_output_path + (self.directory,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._test_container_info = self.test_environment_info.test_container_info\n self.tasks = self.create_test_tasks_from_directory(self.directory)\n\n def run_task(self):\n test_results = yield from self.run_tests()\n result = RunDBTestDirectoryResult(test_results=test_results,\n language=self.language,\n test_folder=self.directory)\n JsonPickleTarget(self.get_output_path().joinpath(\"test_results.json\")).write(test_results, 4)\n self.return_object(result)\n\n def run_tests(self) -> Generator[RunDBTest, Any, List[RunDBTestResult]]:\n test_results = []\n for test_task_config in self.tasks:\n test_result_future = yield from self.run_dependencies(test_task_config)\n test_result = self.get_values_from_future(test_result_future)\n test_results.append(test_result)\n return test_results\n\n def create_test_tasks_from_directory(\n self, directory: str):\n test_container = self._client.containers.get(self._test_container_info.container_name)\n exit_code, ls_output = test_container.exec_run(cmd=\"ls /tests/test/%s/\" % directory)\n test_files = ls_output.decode(\"utf-8\").split(\"\\n\")\n tasks = [self.create_test_task(directory, test_file)\n for test_file in test_files\n if test_file != \"\" and test_file.endswith(\".py\")]\n return tasks\n\n def create_test_task(self, directory: str, test_file: str):\n task = self.create_child_task_with_common_params(\n RunDBTest,\n test_file=directory + \"/\" + test_file\n )\n return task\n", "sub_path": "exaslct_src/exaslct/lib/tasks/test/run_db_test_in_directory.py", "file_name": "run_db_test_in_directory.py", "file_ext": "py", "file_size_in_byte": 2691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "exasol_integration_test_docker_environment.lib.base.flavor_task.FlavorBaseTask", "line_number": 13, "usage_type": "name"}, {"api_name": "exaslct_src.exaslct.lib.tasks.test.run_db_tests_parameter.RunDBTestParameter", "line_number": 14, "usage_type": "name"}, {"api_name": "exasol_integration_test_docker_environment.lib.data.database_credentials.DatabaseCredentialsParameter", "line_number": 15, "usage_type": "name"}, {"api_name": "luigi.Parameter", "line_number": 16, "usage_type": "call"}, {"api_name": "exaslct_src.exaslct.lib.tasks.test.run_db_test_result.RunDBTestDirectoryResult", "line_number": 28, "usage_type": "call"}, {"api_name": "exasol_integration_test_docker_environment.lib.base.json_pickle_target.JsonPickleTarget", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Generator", "line_number": 34, "usage_type": "name"}, {"api_name": "exaslct_src.exaslct.lib.tasks.test.run_db_test.RunDBTest", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 34, "usage_type": "name"}, 
{"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "exaslct_src.exaslct.lib.tasks.test.run_db_test_result.RunDBTestResult", "line_number": 34, "usage_type": "name"}, {"api_name": "exaslct_src.exaslct.lib.tasks.test.run_db_test.RunDBTest", "line_number": 54, "usage_type": "argument"}]} +{"seq_id": "568310842", "text": "# -*- coding: utf-8 -*-\n\n\"\"\" Import Best data \"\"\"\n\nimport MySQLdb\nimport MySQLdb.cursors\n\nfrom datetime import date\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom catalog.models import Rental, RentalPrice, CatalogImage, RentalImagePage, Region, RegionPage, Mode\nfrom wagtail.core.models import Page, Site\n\nclass Command(BaseCommand):\n \"\"\" Import management command \"\"\"\n\n help = \"Import rental data\"\n\n def handle(self, *args, **options):\n model_map = {\n 'field_1033_1': 'title',\n 'field_900_1': 'notes',\n 'field_466_1': 'description',\n 'field_1063_1': 'mode_of_drive',\n 'field_465_1': 'category',\n 'field_442_1': 'included_services',\n 'field_443_1': 'not_included_services',\n 'rentalcars_vehicle.oid': 'travel_id',\n }\n\n Rental.objects.all().delete()\n\n db_connection = MySQLdb.connect(\n host=settings.MYSQL_IMPORT_HOST,\n port=settings.MYSQL_IMPORT_PORT,\n user=settings.MYSQL_IMPORT_USER,\n passwd=settings.MYSQL_IMPORT_PASSWD,\n db=settings.MYSQL_IMPORT_DB,\n charset='utf8',\n cursorclass=MySQLdb.cursors.DictCursor)\n cursor = db_connection.cursor()\n\n site = Site.objects.all()[0]\n root_page = site.root_page\n\n try:\n link_page = Page.objects.get(title='BOT_Import')\n except:\n link_page = Page()\n setattr(link_page, 'title', 'BOT_Import')\n root_page.add_child(instance=link_page)\n\n query = \"\"\"SELECT %(db)s.rentalcars.*, %(db)s.rentalcars_vehicle.*\n FROM %(db)s.rentalcars\n INNER JOIN %(db)s.rentalcars_vehicle\n ON %(db)s.rentalcars_vehicle.parentid = %(db)s.rentalcars.oid\n WHERE %(db)s.rentalcars.language='D'\n AND %(db)s.rentalcars_vehicle.language='D'\n %(ic)s\"\"\" % {'db':settings.MYSQL_IMPORT_DB, 'ic': settings.IMPORT_CAP}\n cursor.execute(query)\n\n for result in cursor:\n page = Rental()\n for k in model_map:\n setattr(page, model_map[k], result[k])\n setattr(page, 'show_in_menus', True)\n # Import region if it doesnt exist\n region = Region.objects.get_or_create(import_name=result['field_431_1'])\n setattr(page, 'country_relation', region[0])\n\n # Import mode if it doesnt exist\n mode = Mode.objects.get_or_create(import_name='Mietwagen')\n setattr(page, 'travel_mode_relation', mode[0])\n\n # Create region pages if they dont exist\n try:\n region_page = RegionPage.objects.get(title=result['field_431_1'])\n except RegionPage.MultipleObjectsReturned:\n region_page = RegionPage.objects.filter(title=result['field_431_1'])[0]\n except RegionPage.DoesNotExist:\n region_page = RegionPage(title=result['field_431_1'], region=region[0], show_in_menus=True)\n root_page.add_child(instance=region_page)\n\n try:\n mode_page = RegionPage.objects.descendant_of(region_page).get(title='Mietwagen')\n except RegionPage.DoesNotExist:\n mode_page = RegionPage(\n title='Mietwagen', mode=mode[0], show_in_menus=True)\n region_page.add_child(instance=mode_page)\n\n mode_page.add_child(instance=page)\n\n # Get prices\n price_cursor = db_connection.cursor()\n price_query = \"\"\"SELECT %(db)s.rentalcars_prices.*, %(db)s.rentalcars_price_periods.objectinfo\n FROM %(db)s.rentalcars_prices\n INNER JOIN %(db)s.rentalcars_price_periods\n ON 
%(db)s.rentalcars_price_periods.oid = %(db)s.rentalcars_prices.subcontainer_oid_30\n WHERE %(db)s.rentalcars_prices.subcontainer_oid_33 = %(car_oid)s AND %(db)s.rentalcars_prices.oid = %(oid)s\n AND %(db)s.rentalcars_prices.language='D'\n AND %(db)s.rentalcars_price_periods.language='D'\"\"\" % {'db': settings.MYSQL_IMPORT_DB, 'car_oid': result['rentalcars_vehicle.oid'], 'oid': result['oid']}\n price_cursor.execute(price_query)\n\n for price in price_cursor:\n try:\n imported_price = RentalPrice()\n imported_price.price = price['price_sell']\n imported_price.unit = price['price_sell_person_unit']\n imported_price.usage = price['field_469_1']\n imported_price.manual_order = price['subcontainer_sort_34']\n\n dates_list = [date.split('.') for date in price['objectinfo'].split(' - ')]\n imported_price.start_period = date(int(dates_list[0][2]), int(dates_list[0][1]), int(dates_list[0][0]))\n imported_price.end_period = date(int(dates_list[1][2]), int(dates_list[1][1]), int(dates_list[1][0]))\n imported_price.rental = page\n imported_price.save()\n except:\n pass\n\n # Get images\n image_cursor = db_connection.cursor()\n image_query = \"\"\"SELECT *\n FROM %s.pictures_objects\n WHERE oid = %s\"\"\" % (settings.MYSQL_IMPORT_DB, result['oid'])\n image_cursor.execute(image_query)\n\n for image in image_cursor:\n try:\n catalog_image = RentalImagePage()\n catalog_image.page = page\n catalog_image.image = CatalogImage.objects.get(picid=image['picid'])\n catalog_image.save()\n except (CatalogImage.DoesNotExist):\n pass\n\n self.stdout.write(self.style.SUCCESS('Mietwagen importiert'))\n", "sub_path": "catalog/management/commands/import_rental.py", "file_name": "import_rental.py", "file_ext": "py", "file_size_in_byte": 5915, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 16, "usage_type": "name"}, {"api_name": "catalog.models.Rental.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "catalog.models.Rental.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "catalog.models.Rental", "line_number": 33, "usage_type": "name"}, {"api_name": "MySQLdb.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_HOST", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_PORT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_USER", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_PASSWD", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_DB", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "MySQLdb.cursors", "line_number": 42, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Site.objects.all", "line_number": 45, "usage_type": "call"}, {"api_name": "wagtail.core.models.Site.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Site", "line_number": 45, "usage_type": "name"}, 
{"api_name": "wagtail.core.models.Page.objects.get", "line_number": 49, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 49, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_DB", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 61, "usage_type": "name"}, {"api_name": "django.conf.settings.IMPORT_CAP", "line_number": 61, "usage_type": "attribute"}, {"api_name": "catalog.models.Rental", "line_number": 65, "usage_type": "call"}, {"api_name": "catalog.models.Region.objects.get_or_create", "line_number": 70, "usage_type": "call"}, {"api_name": "catalog.models.Region.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "catalog.models.Region", "line_number": 70, "usage_type": "name"}, {"api_name": "catalog.models.Mode.objects.get_or_create", "line_number": 74, "usage_type": "call"}, {"api_name": "catalog.models.Mode.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "catalog.models.Mode", "line_number": 74, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "catalog.models.RegionPage.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "catalog.models.RegionPage", "line_number": 79, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage.MultipleObjectsReturned", "line_number": 80, "usage_type": "attribute"}, {"api_name": "catalog.models.RegionPage", "line_number": 80, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage.objects.filter", "line_number": 81, "usage_type": "call"}, {"api_name": "catalog.models.RegionPage.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "catalog.models.RegionPage", "line_number": 81, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage.DoesNotExist", "line_number": 82, "usage_type": "attribute"}, {"api_name": "catalog.models.RegionPage", "line_number": 82, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage", "line_number": 83, "usage_type": "call"}, {"api_name": "catalog.models.RegionPage.objects.descendant_of", "line_number": 87, "usage_type": "call"}, {"api_name": "catalog.models.RegionPage.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "catalog.models.RegionPage", "line_number": 87, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage.DoesNotExist", "line_number": 88, "usage_type": "attribute"}, {"api_name": "catalog.models.RegionPage", "line_number": 88, "usage_type": "name"}, {"api_name": "catalog.models.RegionPage", "line_number": 89, "usage_type": "call"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_DB", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 103, "usage_type": "name"}, {"api_name": "catalog.models.RentalPrice", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.date.split", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 114, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 116, "usage_type": "call"}, {"api_name": "django.conf.settings.MYSQL_IMPORT_DB", "line_number": 126, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 
126, "usage_type": "name"}, {"api_name": "catalog.models.RentalImagePage", "line_number": 131, "usage_type": "call"}, {"api_name": "catalog.models.CatalogImage.objects.get", "line_number": 133, "usage_type": "call"}, {"api_name": "catalog.models.CatalogImage.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "catalog.models.CatalogImage", "line_number": 133, "usage_type": "name"}, {"api_name": "catalog.models.CatalogImage.DoesNotExist", "line_number": 135, "usage_type": "attribute"}, {"api_name": "catalog.models.CatalogImage", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "535992375", "text": "import os, argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--file-ext', help='file extension')\nparser.add_argument('--dir', help='directory path')\nparser.add_argument('--substr', help='Sub string for searching')\nargs = parser.parse_args()\n\nextens = args.file_ext\ndir = args.dir\nsubstr = args.substr\n\n\ndef find(extens):\n for file in os.listdir(dir):\n if extens in file:\n for num, line in enumerate(open(dir+file).readlines()):\n yield file, line, num\n\n\ndef grep(gen, substr):\n for f, l, n in gen:\n if substr in l:\n yield f, l, n\n\nfor data in grep(find(extens), substr):\n print(data)\n", "sub_path": "dz-6/find.py", "file_name": "find.py", "file_ext": "py", "file_size_in_byte": 661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 3, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "369931592", "text": "from __future__ import division\r\nimport sys\r\nimport argparse\r\n\r\nwgsim_path = \"wgsim\"\r\nbedtools_path = \"bedtools\"\r\nsamtools_path = \"samtools\"\r\n\r\ndef rounder(x,y):\r\n\treturn int(round(x / float(y))) * y\r\n\r\nclass SmartFormatter(argparse.HelpFormatter):\r\n\tdef _split_lines(self, text, width):\r\n\t\tif text.startswith('R|'):\r\n\t\t\treturn text[2:].splitlines()\r\n\t\t# this is the RawTextHelpFormatter._split_lines\r\n\t\treturn argparse.HelpFormatter._split_lines(self, text, width)\r\n\r\n\r\nparser=argparse.ArgumentParser(description='Predict CNVs using dudeML')\r\nparser._positionals.title = 'possible modes (enter \\'python3 dudeML.py modeName -h\\' for modeName\\'s help message'\r\nsubparsers = parser.add_subparsers(help='sub-command help')\r\nparser_1 = subparsers.add_parser('predict', help='Predict CNVs in sample based on training classifier including ploidy or frequency of CNV.')\r\nparser_2 = subparsers.add_parser('classify', help='Train a classifier based on a provided training set.')\r\nparser_3 = subparsers.add_parser('winStat', help='Calculate average coverage of windows for a number of bases, given the window size, relative to the chromosomes average coverage.')\r\nparser_4 = subparsers.add_parser('winStatExtra', help='Find averaged coverage of windows, based on previously estimated median coverage.')\r\nparser_5 = subparsers.add_parser('fvecSample', help='Format sample/test file to create sets of windows to analyse as a features vector.')\r\nparser_6 = subparsers.add_parser('fvecTrain', help='Format training file to ID windows with structural variants and create sets of windows to train as a features vector.')\r\nparser_7 = subparsers.add_parser('subTrain', help='Subsample training file for quicker training of the predictor, can subsample a fraction (0.0-1.0) or a number (1-N).')\r\nparser_8 = subparsers.add_parser('simChr', 
help='Simulate chromosomes containing duplications and deletions using the output of simCNV.')\r\nparser_9 = subparsers.add_parser('simCNV', help='Simulate coordinates of duplications and deletions for multiple chromosomes, which can be combined later.')\r\nparser_10 = subparsers.add_parser('recreateTotal', help='Create the total file from known CNVs for CNV chromosome simulation.')\r\nparser_11 = subparsers.add_parser('covSummary', help='Summarise coverage by chromosome in coverage bedfile.')\r\nparser_12 = subparsers.add_parser('simReads', help='Following simChr, uses WGsim to simulate reads across chromosomes.')\r\nparser_13 = subparsers.add_parser('summarize', help='For a predictions file of known duplications and deletions, finds the number of correctly and falsely identified CNVs.')\r\nparser_14 = subparsers.add_parser('ROC', help='If CNVs are known, works out the rate of true/false positives for given dataset (generated in fvecTrain) and classifier (generated in classify).')\r\nparser_15 = subparsers.add_parser('quantify', help='Quantify CNVs across multiple samples mapped to the same reference.')\r\n\r\nparser_1.add_argument('-i','--INPUT',help='Input bed file, generated by winStat and fvecSample.', required=True)\r\nparser_1.add_argument('-o','--OUTPUT',help='Output file in bed format containing predicted CNVs.', required=True)\r\nparser_1.add_argument('-t','--TRAIN',help='Training file or folder, generated by classify function.', required=True)\r\nparser_1.set_defaults(mode='predict')\r\n\r\nparser_2.add_argument('-i','--INPUT',help='Input bed file, generated by fvecTrain.', required=True)\r\nparser_2.add_argument('-o','--OUTPUT',help='Output training file in binary format.', required=True)\r\nparser_2.add_argument('-m','--MODEL',help='Type of classifier used, can be set as follows: \"CNN\" - Convolutional Neural Network, \"DTC\" - Decision Tree Classifier, \"ETC100\" - Extra Trees Classifier (100 estimators), \"ETC500\" - Extra Trees Classifier (500 estimators), \"RFC100\" - Random Forest Classifier (100 estimators), \"RFC500\" - Random Forest Classifier (500 estimators).' 
,choices=[\"CNN\",\"DTC\",\"ETC100\",\"ETC500\",\"RFC100\",\"RFC500\"],default=\"RFC100\")\r\nparser_2.set_defaults(mode='classify')\r\n\r\nparser_3.add_argument('-i','--INPUT',help='Input bed file, generated by genomeCoverageBed.', required=True)\r\nparser_3.add_argument('-o','--OUTPUT',help='Output bed file summarizing stats in windows.', required=True)\r\nparser_3.add_argument(\"-w\",'--WINDOW_SIZE',help=\"The window size chosen to detect CNVs across.\",type=int, default=50)\r\nparser_3.add_argument(\"-s\",'--STEP_SIZE',help=\"The step size chosen to detect CNVs across.\",type=int, default=50)\r\nparser_3.add_argument(\"-sum\",\"--SUMMARY\",help=\"Summary of coverages file\",type=str)\r\nparser_3.add_argument(\"-chr\",'--CHROMOSOME',help=\"Bedfile of chromosomes to estimate statistics over with start and end of chromosomes.\",type=str)\r\nparser_3.set_defaults(mode='winStat')\r\n\r\nparser_4.add_argument('-i','--INPUT',help='Input bed file, generated by genomeCoverageBed.', required=True)\r\nparser_4.add_argument('-o','--OUTPUT',help='Output bed file summarizing stats in windows.', required=True)\r\nparser_4.add_argument('-cov','--COVERAGE',help='Coverage to standardize by.', required=True)\r\nparser_4.add_argument(\"-w\",'--WINDOW_SIZE',help=\"The window size chosen to detect CNVs across.\",type=int, default=50)\r\nparser_4.add_argument(\"-s\",'--STEP_SIZE',help=\"The step size chosen to detect CNVs across.\",type=int, default=50)\r\nparser_4.add_argument(\"-chr\",'--CHROMOSOME',help=\"List of chromosomes to estimate statistics for. Can be a single chromosome, a comma seperated list or a file, with a chromosome on each line.\",type=str)\r\nparser_4.set_defaults(mode='winStatExtra')\r\n\r\nparser_5.add_argument(\"-i\",'--INPUT',help=\"Input file in bed format, containing stats on each window, generated by winStat.\",required=True)\r\nparser_5.add_argument(\"-o\",'--OUTPUT',help=\"Output file in bed format, containing stats on focal window and surrounding windows.\",required=True)\r\nparser_5.add_argument(\"-TE\",'--TE',help=\"Bed or GFF file containing repeat locations in genome.\")\r\nparser_5.add_argument(\"-id\",'--ID',help=\"ID of sample analysed.\",type=str,default=\"NA\")\r\nparser_5.add_argument(\"-d\",'--DIRECTORY',help=\"Directory to write output files to.\",type=str,default=\"\")\r\nparser_5.add_argument(\"-windows\",'--WINDOWS',help=\"Number of windows around focal window to include.\",type=int,default=5)\r\nparser_5.add_argument(\"-w\",'--WINDOW_SIZE',help=\"Window size (bp).\",type=int,default=50)\r\nparser_5.add_argument(\"-s\",'--STEP_SIZE',help=\"Step size (bp).\",type=int, default=50)\r\nparser_5.add_argument(\"-c\",'--CUTOFF',help=\"Ignore windows with a higher proportion of masked positions than the cut off.\",type=float, default=0.01)\r\nparser_5.set_defaults(mode='fvecSample')\r\n\r\nparser_6.add_argument(\"-i\",'--INPUT',help=\"Input file in bed format, containing stats on each window, generated by winStat.\",required=True)\r\nparser_6.add_argument(\"-o\",'--OUTPUT',help=\"Output file in bed format, containing stats on focal window and surrounding windows.\",required=True)\r\nparser_6.add_argument(\"-TE\",'--TE',help=\"Bed or GFF file containing repeat locations in genome.\")\r\nparser_6.add_argument(\"-dels\",\"--DELETION\",help=\"Bed file containing known deletion locations.\",required=True)\r\nparser_6.add_argument(\"-dups\",'--DUPLICATION',help=\"Bed file containing known duplication 
locations.\",required=True)\r\nparser_6.add_argument(\"-d\",'--DIRECTORY',help=\"Directory to write output files to.\",type=str,default=\"\")\r\nparser_6.add_argument(\"-windows\",'--WINDOWS',help=\"Number of windows around focal window to include.\",type=int,default=5)\r\nparser_6.add_argument(\"-w\",'--WINDOW_SIZE',help=\"Window size (bp).\",type=int,default=50)\r\nparser_6.add_argument(\"-s\",'--STEP_SIZE',help=\"Step size (bp).\",type=int, default=50)\r\nparser_6.add_argument(\"-c\",'--CUTOFF',help=\"Ignore windows with more masked positions than the cut off.\",type=float, default=0.01)\r\nparser_6.set_defaults(mode='fvecTrain')\r\n\r\nparser_7.add_argument(\"-i\",'--INPUT',help=\"Input bed file containing training windows.\",required=True)\r\nparser_7.add_argument(\"-o\",'--OUTPUT',help=\"Output subsampled bed file containing training windows\",required=True)\r\nparser_7.add_argument(\"-N\",\"--NUMBER\",help=\"Number of samples to extract (1+) or fraction to downsample to (0-0.99).\",type=float,required=True)\r\nparser_7.set_defaults(mode='subTrain')\r\n\r\nparser_8.add_argument('-fasta',\"--FASTA\",help='Fasta file containing chromosomes to simulate CNVs in.', required=True)\r\nparser_8.add_argument('-cnvBed',help='Bed file containing loci for CNVs to simulate.', required=True)\r\nparser_8.add_argument(\"-id\",'--ID',help=\"ID to label output files.\",type=str,default=\"NA\")\r\nparser_8.add_argument(\"-d\",'--DIRECTORY',help=\"Directory to write output files to.\",type=str,default=\"\")\r\nparser_8.set_defaults(mode='simChr')\r\n\r\nparser_9.add_argument(\"-fasta\",\"--FASTA\", required=True,help=\"Fasta file containing chromosomes to simulate CNVs in.\")\r\nparser_9.add_argument(\"-CNV\",help=\"Number of duplications and deletions to simulate per megabase.\",type=int,default=50)\r\nparser_9.add_argument(\"-CNVsize\",help=\"Mean size of CNV, size determined in a poisson distribution.\",type=int,default=1000)\r\nparser_9.add_argument(\"-delLength\",help=\"Mean length of deletions to simulate.\",type=int,default=1000)\r\nparser_9.add_argument(\"-dupLength\",help=\"Mean length of duplications to simulate.\",type=int,default=1000)\r\nparser_9.add_argument(\"-N\",\"--NUMBER\",help=\"Ploidy of chromosomes to simulate CNVs on.\",type=int,default=1)\r\nparser_9.add_argument(\"-d\",'--DIRECTORY',help=\"Directory to write output files to.\",type=str,default=\"\")\r\nparser_9.add_argument(\"-c\",'--CUTOFF',help=\"Ignore windows with a higher proportion of masked positions than the cut off.\",type=float, default=0.01)\r\nparser_9.add_argument(\"-TE\",'--TE',help=\"Bed or GFF file containing repeat locations in genome.\")\r\nparser_9.set_defaults(mode='simCNV')\r\n\r\nparser_10.add_argument(\"-fasta\",\"--FASTA\",help=\"Fasta file containing chromosomes to simulate CNVs in.\", required=True)\r\nparser_10.add_argument(\"-dels\",\"--DELETION\",help=\"Bed file containing deletion loci.\", required=True)\r\nparser_10.add_argument(\"-dups\",'--DUPLICATION',help=\"Bed file containing duplication loci\", required=True)\r\nparser_10.add_argument(\"-o\",'--OUTPUT',help=\"Output file containing windows with and without CNVs.\", required=True)\r\nparser_10.add_argument(\"-d\",'--DIRECTORY',help=\"Directory to write output files to.\",type=str,default=\"\")\r\nparser_10.set_defaults(mode='recreateTotal')\r\n\r\nparser_11.add_argument(\"-i\",'--INPUT',required=True,help=\"Bed file generated by genomeCoverageBed.\")\r\nparser_11.add_argument(\"-chr\",'--CHROMOSOME',help=\"List of chromosomes to 
summarize.\")\r\nparser_11.add_argument(\"-sum\",\"--SUMMARY\",help=\"Summary file to output.\")\r\nparser_11.set_defaults(mode='covSummary')\r\n\r\nparser_12.add_argument(\"-fasta\",\"--FASTA\",help=\"Fasta sequence to simulate reads for.\",required=True)\r\nparser_12.add_argument(\"-cov\",'--COVERAGE',help=\"Coverage of sample to simulate reads for.\",type=int,default=10)\r\nparser_12.add_argument(\"-d\",'--DIRECTORY',help=\"Directory to write output files to.\",type=str,default=\"\")\r\nparser_12.add_argument(\"-id\",'--ID',help=\"ID to label output files.\",type=str,default=\"NA\")\r\nparser_12.add_argument(\"-RL\",'--READ_LENGTH',help=\"Read Length (bp).\",type=int,default=100)\r\nparser_12.add_argument(\"-chr\",'--CHROMOSOME',help=\"List of chromosomes to estimate statistics for.\",type=str)\r\nparser_12.add_argument(\"-se\",'--SE',help=\"Simulate single end reads instead of paired end reads.\",type=bool,default=False)\r\nparser_12.set_defaults(mode='simReads')\r\n\r\nparser_13.add_argument(\"-i\",'--INPUT',help=\"Input file containing predicted CNVs, generated by predict function\",required=True)\r\nparser_13.add_argument(\"-o\",'--OUTPUT',help=\"Summary bed file.\",required=True)\r\nparser_13.add_argument(\"-c\",'--CUTOFF',help=\"Confidence cutoff, CNVs below this value are removed.\",type=float,default=0.0)\r\nparser_13.add_argument(\"-w\",'--WINDOW_SIZE',help=\"Window size (bp).\",type=int,default=50)\r\nparser_13.add_argument(\"-dups\",'--DUPLICATION',help=\"Bed file containing duplication loci.\")\r\nparser_13.add_argument(\"-dels\",\"--DELETION\",help=\"Bed file containing deletion loci.\")\r\nparser_13.add_argument(\"-id\",'--ID',help=\"ID to label output files.\",type=str,default=\"NA\")\r\nparser_13.set_defaults(mode='summarize')\r\n\r\nparser_14.add_argument(\"-i\",'--INPUT',help=\"Input bed file, generated by fvecTrain.\",required=True)\r\nparser_14.add_argument(\"-o\",'--OUTPUT',help=\"File containing false-positive and true-positive rates for duplications and deletions.\",required=True)\r\nparser_14.add_argument('-t','--TRAIN',help='Training file or folder, generated by classify function.', required=True)\r\nparser_14.set_defaults(mode='ROC')\r\n\r\nparser_15.add_argument(\"-i\",'--INPUT',help=\"List of prediction files to quantify CNVs over.\",required=True)\r\nparser_15.add_argument(\"-o\",'--OUTPUT',help=\"File to output CNV windows to.\",required=True)\r\nparser_15.add_argument(\"-gff\",'--GFF',help=\"GFF containing genes or other factor to identify if CNVs are present in each factor.\")\r\nparser_15.add_argument(\"-c\",'--CUTOFF',help=\"Confidence cutoff, CNVs below this value are removed.\",type=float,default=0.5)\r\nparser_15.add_argument(\"-w\",'--WINDOW_SIZE',help=\"Window size (bp).\",type=int,default=50)\r\nparser_15.set_defaults(mode='quantify')\r\n# parser_14.add_argument('-foo', '--foo', action='store_true')\r\n# parser_14.set_defaults(mode='readme')\r\n\r\nparser.add_argument(\"-f\",'--FUNCTION',help=\"The function which will be used within the script, the options are: predict, winStat, simCNV, simChr, fvecTrain, fvecSample, recreateTotal, covSummary, winStatExtra, subTrain,summarize\",type=str)\r\nparser.add_argument(\"-d\",'--DIRECTORY',help=\"Path to export simulated files such as beds containing deletions & duplications or simulated fasta\")\r\nparser.add_argument(\"-id\",'--ID',help=\"The sample ID\",type=str, default=\"NA\")\r\nparser.add_argument(\"-i\",'--INPUT',help=\"The input file across the various functions, may differ in 
format\",type=str)\r\nparser.add_argument(\"-o\",'--OUTPUT',help=\"The output file across the various functions, may differ in format\",type=str)\r\nparser.add_argument('-quiet','--QUIET', help=\"If set, does not print any messages.\", action='store_true')\r\n\r\nif len(sys.argv)==1:\r\n\tparser.print_help()\r\n\tsys.exit(1)\r\nargs = parser.parse_args()\r\nargsDict = vars(args)\r\nfunction=args.FUNCTION\r\n\r\n\"\"\"\r\nfiles required for input, a training file with the coverages and std dev of different classes\r\nan input bed file with coverages by window\r\nan output bedfile\r\n\"\"\"\r\n\r\nif argsDict['mode'] in ['predict'] or function == \"predict\":\r\n\t\"\"\"\r\n\tinput file is in the following format:\r\n\tCHROMOSOME START END STRAIN COV-5 COV-4 COV-3 COV-2 COV-1 COV COV+1 COV+2 COV+3 COV+4 COV+5 SD-5 SD-4 SD-3 SD-2 SD-1 SD SD+1 SD+2 SD+3 SD+4 SD+5\r\n\tWhere COV is the average coverage of a window, up to 5 up and downstrain of the focal window, and SD is the standard deviation of coverage in each window\r\n\te.g.\r\n\t2L\t8000\t8249\tN\t1.073\t0.902\t1.085\t0.927\t0.976\t1.024\t1\t1.049\t1.183\t1.122\t0.951\t0.141\t0.11\t0.152\t0.067\t0.093\t0.198\t0.163\t0.126\t0.111\t0.117\t0.302\r\n\toutput file is in the following format:\r\n\tCHROMOSOME START END STRAIN MEDIAN_COV PREDICTED_CNV PROBABILITY PREDICTED_PLOIDY PROBABILITY\r\n\te.g.\r\n\t2L\t8000\t8249\tN\t1.024\tN\t1.0\t1\t1.0\r\n\t\"\"\"\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tfrom sklearn.ensemble import RandomForestClassifier\r\n\tfrom sklearn.datasets import make_classification\r\n\tfrom sklearn.externals import joblib\r\n\tfrom sklearn.tree import DecisionTreeClassifier\r\n\tfrom sklearn.neural_network import MLPClassifier\r\n\tfrom sklearn.ensemble import ExtraTreesClassifier\r\n\timport os\r\n\tif os.path.isfile(args.TRAIN) == True:\r\n\t\tif args.QUIET == False:\r\n\t\t\tprint(\"Classifying over a single training set\")\r\n\t\tclf = joblib.load(args.TRAIN)\r\n\t\tclf2 = joblib.load(args.TRAIN + \"2\")\r\n\t\tinput = args.INPUT\r\n\t\ttest_in = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\t\toutput = args.OUTPUT\r\n\t\ttest_in2 = test_in.drop(test_in[[0,1,2,3]], axis=1)\r\n\t\ttest_Y = []\r\n\t\ttest_in2.columns = list(range(0,len(test_in2.columns)))\r\n\t\ttest_in2_y = []\r\n\t\ttest_in2_yA = []\r\n\t\ttest_in2_y2 = []\r\n\t\ttest_in2_yA2 = []\r\n\t\tif args.QUIET == False:\r\n\t\t\tprint(\"Classifying windows\")\r\n\t\ttest_in2_y.extend(list(clf.predict(test_in2)))\r\n\t\ttest_in2_y2.extend(list(clf2.predict(test_in2)))\r\n\t\ttest_in2_yA.extend(list(pd.DataFrame(clf.predict_proba(test_in2),columns=None).max(axis=1)))\r\n\t\ttest_in2_yA2.extend(list(pd.DataFrame(clf2.predict_proba(test_in2),columns=None).max(axis=1)))\r\n\t\tout_df = pd.DataFrame({\"chr\":list(test_in[0]), \"start\":list(test_in[1]), \"end\":list(test_in[2]), \"ID\":list(test_in[3]), \"coverage\":list(test_in2[(len(test_in2.columns)-4)/2]) ,\"CNV\":test_in2_y,\"CNVprob\":test_in2_yA,\"ploidy\":test_in2_y2,\"ploidyprob\":test_in2_yA2})\r\n\t\tout_df.to_csv(output,sep=\"\\t\",index =False,header=None)\r\n\telif os.path.isfile(args.TRAIN) == False and os.path.isdir(args.TRAIN) == True:\r\n\t\tif args.QUIET == False:\r\n\t\t\tprint(\"Bootstrapping over multiple training sets\")\r\n\t\tpathe = args.TRAIN\r\n\t\tif pathe.endswith(\"/\") == False:\r\n\t\t\tpathe += \"/\"\r\n\t\tout_bs_1 = pd.DataFrame(columns=[0])\r\n\t\tout_bs_2 = pd.DataFrame(columns=[0])\r\n\t\tcount = 0\r\n\t\ttest_in = 
pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\t\toutput = args.OUTPUT\r\n\t\ttest_in2 = test_in.drop(test_in[[0,1,2,3]], axis=1)\r\n\t\ttest_Y = []\r\n\t\ttest_in2.columns = list(range(0,len(test_in2.columns)))\r\n\t\tfor d,s,f in os.walk(pathe):\r\n\t\t\tfor inf in f:\r\n\t\t\t\tif os.path.isfile(pathe + inf) == True and os.path.isfile(pathe + inf + \"2\") == True:\r\n\t\t\t\t\tif args.QUIET == False:\r\n\t\t\t\t\t\tprint(\"Processing classifier \" + str(count+1))\r\n\t\t\t\t\tclf = joblib.load(pathe + inf)\r\n\t\t\t\t\tclf2 = joblib.load(pathe + inf + \"2\")\r\n\t\t\t\t\tout_bs_1[count] = list(clf.predict(test_in2))\r\n\t\t\t\t\tout_bs_2[count] = list(clf2.predict(test_in2))\r\n\t\t\t\t\tcount += 1\r\n\t\tif args.QUIET == False:\r\n\t\t\tprint(\"Estimating consensus states\")\r\n\t\tbs_1 = out_bs_1.mode(axis=1)[0]\r\n\t\tbs_1_prob = list(out_bs_1.eq(bs_1, axis=0).sum(axis='columns')/float(len(out_bs_1.columns)))\r\n\t\tbs_2 = out_bs_2.mode(axis=1)[0]\r\n\t\tbs_2_prob = list(out_bs_2.eq(bs_2, axis=0).sum(axis='columns')/float(len(out_bs_2.columns)))\r\n\t\tout_df = pd.DataFrame({\"chr\":list(test_in[0]), \"start\":list(test_in[1]), \"end\":list(test_in[2]), \"ID\":list(test_in[3]), \"coverage\":list(test_in2[(len(test_in2.columns)-4)//2]) ,\"CNV\":list(bs_1),\"CNVprob\":bs_1_prob,\"ploidy\":list(bs_2),\"ploidyprob\":bs_2_prob})\r\n\t\tout_df.to_csv(output,sep=\"\\t\",index =False,header=None)\r\n\r\nelif argsDict['mode'] in ['classify'] or function == \"classify\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tfrom sklearn.ensemble import RandomForestClassifier\r\n\tfrom sklearn.datasets import make_classification\r\n\tfrom sklearn.externals import joblib\r\n\tfrom sklearn.tree import DecisionTreeClassifier\r\n\tfrom sklearn.neural_network import MLPClassifier\r\n\tfrom sklearn.ensemble import ExtraTreesClassifier\r\n\tmodels = {\"RFC100\":RandomForestClassifier(n_estimators=100), \"RFC500\":RandomForestClassifier(n_estimators=500), \"CNN\":MLPClassifier(), \"ETC100\":ExtraTreesClassifier(n_estimators=100), \"ETC500\":ExtraTreesClassifier(n_estimators=500), \"DTC\":DecisionTreeClassifier()}\r\n\ttraining_in = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\tX = training_in.drop(training_in[[0,1,2,3,4]], axis=1)\r\n\tX.columns = list(range(0,len(X.columns)))\r\n\tY = list(training_in[3])\r\n\tclf = models[args.MODEL]\r\n\tclf.fit(X,Y)\r\n\tY2 = list(map(str,list(training_in[4])))\r\n\tclf2 = RandomForestClassifier(n_estimators=100)\r\n\tclf2.fit(X,Y2)\r\n\tjoblib.dump(clf, args.OUTPUT)\r\n\tjoblib.dump(clf2, args.OUTPUT + \"2\")\r\n\tif args.QUIET == False:\r\n\t\tprint(\"Classifier Trained\")\r\n\r\nelif argsDict['mode'] in ['winStat'] or function == \"winStat\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\timport scipy.stats\r\n\timport os\r\n\t\"\"\"\r\n\tinput is generated by genomeCoverageBed -d in the following format:\r\n\tCHR POS COVERAGE\r\n\tFollowing that, per chromosome, find the median coverage of covered bases.\r\n\tCan find median for all chromosomes or a specified set of them, one chromosome ID per line.\r\n\t\"\"\"\r\n\tos.system(bedtools_path + \" genomecov -d -ibam \" + args.INPUT + \" > dudeml_temp_covsperbase.bed\")\r\n\tif args.QUIET == False:\r\n\t\tprint(\"Calculating median coverage\")\r\n\ttest = pd.read_table(\"dudeml_temp_covsperbase.bed\",header=None)\r\n\tcovs_median = {}\r\n\tsplits_median = {}\r\n\tfor line in open(args.CHROMOSOME):\r\n\t\ti = line.split()[0].rstrip()\r\n\t\tcovs_median[i] = test[2][test[2] != 0][test[0] == 
i].median()\r\n\t\tprint(i,covs_median[i])\r\n\tif args.SUMMARY is not None:\r\n\t\tout = open(args.SUMMARY,\"w\")\r\n\t\tfor i in covs_median:\r\n\t\t\tout.write(i + \"\\t\" + str(covs_median[i]) + \"\\n\")\r\n\t\tout.close()\r\n\tif args.QUIET == False:\r\n\t\tprint(\"Calculating relative median coverage per window\")\r\n\tchr_stats = []\r\n\tcount = 0\r\n\t\"function takes in a pandas dataframe column and outputs a dataframe containing the start and end of window, as well as window coverage median and standard deviation\"\r\n\tdef rolling_with_step(chr,s, window, step):\r\n\t\tvert_idx_list = np.arange(1, s.size - window, step)\r\n\t\thori_idx_list = np.arange(window)\r\n\t\tA, B = np.meshgrid(hori_idx_list, vert_idx_list)\r\n\t\tidx_array = A + B\r\n\t\tx_array = s.values[idx_array]\r\n\t\tidx = list(s.index[vert_idx_list + (int(window))])\r\n\t\tmed = list(np.around(list(map(np.median, x_array)),4))\r\n\t\tintq = list(np.around(list(map(scipy.stats.iqr, x_array)),4))\r\n\t\tmeans = list(np.around(list(map(np.mean, x_array)),4))\r\n\t\tstd = list(np.around(list(map(np.std, x_array)),4))\r\n\t\treturn pd.DataFrame({\"chr\":chr,\"start\":vert_idx_list,\"end\":vert_idx_list + window,\"med\":med,\"iqr\":intq,\"mean\":means,\"std\":std})\r\n\tout_df = pd.DataFrame(columns=[\"chr\",\"start\",\"end\",\"med\",\"iqr\",\"mean\",\"std\"])\r\n\t\"\"\"\r\n\tFor each chromosome, divide each base by the chromosome median (or total median).\r\n\tFollowing that, finds the median and standard deviation for windows of a given size\r\n\t\"\"\"\r\n\tfor i in covs_median:\r\n\t\ttest_chrs = test[test[0] == i]\r\n\t\ttest_chrs_3 = test_chrs[2]/covs_median[i]\r\n\t\twins_step = rolling_with_step(i,test_chrs_3,args.WINDOW_SIZE-1,args.STEP_SIZE)\r\n\t\tif args.QUIET == False:\r\n\t\t\tprint(\"Chromosome \" + str(i) + \" processed\")\r\n\t\tout_df = pd.concat([out_df,wins_step])\r\n\tout_df['chr']=out_df['chr'].astype(str)\r\n\tout_df['start']=out_df['start'].astype(int)\r\n\tout_df['end']=out_df['end'].astype(int)\r\n\tout_df.to_csv(args.OUTPUT,sep=\"\\t\",index =False,columns=None,header=None)\r\n\tos.remove(\"dudeml_temp_covsperbase.bed\")\r\n\r\nelif argsDict['mode'] in ['simChr'] or function == \"simChr\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tpathOut = args.DIRECTORY\r\n\tif pathOut != \"\" and pathOut.endswith(\"/\") == False:\r\n\t\tpathOut += \"/\"\r\n\tfrom Bio import SeqIO\r\n\timport os\r\n\tos.system(\"cp \" + args.FASTA + \" \" + pathOut + args.ID + \"_noCNV.fa\")\r\n\t#os.system(\"maskFastaFromBed -fi \" + args.FASTA + \" -bed \" + args.TE + \" -fo \" + pathOut + args.ID + \"_noCNV.fa\")\r\n\tchrs = []\r\n\tchr = {}\r\n\tchr2 = {}\r\n\tfor r in SeqIO.parse(open(pathOut + args.ID + \"_noCNV.fa\"),\"fasta\"):\r\n\t\tchrs.append(r.id)\r\n\t\tchr[r.id] = str(r.seq)\r\n\t\tchr2[r.id] = \"\"\r\n\tfor line in open(args.cnvBed):\r\n\t\tif line.split()[3].rstrip() == \"normal\":\r\n\t\t\tchr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]\r\n\t\telif line.split()[3].rstrip() == \"del\":\r\n\t\t\tpass\r\n\t\telif line.split()[3].rstrip() == \"dup\":\r\n\t\t\tif float(line.split()[-1].rstrip()) > 1.5:\r\n\t\t\t\tfor v in range(0,int(line.split()[-1].rstrip())):\r\n\t\t\t\t\tchr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]\r\n\t\t\telse:\r\n\t\t\t\tchr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]\r\n\t\t\t\tchr2[line.split()[0]] += 
chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]\r\n\tfor i in chrs:\r\n\t\tout = open(pathOut + i + \"_\" + args.ID + \"_CNV.fa\",\"w\")\r\n\t\tout.write(\">\" + i + \"\\n\" + chr2[i] + \"\\n\")\r\n\t\tout.close()\r\n\tos.remove(pathOut + args.ID + \"_noCNV.fa\")\r\n\r\nelif argsDict['mode'] in ['fvecTrain'] or function == \"fvecTrain\":\r\n\timport os\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\timport math\r\n\tfrom shutil import copyfile\r\n\tpathOut = args.DIRECTORY\r\n\tif pathOut != \"\" and pathOut.endswith(\"/\") == False:\r\n\t\tpathOut += \"/\"\r\n\tdef roundup(x):\r\n\t\treturn int(math.ceil(x / args.WINDOW_SIZE)) * args.WINDOW_SIZE\r\n\tdef rounddown(x):\r\n\t\treturn int(math.floor(x / args.WINDOW_SIZE)) * args.WINDOW_SIZE\r\n\t\"\"\"If ignoring TEs is required, due to their inherent weirdness with split reads/coverage, this removes windows with TE sequences.\"\"\"\r\n\tif args.TE is not None:\r\n\t\tos.system(bedtools_path + \" intersect -v -wa -a \"+ args.INPUT + \" -b \" + args.TE + \" -f \" + str(args.CUTOFF) + \" > \"+ pathOut + \"dudeml_temp.bed\")\r\n\telif args.TE is None:\r\n\t\tcopyfile(args.INPUT, pathOut + \"dudeml_temp.bed\")\r\n\tdel_cp = {}\r\n\tdup_cp = {}\r\n\tdup_temp_1 = open(\"dup_temp_1.bed\",\"w\")\r\n\tdel_temp_1 = open(\"del_temp_1.bed\",\"w\")\r\n\t\"\"\"Reformat deletion and duplication windows to find overlapping windows with known CNVs.\"\"\"\r\n\tfor line in open(args.DUPLICATION):\r\n\t\tline = line.rstrip()\r\n\t\tcp = str((float(line.split()[5])*float(line.split()[4])) + ((1-float(line.split()[4])) * 1))\r\n\t\tdup_temp_1.write(\"\\t\".join([line.split()[0],str(rounddown(int(line.split()[1]))),str(roundup(int(line.split()[2]))),cp]) + \"\\n\")\r\n\tfor line in open(args.DELETION):\r\n\t\tline = line.rstrip()\r\n\t\tcp = str((float(line.split()[5])*float(line.split()[4])) + ((1-float(line.split()[4])) * 1))\r\n\t\tdel_temp_1.write(\"\\t\".join([line.split()[0],str(rounddown(int(line.split()[1]))),str(roundup(int(line.split()[2]))),cp]) + \"\\n\")\r\n\tdup_temp_1.close()\r\n\tdel_temp_1.close()\r\n\tos.system(bedtools_path + \" makewindows -b dup_temp_1.bed -w \" + str(args.WINDOW_SIZE) + \" -s \" + str(args.STEP_SIZE) + \" -i src > dup_temp_2.bed\")\r\n\tos.system(bedtools_path + \" makewindows -b del_temp_1.bed -w \" + str(args.WINDOW_SIZE) + \" -s \" + str(args.STEP_SIZE) + \" -i src > del_temp_2.bed\")\r\n\tfor line in open(\"dup_temp_2.bed\"):\r\n\t\tdup_cp[line.split()[0] + \"\\t\" + str(int(line.split()[1]) + 1) + \"\\t\" + line.split()[2]] = line.split()[3]\r\n\tfor line in open(\"del_temp_2.bed\"):\r\n\t\tdel_cp[line.split()[0] + \"\\t\" + str(int(line.split()[1]) + 1) + \"\\t\" + line.split()[2]] = line.split()[3]\r\n\tout = open(pathOut + \"dudeml_temp2.bed\",\"w\")\r\n\tfor line in open(pathOut + \"dudeml_temp.bed\"):\r\n\t\tcopy = \"N\"\r\n\t\tline = line.rstrip()\r\n\t\tliner = line.split()\r\n\t\tif line.split()[0] + \"\\t\" + line.split()[1] + \"\\t\" + str(int(line.split()[2])) in dup_cp:\r\n\t\t\tout.write(\"\\t\".join([liner[0],liner[1],liner[2],\"dup\",dup_cp[line.split()[0] + \"\\t\" + line.split()[1] + \"\\t\" + str(int(line.split()[2]))], \"\\t\".join(line.split()[3:])]) + \"\\n\")\r\n\t\telif line.split()[0] + \"\\t\" + line.split()[1] + \"\\t\" + str(int(line.split()[2])) in del_cp:\r\n\t\t\tout.write(\"\\t\".join([liner[0],liner[1],liner[2],\"del\",del_cp[line.split()[0] + \"\\t\" + line.split()[1] + \"\\t\" + str(int(line.split()[2]))], \"\\t\".join(line.split()[3:])]) + \"\\n\")\r\n\t\telse:\r\n\t\t\tif 
len(liner) == 5 or len(liner) == 7 or len(liner) == 8:\r\n\t\t\t\tout.write(\"\\t\".join([liner[0],liner[1],liner[2],\"N\",\"1.0\", \"\\t\".join(line.split()[3:])]) + \"\\n\")\r\n\tout.close()\r\n\tv=args.WINDOW_SIZE\r\n\tif args.STEP_SIZE is not None:\r\n\t\tv=int(args.STEP_SIZE)\r\n\telif args.STEP_SIZE is None:\r\n\t\tv=int(args.WINDOW_SIZE)\r\n\twindow_pos = [[0,1,2,3,4,5]] * ((2*args.WINDOWS) + 1)\r\n\toutput = open(args.OUTPUT,\"w\")\r\n\tcount = 0\r\n\tfor line in open(pathOut + \"dudeml_temp2.bed\"):\r\n\t\tcount += 1\r\n\t\tif count % 100000 == 0:\r\n\t\t\tif args.QUIET == False:\r\n\t\t\t\tprint(int(count),\"windows processed\")\r\n\t\twindow_pos += [window_pos.pop(0)]\r\n\t\twindow_pos[(2*args.WINDOWS)] = line.rstrip().split()\r\n\t\tclass_ud = \"N\"\r\n\t\tif len(list(set([item[0] for item in window_pos]))) == 1:\r\n\t\t\tif window_pos[args.WINDOWS][3] == \"dup\" or window_pos[args.WINDOWS][3] == \"Dup\":\r\n\t\t\t\tclass_ud = \"Dup\"\r\n\t\t\telif window_pos[args.WINDOWS][3] == \"del\" or window_pos[args.WINDOWS][3] == \"Del\":\r\n\t\t\t\tclass_ud = \"Del\"\r\n\t\t\tcc = 0\r\n\t\t\tcv = 0\r\n\t\t\tfor k in window_pos:\r\n\t\t\t\tif int(k[1]) == int(window_pos[args.WINDOWS][1]) - (v*(args.WINDOWS - cc)):\r\n\t\t\t\t\tcv += 1\r\n\t\t\t\tcc += 1\r\n\t\t\tif cv == len(window_pos):\r\n\t\t\t\tcq = [str(window_pos[args.WINDOWS][0]),str(window_pos[args.WINDOWS][1]), str(window_pos[args.WINDOWS][2]), class_ud,str(window_pos[args.WINDOWS][4])]\r\n\t\t\t\tfor k in window_pos:\r\n\t\t\t\t\tcq.append(str(k[5]))\r\n\t\t\t\t\tcq.append(str(k[6]))\r\n\t\t\t\t\tcq.append(str(k[7]))\r\n\t\t\t\t\tcq.append(str(k[8]))\r\n\t\t\t\toutput.write(\"\\t\".join(cq) + \"\\n\")\r\n\toutput.close()\r\n\tos.remove(\"dudeml_temp.bed\")\r\n\tos.remove(\"dudeml_temp2.bed\")\r\n\tos.remove(\"dup_temp_1.bed\")\r\n\tos.remove(\"del_temp_1.bed\")\r\n\tos.remove(\"dup_temp_2.bed\")\r\n\tos.remove(\"del_temp_2.bed\")\r\n\r\nelif argsDict['mode'] in ['fvecSample'] or function == \"fvecSample\":\r\n\timport os\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\timport gzip\r\n\tfrom shutil import copyfile\r\n\tpathOut = args.DIRECTORY\r\n\tif pathOut != \"\" and pathOut.endswith(\"/\") == False:\r\n\t\tpathOut += \"/\"\r\n\ttest = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\tif args.OUTPUT.endswith(\".gz\"):\r\n\t\toutput = open(args.OUTPUT.rstrip(\".gz\"), 'w')\r\n\telse:\r\n\t\toutput = open(args.OUTPUT,\"w\")\r\n\tif args.TE is not None:\r\n\t\tos.system(bedtools_path + \" intersect -v -wa -a \"+ args.INPUT + \" -b \" + args.TE + \" -f \" + str(args.CUTOFF) + \" > \"+ pathOut + \"dudeml_temp.bed\")\r\n\telif args.TE is None:\r\n\t\tcopyfile(args.INPUT, pathOut + \"dudeml_temp.bed\")\r\n\tv=args.WINDOW_SIZE\r\n\tif args.STEP_SIZE is not None:\r\n\t\tv=int(args.STEP_SIZE)\r\n\telif args.STEP_SIZE is None:\r\n\t\tv=int(args.WINDOW_SIZE)\r\n\twindow_pos = [[0,1,2,3,4,5]] * ((2*args.WINDOWS) + 1)\r\n\tcount = 0\r\n\tfor line in open(pathOut + \"dudeml_temp.bed\"):\r\n\t\tcount += 1\r\n\t\tif count % 100000 == 0:\r\n\t\t\tif args.QUIET == False:\r\n\t\t\t\tprint(int(count),\"windows processed\")\r\n\t\twindow_pos += [window_pos.pop(0)]\r\n\t\twindow_pos[(2*args.WINDOWS)] = line.rstrip().split()\r\n\t\tif len(list(set([item[0] for item in window_pos]))) == 1:\r\n\t\t\tcc = 0\r\n\t\t\tcv = 0\r\n\t\t\tfor k in window_pos:\r\n\t\t\t\tif int(k[1]) == int(window_pos[args.WINDOWS][1]) - (v*(args.WINDOWS- cc)):\r\n\t\t\t\t\tcv += 1\r\n\t\t\t\tcc += 1\r\n\t\t\tif cv == len(window_pos):\r\n\t\t\t\tcq = 
[str(window_pos[args.WINDOWS][0]),str(window_pos[args.WINDOWS][1]), str(window_pos[args.WINDOWS][2]), str(args.ID)]\r\n\t\t\t\tfor k in window_pos:\r\n\t\t\t\t\tcq.append(str(k[3]))\r\n\t\t\t\t\tcq.append(str(k[4]))\r\n\t\t\t\t\tcq.append(str(k[5]))\r\n\t\t\t\t\tcq.append(str(k[6]))\r\n\t\t\t\toutput.write(\"\\t\".join(cq) + \"\\n\")\r\n\tif args.OUTPUT.endswith(\".gz\"):\r\n\t\tos.system(\"gzip \" + args.OUTPUT.rstrip(\".gz\"))\r\n\tos.remove(pathOut + \"dudeml_temp.bed\")\r\n\r\nelif argsDict['mode'] in ['simCNV'] or function == \"simCNV\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tfrom Bio import SeqIO\r\n\timport random\r\n\timport os\r\n\tdf_del = pd.DataFrame(columns = [1,2,3,4])\r\n\tdf_dup = pd.DataFrame(columns = [1,2,3,4])\r\n\tpathOut = args.DIRECTORY\r\n\tif pathOut != \"\" and pathOut.endswith(\"/\") == False:\r\n\t\tpathOut += \"/\"\r\n\tout = open(pathOut + \"chrs.bed\",\"w\")\r\n\tif args.QUIET == False:\r\n\t\tprint(\"Generating duplication and deletion coordinates\")\r\n\tfor r in SeqIO.parse(open(args.FASTA),\"fasta\"):\r\n\t\tout.write(\"\\t\".join([r.id,\"1\",str(len(str(r.seq)))]) + \"\\n\")\r\n\t\tdup_lengths = []\r\n\t\tdel_lengths = []\r\n\t\tcnv_count = round((len(str(r.seq))/1000000)*args.CNV)\r\n\t\twhile len(dup_lengths) < cnv_count:\r\n\t\t\tx = round(np.random.normal(args.dupLength, args.CNVsize, 1)[0])\r\n\t\t\tif x > 50:\r\n\t\t\t\tdup_lengths.append(x)\r\n\t\twhile len(del_lengths) < cnv_count:\r\n\t\t\tx = round(np.random.normal(args.delLength, args.CNVsize, 1)[0])\r\n\t\t\tif x > 50:\r\n\t\t\t\tdel_lengths.append(x)\r\n\t\tdup_start = list(np.random.randint(len(str(r.seq)), size=(1, cnv_count))[0])\r\n\t\tdel_start = list(np.random.randint(len(str(r.seq)), size=(1, cnv_count))[0])\r\n\t\tdup_ends = list(map(int,[a + b for a, b in zip(dup_start, dup_lengths)]))\r\n\t\tdel_ends = list(map(int,[a + b for a, b in zip(del_start, del_lengths)]))\r\n\t\tdups = pd.DataFrame({1:[r.id]*cnv_count,2:dup_start,3:dup_ends,4:dup_lengths})\r\n\t\tdels = pd.DataFrame({1:[r.id]*cnv_count,2:del_start,3:del_ends,4:del_lengths})\r\n\t\tdf_dup = df_dup.append(dups)\r\n\t\tdf_del = df_del.append(dels)\r\n\tout.close()\r\n\tdf_dup.to_csv(pathOut + \"dup.bed\",header=False,index=False,sep=\"\\t\")\r\n\tdf_del.to_csv(pathOut + \"del.bed\",header=False,index=False,sep=\"\\t\")\r\n\tos.system(bedtools_path + \" sort -i \" + pathOut + \"dup.bed | \" + bedtools_path + \" merge -i stdin > \" + pathOut + \"dup2.bed\")\r\n\tos.system(bedtools_path + \" sort -i \" + pathOut + \"del.bed | \" + bedtools_path + \" merge -i stdin > \" + pathOut + \"del2.bed\")\r\n\tif args.TE is not None:\r\n\t\tos.system(bedtools_path + \" intersect -v -wa -a \"+ pathOut + \"del2.bed -b \" + args.TE + \" -f \" + str(args.CUTOFF) + \" > \"+ pathOut + \"del3.bed\")\r\n\t\tos.system(bedtools_path + \" intersect -v -wa -a \"+ pathOut + \"dup2.bed -b \" + args.TE + \" -f \" + str(args.CUTOFF) + \" > \"+ pathOut + \"dup3.bed\")\r\n\telif args.TE is None:\r\n\t\tos.system(\"cp \"+ pathOut + \"del2.bed \"+ pathOut + \"del3.bed\")\r\n\t\tos.system(\"cp \"+ pathOut + \"dup2.bed \"+ pathOut + \"dup3.bed\")\r\n\tos.system(bedtools_path + \" intersect -wa -v -a \" + pathOut + \"dup3.bed -b \" + pathOut + \"del3.bed > \" + pathOut + \"dup4.bed\")\r\n\tos.system(bedtools_path + \" intersect -wa -v -a \" + pathOut + \"del3.bed -b \" + pathOut + \"dup3.bed > \" + pathOut + \"del4.bed\")\r\n\tno_chrs = list(range(1, int(args.NUMBER)+1))\r\n\tchr_freq = {}\r\n\tfor i in no_chrs:\r\n\t\tchr_freq[i] = 
i/args.NUMBER\r\n\tno_chrs = list(range(1, int(args.NUMBER)+1))\r\n\tchr_freq = {}\r\n\tif args.QUIET == False:\r\n\t\tprint(\"Generating duplication and deletion frequencies\")\r\n\tfor i in no_chrs:\r\n\t\tchr_freq[i] = round(i/args.NUMBER,3)\r\n\tfor i in [\"del\",\"dup\"]:\r\n\t\tout = open(pathOut + str(i) + \"5.bed\",\"w\")\r\n\t\tfor line in open(pathOut + i + \"4.bed\"):\r\n\t\t\tif i == \"del\":\r\n\t\t\t\tnum = random.randint(1,args.NUMBER)\r\n\t\t\t\tout.write(line.rstrip() + \"\\tdel\\t\" + str(chr_freq[num]) + \"\\t0\\n\")\r\n\t\t\telif i == \"dup\":\r\n\t\t\t\tnum = random.randint(1,args.NUMBER)\r\n\t\t\t\tcount = np.random.choice([2,3,4,5,6,7,8,9,10], 1, p=[0.5, 0.1, 0.1, 0.05, 0.05,0.05,0.05,0.05,0.05])[0]\r\n\t\t\t\tfreqs = num/args.NUMBER\r\n\t\t\t\tcp = (count*freqs) + ((1-freqs) * 1)\r\n\t\t\t\twhile cp == 1.0:\r\n\t\t\t\t\tnum = random.randint(1,args.NUMBER)\r\n\t\t\t\t\tcount = np.random.choice([2,3,4,5,6,7,8,9,10], 1, p=[0.5, 0.1, 0.1, 0.05, 0.05,0.05,0.05,0.05,0.05])[0]\r\n\t\t\t\tout.write(line.rstrip() + \"\\tdup\\t\" + str(chr_freq[num]) + \"\\t\" + str(count) + \"\\n\")\r\n\t\tout.close()\r\n\t\tfor j in chr_freq:\r\n\t\t\tout = open(pathOut + i + \".\" + str(j) + \".bed\",\"w\")\r\n\t\t\tfor line in open(pathOut + i + \"5.bed\"):\r\n\t\t\t\tif float(line.split()[4]) >= chr_freq[j]:\r\n\t\t\t\t\tout.write(line)\r\n\t\t\tout.close()\r\n\tif args.QUIET == False:\r\n\t\tprint(\"Removing overlaps, generating total file\")\r\n\tfor i in no_chrs:\r\n\t\tprint(\"Creating bedfiles for sample \" + str(i))\r\n\t\tos.system(\"bedtools makewindows -b \" + pathOut + \"chrs.bed -w 5 > \" + pathOut + \"normal.\" + str(i) + \".bed\")\r\n\t\tos.system(bedtools_path + \" intersect -v -wa -a \" + pathOut + \"normal.\" + str(i) + \".bed -b \" + pathOut + \"dup.\" + str(i) + \".bed | \" + bedtools_path + \" intersect -v -wa -a stdin -b \" + pathOut + \"del.\" + str(i) + \".bed | \" + bedtools_path + \" sort -i stdin | \" + bedtools_path + \" merge -i stdin > \" + pathOut + \"normal2.\" + str(i) + \".bed\")\r\n\t\tout = open(pathOut + \"normal3.\" + str(i) + \".bed\",\"w\")\r\n\t\tfor line in open(pathOut + \"normal2.\" + str(i) + \".bed\"):\r\n\t\t\tout.write(line.rstrip() + \"\\tnormal\\t1\\t1\\n\")\r\n\t\tout.close()\r\n\t\tos.system(\"cat \" + pathOut + \"normal3.\" + str(i) + \".bed \" + pathOut + \"dup.\" + str(i) + \".bed \" + pathOut + \"del.\" + str(i) + \".bed | \" + bedtools_path + \" sort -i stdin > \" + pathOut + \"total.\" + str(i) + \".bed\")\r\n\t\tos.remove(pathOut + \"normal3.\" + str(i) + \".bed\")\r\n\t\tos.remove(pathOut + \"normal2.\" + str(i) + \".bed\")\r\n\t\tos.remove(pathOut + \"normal.\" + str(i) + \".bed\")\r\n\tos.remove(pathOut + \"del.bed\")\r\n\tos.remove(pathOut + \"del2.bed\")\r\n\tos.remove(pathOut + \"del3.bed\")\r\n\tos.remove(pathOut + \"del4.bed\")\r\n\tos.remove(pathOut + \"del5.bed\")\r\n\tos.remove(pathOut + \"dup.bed\")\r\n\tos.remove(pathOut + \"dup2.bed\")\r\n\tos.remove(pathOut + \"dup3.bed\")\r\n\tos.remove(pathOut + \"dup4.bed\")\r\n\tos.remove(pathOut + \"dup5.bed\")\r\n\tos.remove(pathOut + \"chrs.bed\")\r\n\r\nelif argsDict['mode'] in ['recreateTotal'] or function == \"recreateTotal\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tfrom Bio import SeqIO\r\n\timport random\r\n\timport os\r\n\tpathOut = args.DIRECTORY\r\n\tif pathOut != \"\" and pathOut.endswith(\"/\") == False:\r\n\t\tpathOut += \"/\"\r\n\tout = open(pathOut + \"chrs.bed\",\"w\")\r\n\tfor r in SeqIO.parse(open(args.FASTA),\"fasta\"):\r\n\t\tout.write(\"\\t\".join([r.id,\"1\",str(len(str(r.seq)))]) + \"\\n\")\r\n\tout.close()\r\n\tif args.QUIET == 
False:\r\n\t\tprint(\"recreating bedfiles for sample\")\r\n\t# rebuild the per-window total file from the supplied duplication and deletion bed files\r\n\tos.system(bedtools_path + \" makewindows -b \" + pathOut + \"chrs.bed -w 3 > \" + pathOut + \"normal.bed\")\r\n\tos.system(bedtools_path + \" intersect -v -wa -a \" + pathOut + \"normal.bed -b \" + args.DUPLICATION + \" | \" + bedtools_path + \" intersect -v -wa -a stdin -b \" + args.DELETION + \" | \" + bedtools_path + \" sort -i stdin | \" + bedtools_path + \" merge -i stdin > \" + pathOut + \"normal2.bed\")\r\n\tout = open(pathOut + \"normal3.bed\",\"w\")\r\n\tfor line in open(pathOut + \"normal2.bed\"):\r\n\t\tout.write(line.rstrip() + \"\\tnormal\\t1\\t1\\n\")\r\n\tout.close()\r\n\tos.system(\"cat \" + pathOut + \"normal3.bed \" + args.DUPLICATION + \" \" + args.DELETION + \" | \" + bedtools_path + \" sort -i stdin > \" + args.OUTPUT)\r\n\tos.remove(pathOut + \"normal3.bed\")\r\n\tos.remove(pathOut + \"normal2.bed\")\r\n\tos.remove(pathOut + \"normal.bed\")\r\n\r\nelif argsDict['mode'] in ['covSummary'] or function == \"covSummary\":\r\n\ttest = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\tcovs_median = {}\r\n\tcovs_std = {}\r\n\tcovs_mean = {}\r\n\tif args.CHROMOSOME is None:\r\n\t\tchrs = list(test[0].unique())\r\n\t\tfor i in chrs:\r\n\t\t\t# summary statistics of non-zero coverage for each chromosome\r\n\t\t\ttest2 = test[(test[2] != 0) & (test[0] == i)]\r\n\t\t\tcovs_median[i] = test2[2].median()\r\n\t\t\tcovs_mean[i] = test2[2].mean()\r\n\t\t\tcovs_std[i] = test2[2].std()\r\n\t\t\tprint(\"\\t\".join(list(map(str,[i,covs_median[i],covs_mean[i],covs_std[i]]))))\r\n\telif args.CHROMOSOME is not None:\r\n\t\tfor line in open(args.CHROMOSOME):\r\n\t\t\ti = line.split()[0].rstrip()\r\n\t\t\ttest2 = test[(test[2] != 0) & (test[0] == i)]\r\n\t\t\tcovs_median[i] = test2[2].median()\r\n\t\t\tcovs_mean[i] = test2[2].mean()\r\n\t\t\tcovs_std[i] = test2[2].std()\r\n\t\t\tprint(i,covs_median[i],covs_mean[i],covs_std[i])\r\n\t\tcovs_median[\"total\"] = test[2][test[2] != 0].median()\r\n\t\tcovs_mean[\"total\"] = test[2][test[2] != 0].mean()\r\n\t\tcovs_std[\"total\"] = test[2][test[2] != 0].std()\r\n\t\tif args.QUIET == False:\r\n\t\t\tprint(\"total\",covs_median[\"total\"],covs_mean[\"total\"],covs_std[\"total\"])\r\n\tif args.SUMMARY is not None:\r\n\t\tout = open(args.SUMMARY,\"w\")\r\n\t\tfor i in covs_median:\r\n\t\t\tif args.QUIET == False:\r\n\t\t\t\tprint(\"\\t\".join(list(map(str,[i,covs_median[i],covs_mean[i],covs_std[i]]))))\r\n\t\t\tout.write(\"\\t\".join(list(map(str,[i,covs_median[i],covs_mean[i],covs_std[i]]))) + \"\\n\")\r\n\t\tout.close()\r\n\r\nelif argsDict['mode'] in ['winStatExtra']:\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tcov = float(args.COVERAGE)\r\n\ttest = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\tv=100\r\n\tif args.STEP_SIZE is not None:\r\n\t\tv=int(args.STEP_SIZE)\r\n\telif args.STEP_SIZE is None:\r\n\t\tv=int(args.WINDOW_SIZE)\r\n\tdef rolling_with_step(chr,s, window, step):\r\n\t\t# vectorized sliding windows: each row of idx_array indexes one window of the coverage Series\r\n\t\tvert_idx_list = np.arange(0, s.size - window, step)\r\n\t\thori_idx_list = np.arange(window)\r\n\t\tA, B = np.meshgrid(hori_idx_list, vert_idx_list)\r\n\t\tidx_array = A + B\r\n\t\tx_array = s.values[idx_array]\r\n\t\tidx = list(s.index[vert_idx_list + (int(window))])\r\n\t\tmed = list(np.around(list(map(np.median, x_array)),4))\r\n\t\tstd = list(np.around(list(map(np.std, x_array)),4))\r\n\t\treturn pd.DataFrame({\"chr\":chr,\"start\":vert_idx_list,\"end\":vert_idx_list+window,\"med\":med,\"std\":std})\r\n\tout_df = 
pd.DataFrame(columns=[\"chr\",\"start\",\"end\",\"med\",\"std\"])\r\n\tif args.CHROMOSOME is None:\r\n\t\tchrs = list(test[0].unique())\r\n\t\tfor i in chrs:\r\n\t\t\ttest_chrs = test[test[0] == i]\r\n\t\t\ttest_chrs_3 = test_chrs[2]/cov\r\n\t\t\twins_step = rolling_with_step(i,test_chrs_3,args.WINDOW_SIZE,v)\r\n\t\t\tout_df = pd.concat([out_df,wins_step])\r\n\telif args.CHROMOSOME is not None:\r\n\t\tchrs = []\r\n\t\tfor line in open(args.CHROMOSOME):\r\n\t\t\tchrs.append(line.split()[0].rstrip())\r\n\t\tfor i in chrs:\r\n\t\t\ttest_chrs = test[test[0] == i]\r\n\t\t\ttest_chrs_3 = test_chrs[2]/cov\r\n\t\t\twins_step = rolling_with_step(i,test_chrs_3,args.WINDOW_SIZE,v)\r\n\t\t\tout_df = pd.concat([out_df,wins_step])\r\n\tout_df = out_df.replace(r'\\\\n','', regex=True)\r\n\tout_df.to_csv(args.OUTPUT,sep=\"\\t\",index =False,columns=None,header=None)\r\n\r\nelif argsDict['mode'] in ['subTrain'] or function == \"subTrain\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tif args.NUMBER <= 1.0:\r\n\t\tfract = float(args.NUMBER)\r\n\t\ttest = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\t\tout_df = pd.DataFrame(columns=test.columns)\r\n\t\tdict_types = test[3].value_counts().to_dict()\r\n\t\tfor i in dict_types:\r\n\t\t\tif dict_types[i] * fract < 10000.0:\r\n\t\t\t\tsubwin = test[test[3] ==i]\r\n\t\t\t\tout_df = pd.concat([out_df,subwin])\r\n\t\t\telse:\r\n\t\t\t\tsubwin = test[test[3] ==i].sample(replace = True, frac = fract)\r\n\t\t\t\tout_df = pd.concat([out_df,subwin])\r\n\telif args.NUMBER > 1:\r\n\t\tcount = int(args.NUMBER)\r\n\t\ttest = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\t\tout_df = pd.DataFrame(columns=test.columns)\r\n\t\tdict_types = test[3].value_counts().to_dict()\r\n\t\tfor i in dict_types:\r\n\t\t\tsubwin = test[test[3] ==i].sample(replace = True, n = count)\r\n\t\t\tout_df = pd.concat([out_df,subwin])\r\n\tout_df = out_df.round(3)\r\n\tout_df.to_csv(args.OUTPUT,sep=\"\\t\",index =False,columns=None,header=None)\r\n\r\nelif argsDict['mode'] in ['simReads'] or function == \"simReads\":\r\n\tfrom Bio import SeqIO\r\n\timport os\r\n\tcov = args.COVERAGE\r\n\tpathOut = args.DIRECTORY\r\n\tif pathOut != \"\" and pathOut.endswith(\"/\") == False:\r\n\t\tpathOut += \"/\"\r\n\tchr_lens = {}\r\n\tif args.SE == False:\r\n\t\tfor r in SeqIO.parse(open(args.FASTA),\"fasta\"):\r\n\t\t\tchr_lens[r.id] = len(str(r.seq))\r\n\t\tif args.CHROMOSOME is not None:\r\n\t\t\tfor line in open(args.CHROMOSOME,\"r\"):\r\n\t\t\t\tchr = line.split()[0].rstrip()\r\n\t\t\t\treads = round(chr_lens[chr]/(2*int(args.READ_LENGTH)))*int(cov)\r\n\t\t\t\tos.system(wgsim_path + \" -N \" + str(reads) + \" -1 \" + str(args.READ_LENGTH) + \" -2 \" + str(args.READ_LENGTH) + \" \" + pathOut + chr + \"_\" + args.ID + \"_CNV.fa \" + pathOut + chr + \"_1.fq \" + pathOut + chr + \"_2.fq > stdout\")\r\n\t\t\tfor line in open(args.CHROMOSOME,\"r\"):\r\n\t\t\t\tchr = line.split()[0].rstrip()\r\n\t\t\t\tos.system(\"cat \" + pathOut + chr + \"_1.fq >> \" + pathOut + args.ID + \"_\" + str(args.COVERAGE) + \"_1.fq\")\r\n\t\t\t\tos.system(\"cat \" + pathOut + chr + \"_2.fq >> \" + pathOut + args.ID + \"_\" + str(args.COVERAGE) + \"_2.fq\")\r\n\t\t\t\tos.remove(pathOut + chr + \"_1.fq\")\r\n\t\t\t\tos.remove(pathOut + chr + \"_2.fq\")\r\n\t\telif args.CHROMOSOME is None:\r\n\t\t\tfor chr in chr_lens:\r\n\t\t\t\treads = round(chr_lens[chr]/(2*int(args.READ_LENGTH)))*int(cov)\r\n\t\t\t\tos.system(wgsim_path + \" -N \" + str(reads) + \" 
-1 \" + str(args.READ_LENGTH) + \" -2 \" + str(args.READ_LENGTH) + \" \" + pathOut + chr + \"_\" + args.ID + \"_CNV.fa \" + pathOut + chr + \"_1.fq \" + pathOut + chr + \"_2.fq > stdout\")\r\n\t\t\tfor chr in chr_lens:\r\n\t\t\t\tos.system(\"cat \" + pathOut + chr + \"_1.fq >> \" + pathOut + args.ID + \"_\" + str(args.COVERAGE) + \"_1.fq\")\r\n\t\t\t\tos.system(\"cat \" + pathOut + chr + \"_2.fq >> \" + pathOut + args.ID + \"_\" + str(args.COVERAGE) + \"_2.fq\")\r\n\t\t\t\tos.remove(pathOut + chr + \"_1.fq\")\r\n\t\t\t\tos.remove(pathOut + chr + \"_2.fq\")\r\n\telif args.SE == True:\r\n\t\tfor r in SeqIO.parse(open(args.FASTA),\"fasta\"):\r\n\t\t\tchr_lens[r.id] = len(str(r.seq))\r\n\t\tif args.CHROMOSOME is not None:\r\n\t\t\tfor line in open(args.CHROMOSOME,\"r\"):\r\n\t\t\t\tchr = line.split()[0].rstrip()\r\n\t\t\t\treads = round(chr_lens[chr]/(int(args.READ_LENGTH)))*int(cov)\r\n\t\t\t\tos.system(wgsim_path + \" -N \" + str(reads) + \" -1 \" + str(args.READ_LENGTH) + \" \" + pathOut + chr + \"_\" + args.ID + \"_CNV.fa \" + pathOut + chr + \".fq /dev/null > stdout\")\r\n\t\t\tfor line in open(args.CHROMOSOME,\"r\"):\r\n\t\t\t\tchr = line.split()[0].rstrip()\r\n\t\t\t\tos.system(\"cat \" + pathOut + chr + \".fq >> \" + pathOut + args.ID + \"_\" + str(args.COVERAGE) + \".fq\")\r\n\t\t\t\tos.remove(pathOut + chr + \".fq\")\r\n\t\telif args.CHROMOSOME is None:\r\n\t\t\tfor chr in chr_lens:\r\n\t\t\t\treads = round(chr_lens[chr]/(2*int(args.READ_LENGTH)))*int(cov)\r\n\t\t\t\tos.system(wgsim_path + \" -N \" + str(reads) + \" -1 \" + str(args.READ_LENGTH) + \" \" + pathOut + chr + \"_\" + args.ID + \"_CNV.fa \" + pathOut + chr + \".fq /dev/null > stdout\")\r\n\t\t\tfor chr in chr_lens:\r\n\t\t\t\tos.system(\"cat \" + pathOut + chr + \".fq >> \" + pathOut + args.ID + \"_\" + str(args.COVERAGE) + \".fq\")\r\n\t\t\t\tos.remove(pathOut + chr + \".fq\")\r\n\r\nelif argsDict['mode'] in ['summarize'] or function == \"summarize\":\r\n\timport os\r\n\timport sys\r\n\timport math\r\n\timport shutil\r\n\tos.system(\"grep -w 'Del' \" + args.INPUT + \" | \" + bedtools_path + \" sort -i stdin | \" + bedtools_path + \" merge -c 4,6,7,8,9 -o distinct,mode,mode,mode,mode -d \" + str(args.WINDOW_SIZE) + \" -i stdin > del_temp_total.bed\")\r\n\tos.system(\"grep -w 'Dup' \" + args.INPUT + \" | \" + bedtools_path + \" sort -i stdin | \" + bedtools_path + \" merge -c 4,6,7,8,9 -o distinct,mode,mode,mode,mode -d \" + str(args.WINDOW_SIZE) + \" -i stdin > dup_temp_total.bed\")\r\n\tos.system(\"grep -v 'Dup' \" + args.INPUT + \" | grep -v 'Del' > non_temp_total.bed\")\r\n\tif args.DELETION is not None and args.DUPLICATION is not None:\r\n\t\tos.system(bedtools_path + \" intersect -wa -wb -a \" + args.DELETION + \" -b del_temp_total.bed > Del_temp_True-Positive.bed\")\r\n\t\tos.system(bedtools_path + \" intersect -wa -wb -a \" + args.DUPLICATION + \" -b dup_temp_total.bed > Dup_temp_True-Positive.bed\")\r\n\t\tos.system(bedtools_path + \" intersect -wa -v -a \" + args.DELETION + \" -b del_temp_total.bed > Del_temp_False-Negative.bed\")\r\n\t\tos.system(bedtools_path + \" intersect -wa -v -a \" + args.DUPLICATION + \" -b dup_temp_total.bed > Dup_temp_False-Negative.bed\")\r\n\t\tos.system(bedtools_path + \" intersect -wa -v -a del_temp_total.bed -b \" + args.DELETION + \" > Del_temp_False-Positive.bed\")\r\n\t\tos.system(bedtools_path + \" intersect -wa -v -a dup_temp_total.bed -b \" + args.DUPLICATION + \" > Dup_temp_False-Positive.bed\")\r\n\t\tfor i in [\"Del\",\"Dup\"]:\r\n\t\t\tout = open(i + 
\"_temp_False-Negative2.bed\", \"w\")\r\n\t\t\tfor line in open(i + \"_temp_False-Negative.bed\"):\r\n\t\t\t\tout.write(\"\\t\".join([line.split()[0],line.split()[1],line.split()[2],args.ID,i,\"1.0\",\"NA\",\"1.0\",\"False-Negative\"]) + \"\\n\")\r\n\t\t\tout.close()\r\n\t\t\tout = open(i + \"_temp_False-Positive2.bed\", \"w\")\r\n\t\t\tfor line in open(i + \"_temp_False-Positive.bed\"):\r\n\t\t\t\tout.write(line.rstrip() + \"\\tFalse-Positive\\n\")\r\n\t\t\tout.close()\r\n\t\t\tos.system(bedtools_path + \" sort -i \" + i + \"_temp_True-Positive.bed | \" + bedtools_path + \" merge -c 10,11,12,13,14 -o distinct,mode,mode,mode,mode -i stdin > \" + i + \"_temp_True-Positive2.bed\")\r\n\t\t\tout = open(i + \"_temp_True-Positive3.bed\",\"w\")\r\n\t\t\tfor line in open(i + \"_temp_True-Positive2.bed\"):\r\n\t\t\t\tout.write(line.rstrip() + \"\\tTrue-Positive\\n\")\r\n\t\t\tout.close()\r\n\t\tos.system(\"cat Del_temp_True-Positive3.bed Dup_temp_True-Positive3.bed Dup_temp_False-Positive2.bed Del_temp_False-Positive2.bed Del_temp_False-Negative2.bed Dup_temp_False-Negative2.bed | \" + bedtools_path + \" sort -i stdin > total_sum_temp.bed\")\r\n\t\tout = open(args.OUTPUT,\"w\")\r\n\t\tfor line in open(\"total_sum_temp.bed\"):\r\n\t\t\tif float(line.split()[5]) > args.CUTOFF:\r\n\t\t\t\tout.write(line)\r\n\t\tout.close()\r\n\t\tfor k in [\"dup_temp_total.bed\",\"del_temp_total.bed\",\"Dup_temp_True-Positive.bed\",\"Del_temp_True-Positive.bed\",\"Del_temp_False-Negative.bed\",\"Dup_temp_False-Negative.bed\",\"Del_temp_False-Positive.bed\",\"Dup_temp_False-Positive.bed\",\"Dup_temp_True-Positive2.bed\",\"Del_temp_True-Positive2.bed\",\"Del_temp_False-Negative2.bed\",\"Dup_temp_False-Negative2.bed\",\"Del_temp_False-Positive2.bed\",\"Dup_temp_False-Positive2.bed\",\"Dup_temp_True-Positive3.bed\",\"Del_temp_True-Positive3.bed\",\"total_sum_temp.bed\"]:\r\n\t\t\tos.remove(k)\r\n\telif args.DELETION is None and args.DUPLICATION is None:\r\n\t\tos.system(\"cat dup_temp_total.bed del_temp_total.bed | \" + bedtools_path + \" sort -i stdin > total_sum_temp.bed\")\r\n\t\tout = open(args.OUTPUT,\"w\")\r\n\t\tfor line in open(\"total_sum_temp.bed\"):\r\n\t\t\tif float(line.split()[5]) > args.CUTOFF:\r\n\t\t\t\t\tout.write(line)\r\n\t\tout.close()\r\n\t\tos.remove(\"dup_temp_total.bed\")\r\n\t\tos.remove(\"del_temp_total.bed\")\r\n\t\tos.remove(\"total_sum_temp.bed\")\r\n\r\nif argsDict['mode'] in ['ROC'] or function == \"ROC\":\r\n\timport pandas as pd\r\n\timport numpy as np\r\n\tfrom sklearn.ensemble import RandomForestClassifier\r\n\tfrom sklearn.datasets import make_classification\r\n\tfrom sklearn.externals import joblib\r\n\timport os\r\n\tfrom itertools import cycle\r\n\tfrom sklearn import svm, datasets\r\n\tfrom sklearn.metrics import roc_curve, auc\r\n\tfrom scipy import interp\r\n\tfrom sklearn import metrics\r\n\tfrom sklearn.tree import DecisionTreeClassifier\r\n\tfrom sklearn.neural_network import MLPClassifier\r\n\tfrom sklearn.ensemble import ExtraTreesClassifier\r\n\tmodels = {\"RFC100\":RandomForestClassifier(n_estimators=100), \"RFC500\":RandomForestClassifier(n_estimators=500), \"CNN\":MLPClassifier(), \"ETC100\":ExtraTreesClassifier(n_estimators=100), \"ETC500\":ExtraTreesClassifier(n_estimators=500), \"DTC\":DecisionTreeClassifier()}\r\n\ttraining_in = pd.read_csv(args.INPUT,header=None,sep=\"\\t\")\r\n\tclf = joblib.load(args.TRAIN)\r\n\tout_df = pd.DataFrame(columns=[\"type\",\"fpr\",\"tpr\"])\r\n\tfor i in [\"Del\",\"Dup\"]:\r\n\t\ttraining_in_subA = training_in[training_in[3] == \"N\" 
]\r\n\t\ttraining_in_subB = training_in[training_in[3] == i]\r\n\t\ttraining_in_subC = pd.concat([training_in_subA,training_in_subB])\r\n\t\ttraining_in_sub2 = training_in_subC.drop([0,1,2,3,4], axis=1)\r\n\t\ttraining_in_sub2.columns = list(range(0,len(training_in_sub2.columns)))\r\n\t\ttraining_in_subC.loc[training_in_subC[3] == \"N\", 3] = 2\r\n\t\ttraining_in_subC.loc[training_in_subC[3] == i, 3] = 1\r\n\t\ttraining_in_sub_prob = np.array(list(clf.predict_proba(training_in_sub2)[:, 1]))\r\n\t\tsub_in = np.array(list(training_in_subC[3]))\r\n\t\tfpr, tpr, threshold = roc_curve(sub_in, training_in_sub_prob, pos_label=2)\r\n\t\tsub_list = pd.DataFrame({\"type\":i,\"fpr\":list(fpr),\"tpr\":list(tpr)})\r\n\t\tout_df = pd.concat([out_df,sub_list])\r\n\tout_df.to_csv(args.OUTPUT,sep=\"\\t\",index =False)\r\n\r\nif argsDict['mode'] in ['quantify'] or function == \"quantify\":\r\n\timport pandas as pd\r\n\timport os\r\n\timport shutil\r\n\tdef myround(x, base=args.WINDOW_SIZE):\r\n\t\treturn base * round(x/base)\r\n\tdef factor_counts_gff(row):\r\n\t\trow_counts = []\r\n\t\tt = row.iloc[4:].value_counts()\r\n\t\trow_counts.append(row[0])\r\n\t\trow_counts.append(row[1])\r\n\t\trow_counts.append(row[2])\r\n\t\trow_counts.append(row[3])\r\n\t\trow_counts.append(sum(t[t.index == \"N\"]))\r\n\t\trow_counts.append(sum(t[t.index == \"Del\"]))\r\n\t\trow_counts.append(sum(t[t.index == \"Dup\"]))\r\n\t\treturn(row_counts)\r\n\tdef copy_counts_gff(row):\r\n\t\trow_counts = []\r\n\t\tt = row.iloc[4:].value_counts()\r\n\t\trow_counts.append(row[0])\r\n\t\trow_counts.append(row[1])\r\n\t\trow_counts.append(row[2])\r\n\t\trow_counts.append(row[3])\r\n\t\trow_counts.append(sum(t[t.index == 0.0]))\r\n\t\trow_counts.append(sum(t[t.index == 1.0]))\r\n\t\trow_counts.append(sum(t[t.index == 2.0]))\r\n\t\trow_counts.append(sum(t[t.index == 3.0]))\r\n\t\trow_counts.append(sum(t[t.index == 4.0]))\r\n\t\trow_counts.append(sum(t[t.index == 5.0]))\r\n\t\trow_counts.append(sum(t[t.index == 6.0]))\r\n\t\trow_counts.append(sum(t[t.index == 7.0]))\r\n\t\trow_counts.append(sum(t[t.index == 8.0]))\r\n\t\trow_counts.append(sum(t[t.index == 9.0]))\r\n\t\trow_counts.append(sum(t[t.index >= 10.0]))\r\n\t\treturn(row_counts)\r\n\tif args.GFF is not None:\r\n\t\tcomb_CN = pd.DataFrame(columns=[\"chr\",\"start\",\"end\",\"gene\"])\r\n\t\tcomb_CP = pd.DataFrame(columns=[\"chr\",\"start\",\"end\",\"gene\"])\r\n\t\tcount = 1\r\n\t\tfor line in open(args.INPUT,\"r\"):\r\n\t\t\tprint(\"processing \" + line.rstrip())\r\n\t\t\tos.system(bedtools_path + \"\"\" intersect -wa -wb -a \"\"\" + args.GFF + \"\"\" -b \"\"\" + line.rstrip() + \"\"\" | awk -F \"\\t\" '{print $1\"\\t\"$4\"\\t\"$5\"\\t\"$13\"\\t\"$15\"\\t\"$16\"\\t\"$17\"\\t\"$18}' > dudeml_temp1.bed\"\"\")\r\n\t\t\tos.system(bedtools_path + \"\"\" intersect -wa -wb -a \"\"\" + args.GFF + \"\"\" -b \"\"\" + line.rstrip() + \"\"\" | awk -F \"ID=\" '{print $2}' | awk -F \";\" '{print $1}' | awk -F \"-mRNA-1\" '{print $1}' > dudeml_temp2.bed\"\"\")\r\n\t\t\tos.system(\"paste dudeml_temp1.bed dudeml_temp2.bed > dudeml_temp3.bed\")\r\n\t\t\tos.mkdir('tempDir_bed')\r\n\t\t\tdf = pd.read_csv(\"dudeml_temp3.bed\",header = None,sep=\"\\t\")\r\n\t\t\tdf_grouped = df.groupby(8)\r\n\t\t\tfor index, group in df_grouped:\r\n\t\t\t\tgroup.to_csv(\"tempDir_bed/\" + index,sep=\"\\t\",index =False,header=False)\r\n\t\t\t\t# os.system(bedtools_path + \" sort -i tempDir_bed/\" + index + \" | mergeBed -i stdin -c 4,5,6,7,8,9 -o distinct,mode,median,mode,median,distinct >> 
dudeml_temp4.bed\")\r\n\t\t\tos.system(\"\"\"for file in tempDir_bed/*; do \"\"\" + bedtools_path + \"\"\" sort -i ${file} | \"\"\" + bedtools_path + \"\"\" merge -i stdin -c 4,5,6,7,8,9 -o distinct,mode,median,mode,median,distinct >> dudeml_temp4.bed; done\"\"\")\r\n\t\t\t#for v in list(df[8].unique()):\r\n\t\t\t#\tsub = df[df[8] == v]\r\n\t\t\t#\tcomb_CP4.to_csv(\"tempDir_bed/\" + v ,sep=\"\\t\",index =False,header=False)\r\n\t\t\t#for line in open(\"dudeml_temp3.bed\",\"r\"):\r\n\t\t\t#\tout = open(\"tempDir_bed/\" + line.rstrip().split(\"\\t\")[-1],\"a\")\r\n\t\t\t#\t\tout.write(line)\r\n\t\t\t#for d,s,f in os.walk(\"tempDir_bed/\"):\r\n\t\t\t#\tfor inf in f:\r\n\t\t\t#\t\tos.system(bedtools_path + \" sort -i tempDir_bed/\" + inf + \" | mergeBed -i stdin -c 4,5,6,7,8,9 -o distinct,mode,median,mode,median,distinct >> dudeml_temp4.bed\")\r\n\t\t\tshutil.rmtree(\"tempDir_bed/\")\r\n\t\t\tos.system(bedtools_path + \" sort -i dudeml_temp4.bed > dudeml_temp5.bed\")\r\n\t\t\tos.remove(\"dudeml_temp4.bed\")\r\n\t\t\t# os.system(bedtools_path + \" sort -i dudeml_temp3.bed | mergeBed -i stdin -c 4,5,6,7,8,9 -o distinct,mode,median,mode,median,distinct > dudeml_temp4.bed\")\r\n\t\t\tdf = pd.read_csv(\"dudeml_temp5.bed\",header = None,sep=\"\\t\")\r\n\t\t\tdf.columns = [\"chr\",\"start\",\"end\",\"strain\",\"CNV\",\"CNVprob\",\"CP\",\"CPprob\",\"gene\"]\r\n\t\t\tdf.loc[(df['CNV'] == \"Dup\") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = \"N\"\r\n\t\t\tdf.loc[(df['CNV'] == \"Del\") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = \"N\"\r\n\t\t\tcomb_CN['chr'] = df['chr']\r\n\t\t\tcomb_CN['start'] = df['start']\r\n\t\t\tcomb_CN['end'] = df['end']\r\n\t\t\tcomb_CN['gene'] = df['gene']\r\n\t\t\tcomb_CP['chr'] = df['chr']\r\n\t\t\tcomb_CP['start'] = df['start']\r\n\t\t\tcomb_CP['end'] = df['end']\r\n\t\t\tcomb_CP['gene'] = df['gene']\r\n\t\t\tif pd.isnull(df['strain'][0]) == False:\r\n\t\t\t\tcomb_CP[str(df['strain'][0])] = df[\"CP\"]\r\n\t\t\t\tcomb_CN[str(df['strain'][0])] = df[\"CNV\"]\r\n\t\t\t\tcount += 1\r\n\t\t\telif pd.isnull(df['strain'][0]) == True:\r\n\t\t\t\tcomb_CP[str(count)] = df[\"CP\"]\r\n\t\t\t\tcomb_CN[str(count)] = df[\"CNV\"]\r\n\t\t\t\tcount += 1\r\n\t\tcomb_CP.to_csv(args.OUTPUT + \".copy_raw.txt\",sep=\"\\t\",index =False)\r\n\t\tcomb_CN.to_csv(args.OUTPUT + \".factor_raw.txt\",sep=\"\\t\",index =False)\r\n\t\tprint(\"Quantify CNVs in each window.\")\r\n\t\tcomb_CP2 = comb_CP.apply(copy_counts_gff, axis=1)\r\n\t\tcomb_CN2 = comb_CN.apply(factor_counts_gff, axis=1)\r\n\t\tcomb_CP3 = pd.DataFrame(comb_CP2)\r\n\t\tcomb_CN3 = pd.DataFrame(comb_CN2)\r\n\t\tcomb_CP4 = pd.DataFrame()\r\n\t\tcomb_CN4 = pd.DataFrame()\r\n\t\tcomb_CN4[[\"chr\",\"start\",\"end\",\"gene\",\"N\",\"Del\",\"Dup\"]] = pd.DataFrame(comb_CN3[0].values.tolist(), index= comb_CN3.index)\r\n\t\tcomb_CP4[[\"chr\",\"start\",\"end\",\"gene\",\"0.0\",\"1.0\",\"2.0\",\"3.0\",\"4.0\",\"5.0\",\"6.0\",\"7.0\",\"8.0\",\"9.0\",\">=10.0\"]] = pd.DataFrame(comb_CP3[0].values.tolist(), index= comb_CP3.index)\r\n\t\tcomb_CP4.to_csv(args.OUTPUT + \".copy.txt\",sep=\"\\t\",index =False)\r\n\t\tcomb_CN4.to_csv(args.OUTPUT + \".factor.txt\",sep=\"\\t\",index =False)\r\n\t\tos.remove(\"dudeml_temp1.bed\")\r\n\t\tos.remove(\"dudeml_temp2.bed\")\r\n\t\tos.remove(\"dudeml_temp3.bed\")\r\n\t\tos.remove(\"dudeml_temp5.bed\")\r\n\telif args.GFF is None:\r\n\t\tdef copy_counts(row):\r\n\t\t\trow_counts = []\r\n\t\t\tt = 
row.iloc[3:].value_counts()\r\n\t\t\trow_counts.append(row[0])\r\n\t\t\trow_counts.append(row[1])\r\n\t\t\trow_counts.append(row[2])\r\n\t\t\trow_counts.append(sum(t[t.index == 0.0]))\r\n\t\t\trow_counts.append(sum(t[t.index == 1.0]))\r\n\t\t\trow_counts.append(sum(t[t.index == 2.0]))\r\n\t\t\trow_counts.append(sum(t[t.index == 3.0]))\r\n\t\t\trow_counts.append(sum(t[t.index == 4.0]))\r\n\t\t\trow_counts.append(sum(t[t.index >= 5.0]))\r\n\t\t\treturn(row_counts)\r\n\t\tdef factor_counts(row):\r\n\t\t\trow_counts = []\r\n\t\t\tt = row.iloc[3:].value_counts()\r\n\t\t\trow_counts.append(row[0])\r\n\t\t\trow_counts.append(row[1])\r\n\t\t\trow_counts.append(row[2])\r\n\t\t\trow_counts.append(sum(t[t.index == \"N\"]))\r\n\t\t\trow_counts.append(sum(t[t.index == \"Del\"]))\r\n\t\t\trow_counts.append(sum(t[t.index == \"Dup\"]))\r\n\t\t\treturn(row_counts)\r\n\t\tcomb_CN = pd.DataFrame(columns=[\"chr\",\"start\",\"end\"])\r\n\t\tcomb_CP = pd.DataFrame(columns=[\"chr\",\"start\",\"end\"])\r\n\t\tcount = 1\r\n\t\tfor line in open(args.INPUT,\"r\"):\r\n\t\t\tprint(\"processing \" + line.rstrip())\r\n\t\t\tdf = pd.read_csv(line.rstrip(),header = None,sep=\"\\t\")\r\n\t\t\tdf.columns = [\"chr\",\"start\",\"end\",\"strain\",\"cov\",\"CNV\",\"CNVprob\",\"CP\",\"CPprob\"]\r\n\t\t\tdf.loc[(df['CNV'] == \"Dup\") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = \"N\"\r\n\t\t\tdf.loc[(df['CNV'] == \"Del\") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = \"N\"\r\n\t\t\tcomb_CN['chr'] = df['chr']\r\n\t\t\tcomb_CN['start'] = df['start']\r\n\t\t\tcomb_CN['end'] = df['end']\r\n\t\t\tcomb_CP['chr'] = df['chr']\r\n\t\t\tcomb_CP['start'] = df['start']\r\n\t\t\tcomb_CP['end'] = df['end']\r\n\t\t\tif pd.isnull(df['strain'][0]) == False:\r\n\t\t\t\tcomb_CP[str(df['strain'][0])] = df[\"CP\"]\r\n\t\t\t\tcomb_CN[str(df['strain'][0])] = df[\"CNV\"]\r\n\t\t\t\tcount += 1\r\n\t\t\telif pd.isnull(df['strain'][0]) == True:\r\n\t\t\t\tcomb_CP[str(count)] = df[\"CP\"]\r\n\t\t\t\tcomb_CN[str(count)] = df[\"CNV\"]\r\n\t\t\t\tcount += 1\r\n\t\tprint(\"Quantify CNVs in each window.\")\r\n\t\tcomb_CP2 = comb_CP.apply(copy_counts, axis=1)\r\n\t\tcomb_CN2 = comb_CN.apply(factor_counts, axis=1)\r\n\t\tcomb_CP3 = pd.DataFrame(comb_CP2)\r\n\t\tcomb_CN3 = pd.DataFrame(comb_CN2)\r\n\t\tcomb_CP4 = pd.DataFrame()\r\n\t\tcomb_CP4[[\"chr\",\"start\",\"end\",\"0.0\",\"1.0\",\"2.0\",\"3.0\",\"4.0\",\">=5.0\"]] = pd.DataFrame(comb_CP3[0].values.tolist(), index= comb_CP3.index)\r\n\t\tcomb_CN4 = pd.DataFrame()\r\n\t\tcomb_CN4[[\"chr\",\"start\",\"end\",\"N\",\"Del\",\"Dup\"]] = pd.DataFrame(comb_CN3[0].values.tolist(), index= comb_CN3.index)\r\n\t\tkeep = (comb_CN4['Del'] != 0) | (comb_CN4['Dup'] != 0)\r\n\t\tcomb_CN4 = comb_CN4.loc[keep]\r\n\t\tcomb_CP4 = comb_CP4.loc[keep]\r\n\t\tcomb_CP4.to_csv(args.OUTPUT + \".copy\",sep=\"\\t\",index =False)\r\n\t\tcomb_CN4.to_csv(args.OUTPUT + \".factor\",sep=\"\\t\",index =False)\r\n", "sub_path": "dudeML.py", "file_name": "dudeML.py", "file_ext": "py", "file_size_in_byte": 57536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.HelpFormatter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.HelpFormatter._split_lines", "line_number": 17, "usage_type": "call"}, {"api_name": "argparse.HelpFormatter", "line_number": 17, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 161, "usage_type": "attribute"}, 
{"api_name": "sys.exit", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 198, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 198, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 199, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 199, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 201, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 214, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 215, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path", "line_number": 218, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 218, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 227, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 237, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 237, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 238, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 238, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 260, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 260, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 260, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 260, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 261, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 268, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 270, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 270, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 271, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 271, "usage_type": "name"}, {"api_name": "os.system", "line_number": 286, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.around", "line_number": 314, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 314, "usage_type": "attribute"}, {"api_name": "scipy.stats", 
"line_number": 314, "usage_type": "name"}, {"api_name": "numpy.around", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.around", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 316, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 317, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 329, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 334, "usage_type": "call"}, {"api_name": "os.system", "line_number": 344, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 349, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 349, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 369, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 381, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 383, "usage_type": "call"}, {"api_name": "os.system", "line_number": 386, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 388, "usage_type": "call"}, {"api_name": "os.system", "line_number": 404, "usage_type": "call"}, {"api_name": "os.system", "line_number": 405, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 459, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 460, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 461, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 462, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 463, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 464, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 475, "usage_type": "call"}, {"api_name": "os.system", "line_number": 481, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 483, "usage_type": "call"}, {"api_name": "os.system", "line_number": 514, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 515, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 523, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 524, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 531, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 531, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 537, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 541, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 541, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 544, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 545, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 545, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 548, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 549, "usage_type": "call"}, {"api_name": "os.system", "line_number": 555, "usage_type": "call"}, {"api_name": "os.system", "line_number": 556, "usage_type": "call"}, {"api_name": "os.system", "line_number": 558, "usage_type": "call"}, {"api_name": "os.system", "line_number": 559, "usage_type": "call"}, {"api_name": "os.system", "line_number": 561, "usage_type": "call"}, {"api_name": "os.system", 
"line_number": 562, "usage_type": "call"}, {"api_name": "os.system", "line_number": 563, "usage_type": "call"}, {"api_name": "os.system", "line_number": 564, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 579, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 582, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 583, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 588, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 588, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 601, "usage_type": "call"}, {"api_name": "os.system", "line_number": 602, "usage_type": "call"}, {"api_name": "os.system", "line_number": 607, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 608, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 609, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 610, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 611, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 612, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 613, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 614, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 615, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 616, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 617, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 618, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 619, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 620, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 621, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 630, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 630, "usage_type": "name"}, {"api_name": "os.system", "line_number": 638, "usage_type": "call"}, {"api_name": "os.system", "line_number": 639, "usage_type": "call"}, {"api_name": "os.system", "line_number": 644, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 645, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 646, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 647, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 650, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 687, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 694, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 695, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 696, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 700, "usage_type": "attribute"}, {"api_name": "numpy.around", "line_number": 701, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 701, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 702, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 703, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 711, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 720, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 729, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 730, "usage_type": 
"call"}, {"api_name": "pandas.concat", "line_number": 735, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 738, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 741, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 742, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 746, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 759, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 759, "usage_type": "name"}, {"api_name": "os.system", "line_number": 765, "usage_type": "call"}, {"api_name": "os.system", "line_number": 768, "usage_type": "call"}, {"api_name": "os.system", "line_number": 769, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 770, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 771, "usage_type": "call"}, {"api_name": "os.system", "line_number": 775, "usage_type": "call"}, {"api_name": "os.system", "line_number": 777, "usage_type": "call"}, {"api_name": "os.system", "line_number": 778, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 779, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 780, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 782, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 782, "usage_type": "name"}, {"api_name": "os.system", "line_number": 788, "usage_type": "call"}, {"api_name": "os.system", "line_number": 791, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 792, "usage_type": "call"}, {"api_name": "os.system", "line_number": 796, "usage_type": "call"}, {"api_name": "os.system", "line_number": 798, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 799, "usage_type": "call"}, {"api_name": "os.system", "line_number": 806, "usage_type": "call"}, {"api_name": "os.system", "line_number": 807, "usage_type": "call"}, {"api_name": "os.system", "line_number": 808, "usage_type": "call"}, {"api_name": "os.system", "line_number": 810, "usage_type": "call"}, {"api_name": "os.system", "line_number": 811, "usage_type": "call"}, {"api_name": "os.system", "line_number": 812, "usage_type": "call"}, {"api_name": "os.system", "line_number": 813, "usage_type": "call"}, {"api_name": "os.system", "line_number": 814, "usage_type": "call"}, {"api_name": "os.system", "line_number": 815, "usage_type": "call"}, {"api_name": "os.system", "line_number": 825, "usage_type": "call"}, {"api_name": "os.system", "line_number": 830, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 837, "usage_type": "call"}, {"api_name": "os.system", "line_number": 839, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 845, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 846, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 847, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 864, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 864, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 864, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 864, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 865, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 866, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 866, "usage_type": "name"}, {"api_name": "pandas.DataFrame", 
"line_number": 867, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 871, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 876, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 877, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 878, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 879, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 880, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 920, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 921, "usage_type": "call"}, {"api_name": "os.system", "line_number": 925, "usage_type": "call"}, {"api_name": "os.system", "line_number": 926, "usage_type": "call"}, {"api_name": "os.system", "line_number": 927, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 928, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 929, "usage_type": "call"}, {"api_name": "os.system", "line_number": 934, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 944, "usage_type": "call"}, {"api_name": "os.system", "line_number": 945, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 946, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 948, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 960, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 964, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 973, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 974, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 975, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 976, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 977, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 978, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 981, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 982, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 983, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 984, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1009, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1010, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1014, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 1024, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 1028, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1035, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1036, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1037, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1038, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1039, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1040, "usage_type": "call"}]} +{"seq_id": "20541919", "text": "#!/usr/bin/env python3\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2017 Erik Perillo \n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons 
to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\"\"\"\n\nimport tensorflow as tf\nimport sys\nimport random\nimport shutil\nimport time\nimport itertools\nimport numpy as np\nimport os\n\nimport util\nimport model\nfrom config import infer as conf\nimport config\nfrom dproc import infer_load as load\nfrom dproc import infer_pre_proc as pre_proc\nfrom dproc import infer_save_x as save_x\nfrom dproc import infer_save_y_pred as save_y_pred\nfrom dproc import infer_save_y_true as save_y_true\n\ndef predict(x, fn):\n x = x.reshape((1, ) + x.shape)\n y_pred = fn(x)\n y_pred = y_pred.reshape(y_pred.shape[2:])\n return y_pred\n\ndef mk_preds_dir(base_dir, pattern=\"train\"):\n \"\"\"\n Creates dir to store predictions.\n \"\"\"\n #creating dir\n out_dir = util.uniq_path(base_dir, pattern)\n os.makedirs(out_dir)\n return out_dir\n\ndef main():\n if conf[\"rand_seed\"] is not None:\n random.seed(conf[\"rand_seed\"])\n\n if conf[\"input_fps\"] is None:\n if len(sys.argv) < 2:\n print(\"usage: {} \".format(sys.argv[0]))\n exit()\n else:\n input_fps = sys.argv[1]\n else:\n input_fps = conf[\"input_fps\"]\n\n if isinstance(input_fps, str):\n input_fps = [input_fps]\n\n if conf[\"shuffle_input_fps\"]:\n random.shuffle(input_fps)\n\n preds = None\n trues = None\n\n #creating base dir if needed\n if not os.path.isdir(conf[\"preds_save_dir_basedir\"]):\n os.makedirs(conf[\"preds_save_dir_basedir\"])\n #creating preds dir\n preds_dir = mk_preds_dir(conf[\"preds_save_dir_basedir\"], \"preds\")\n #copying model dir\n util.mk_model_dir(preds_dir)\n\n #meta-model\n meta_model = model.MetaModel()\n\n with tf.Session(graph=tf.Graph()) as sess:\n #loading model weights\n print(\"loading model\", flush=True)\n model.load(sess, conf[\"model_path\"])\n print(\"setting params\", flush=True)\n meta_model.set_params_from_colls()\n\n #building functions\n print(\"getting pred fn\", flush=True)\n _pred_fn = meta_model.get_pred_fn(sess)\n pred_fn = lambda x: predict(x, _pred_fn)\n\n print(\"iterating\", flush=True)\n indexes = None\n #iterating over images doing predictions\n for i, fp in enumerate(input_fps):\n print(\"on image '{}'\".format(fp))\n\n x, y_true = load(fp)\n\n print(\"\\tpredicting...\")\n print(\"\\tx shape:\", x.shape)\n x_ = pre_proc(x.copy())\n start_time = time.time()\n y_pred = pred_fn(x_)\n pred_time = time.time() - start_time\n print(\"\\tdone predicting. 
took {:.6f} seconds\".format(pred_time))\n print(\"\\ty_pred shape:\", y_pred.shape)\n\n if conf[\"save_tables\"]:\n #getting indexes\n if indexes is None:\n pts_per_img = y_pred.size\n if conf[\"max_pred_points\"] is not None:\n pts_per_img = min(\n conf[\"max_pred_points\"]//len(input_fps),\n pts_per_img)\n indexes = list(range(pts_per_img))\n\n if len(indexes) < y_pred.size:\n random.shuffle(indexes)\n if preds is None:\n preds = y_pred.flatten()[indexes]\n else:\n preds = np.vstack((preds, y_pred.flatten()[indexes]))\n if conf[\"with_trues\"]:\n if trues is None:\n trues = y_true.flatten()[indexes]\n else:\n trues = np.vstack((trues, y_true.flatten()[indexes]))\n\n if conf[\"max_n_preds_save\"] is None or i < conf[\"max_n_preds_save\"]:\n fn = os.path.basename(fp)\n name = fn.split(\".\")[0]\n ext = (\".\" + fn.split(\".\")[-1]) if \".\" in fn else \"\"\n\n #saving x\n if save_x is not None:\n save_x(x, preds_dir, name)\n #saving prediction\n if save_y_pred is not None:\n save_y_pred(y_pred, preds_dir, name)\n #saving ground-truth\n if save_y_true is not None and conf[\"with_trues\"]:\n save_y_true(y_true, preds_dir, name)\n\n #saving predictions\n if conf[\"save_tables\"] and preds is not None:\n fp = os.path.join(preds_dir, \"table.npz\")\n print(\"saving table to '{}'...\".format(fp, flush=True))\n if conf[\"with_trues\"]:\n np.savez(fp, y_pred=preds, y_true=trues, x_fp=input_fps)\n else:\n np.savez(fp, y_pred=preds, x_fp=input_fps)\n\n print(\"saved everything to\", preds_dir)\n\nif __name__ == \"__main__\":\n main()\n\n", "sub_path": "att/upeek/upeek/infer.py", "file_name": "infer.py", "file_ext": "py", "file_size_in_byte": 5854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "util.uniq_path", "line_number": 57, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 58, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 62, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 63, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 63, "usage_type": "name"}, {"api_name": "config.infer", "line_number": 65, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 70, "usage_type": "attribute"}, {"api_name": "config.infer", "line_number": 72, "usage_type": "name"}, {"api_name": "config.infer", "line_number": 77, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "config.infer", "line_number": 84, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 85, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 85, "usage_type": "name"}, {"api_name": "config.infer", "line_number": 87, "usage_type": "name"}, {"api_name": "util.mk_model_dir", "line_number": 89, "usage_type": "call"}, {"api_name": "model.MetaModel", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 94, "usage_type": "call"}, {"api_name": "model.load", "line_number": 97, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 97, "usage_type": "name"}, {"api_name": "dproc.infer_load", "line_number": 112, 
"usage_type": "call"}, {"api_name": "dproc.infer_pre_proc", "line_number": 116, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 119, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 123, "usage_type": "name"}, {"api_name": "config.infer", "line_number": 127, "usage_type": "name"}, {"api_name": "config.infer", "line_number": 129, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 138, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 143, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 145, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "dproc.infer_save_x", "line_number": 151, "usage_type": "name"}, {"api_name": "dproc.infer_save_x", "line_number": 152, "usage_type": "call"}, {"api_name": "dproc.infer_save_y_pred", "line_number": 154, "usage_type": "name"}, {"api_name": "dproc.infer_save_y_pred", "line_number": 155, "usage_type": "call"}, {"api_name": "dproc.infer_save_y_true", "line_number": 157, "usage_type": "name"}, {"api_name": "config.infer", "line_number": 157, "usage_type": "name"}, {"api_name": "dproc.infer_save_y_true", "line_number": 158, "usage_type": "call"}, {"api_name": "config.infer", "line_number": 161, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "config.infer", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "145846894", "text": "#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.optimize import fsolve\nimport phd.viz\n_, palette = phd.viz.phd_style()\nsns.set_palette('magma')\n\n# Define functions to be used in figure\ndef pact(IPTG, K_A, K_I, e_AI):\n '''\n Computes the probability that a repressor is active\n Parameters\n ----------\n IPTG : array-like\n Array of IPTG concentrations in uM\n K_A : float\n Dissociation constant for active repressor\n K_I : float\n Dissociation constant for inactive repressor\n e_AI : float\n Energetic difference between the active and inactive state\n Returns\n -------\n probability that repressor is active\n '''\n pact = (1 + IPTG * 1 / K_A)**2 / \\\n (((1 + IPTG * 1 / K_A))**2 + np.exp(-e_AI) * (1 + IPTG * 1 / K_I)**2)\n return pact\n\n\ndef fugacity(IPTG, R, Ns, e_s, K_A=139E-6, K_I=0.53E-6, e_AI=4.5, Nc=0, e_c=0):\n '''\n Solves for the fugacity of simple repression with\n multiple promoter copies (Ns, with energy e_s) or competitor sites\n (Nc, with energy e_c).\n Parameters\n ----------\n R : float\n Number of repressors per cell\n e_AI : float\n Energetic difference between the active and inactive state\n Ns : float\n Number of specific operators available for repressor binding\n Nc : float\n Number of competitor operators available for repressor binding\n e_s : float\n Binding energy between specific operator and repressor as inferred in\n Garcia 2011\n e_c : float\n Binding energy between competitor operator and repressor\n K_A : float\n Dissociation constant for active 
repressor\n K_I : float\n Dissociation constant for inactive repressor\n e_AI : float\n Energetic difference between the active and inactive state\n Returns\n -------\n fugacity at each IPTG concentration\n '''\n NNS = 4.6E6\n lam = []\n\n def func(x): return -Reff + Ns * (x * np.exp(-e_s)) / (1 + x * np.exp(-e_s)) +\\\n NNS * (x) / (1 + x) + \\\n Nc * (x * np.exp(-e_c)) / (1 + x * np.exp(-e_c))\n for c in IPTG:\n Reff = R * pact(c, K_A, K_I, e_AI)\n lam.append(fsolve(func, 0))\n return np.array(lam)\n\n\ndef occupancy(lam, e_s):\n '''\n Computes fold-change for simple repression using the fugacity (lam).\n Parameters\n ----------\n lam : float\n fugacity of system as calculated by fugacity()\n e_s : float\n binding energy of specific operator\n Returns\n -------\n fold-change (occupancy)\n '''\n return 1 / (1 + lam * np.exp(-(e_s)))\n\n\n# Define parameter values\nops = [-15.3, -13.9, -9.7]\nop_names = ['O1', 'O2', 'O3']\nfig_labels = ['(A)', '(B)', '(C)']\nNc = [1, 10, 50, 100, 250, 500]\nNs = [1]\nIPTG = np.logspace(-8, -2, 100)\nR = 260\ne_c = -17.0\n\n# Plot figure\nfig, ax = plt.subplots(ncols=3, sharey=False, figsize=(6, 2))\nphd.viz.despine(ax)\nfor i, a in enumerate(ax):\n for N in Nc:\n lam_array = fugacity(IPTG, R, Ns=1, e_s=ops[i], Nc=N, e_c=e_c)\n fc = occupancy(lam_array, ops[i])\n a.plot(IPTG*1E6, fc, label=N,)\n a.set_xscale('log')\n a.set_ylabel('fold-change')\n a.set_xlabel('IPTG [µM]')\n a.set_ylim(-0.01, 1.1)\n a.set_xlim(1E-2, 1E4)\n\n # Add figure text\n phd.viz.titlebox(a,r'%s $\\Delta \\varepsilon_{RA}= %0.1f\\ k_BT$' % (\n op_names[i], ops[i]), bgcolor='white', color=_['black'],\n boxsize='12%', pad=0.05, size=6)\n a.text(-0.32, 1.05, fig_labels[i], transform=a.transAxes,\n fontsize=8)\n\n# Add legend\nleg1 = ax[2].legend(title=r'$N_c$', loc='lower right', fontsize=6)\nleg1.get_title().set_fontsize(6)\nplt.tight_layout()\nplt.savefig('../figs/ch6_figS5.pdf', bbox_inches='tight')\nplt.savefig('../figs/ch6_figS5.png', bbox_inches='tight')\n\n", "sub_path": "src/chapter_06/code/ch6_figS5.py", "file_name": "ch6_figS5.py", "file_ext": "py", "file_size_in_byte": 3787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "phd.viz.viz.phd_style", "line_number": 7, "usage_type": "call"}, {"api_name": "phd.viz.viz", "line_number": 7, "usage_type": "attribute"}, {"api_name": "phd.viz", "line_number": 7, "usage_type": "name"}, {"api_name": "seaborn.set_palette", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "phd.viz.viz.despine", "line_number": 103, "usage_type": "call"}, {"api_name": "phd.viz.viz", "line_number": 103, "usage_type": "attribute"}, {"api_name": "phd.viz", "line_number": 103, "usage_type": "name"}, {"api_name": "phd.viz.viz.titlebox", "line_number": 116, "usage_type": "call"}, {"api_name": "phd.viz.viz", "line_number": 116, "usage_type": 
"attribute"}, {"api_name": "phd.viz", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}]} +{"seq_id": "639915911", "text": "\"\"\"\"Indigo UI URLs\n\nCopyright 2015 Archive Analytics Solutions\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.views.generic import TemplateView\n\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n url(r'^$', 'indigo_ui.views.home', name='home'),\n url(r'^archive/', include('archive.urls', namespace=\"archive\")),\n url(r'^node/', include('nodes.urls', namespace=\"nodes\")),\n url(r'^users/', include('users.urls', namespace=\"users\")),\n url(r'^groups/', include('groups.urls', namespace=\"groups\")),\n url(r'^activity/', include('activity.urls', namespace=\"activity\")),\n\n url(r'^about$', TemplateView.as_view(template_name='about.html'), name='about'),\n url(r'^contact$', TemplateView.as_view(template_name='contact.html'), name='contact'),\n\n url(r'^api/cdmi/', include('cdmi.urls', namespace=\"cdmi\")),\n url(r'^api/admin/', include('admin.urls', namespace=\"admin\")),\n url(r'^api/triple/', include('triple_api.urls', namespace=\"triple_api\")),\n url(r'^triple/', include('triple_ui.urls', namespace=\"triple_ui\")),\n url(r'^listener/', include('listener.urls', namespace=\"listener\")),\n\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n", "sub_path": "indigo-web/indigo_ui/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", 
"line_number": 33, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 42, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "417646417", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom typing import Callable, Any\nfrom context import in3120\n\n\ndef data_path(filename: str):\n return \"../data/\" + filename\n\n\ndef simple_repl(prompt: str, evaluator: Callable[[str], Any]):\n from timeit import default_timer as timer\n import pprint\n printer = pprint.PrettyPrinter()\n escape = \"!\"\n print(f\"Enter '{escape}' to exit.\")\n while True:\n print(f\"{prompt}>\", end=\"\")\n query = input()\n if query == escape:\n break\n start = timer()\n matches = evaluator(query)\n end = timer()\n printer.pprint(matches)\n print(f\"Evaluation took {end - start} seconds.\")\n\n\ndef repl_a():\n print(\"Building inverted index from Cranfield corpus...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = in3120.BrainDeadTokenizer()\n corpus = in3120.InMemoryCorpus(data_path(\"cran.xml\"))\n index = in3120.InMemoryInvertedIndex(corpus, [\"body\"], normalizer, tokenizer)\n print(\"Enter one or more index terms and inspect their posting lists.\")\n simple_repl(\"terms\", lambda ts: {t: list(index.get_postings_iterator(t)) for t in index.get_terms(ts)})\n\n\ndef repl_b_1():\n print(\"Building suffix array from Cranfield corpus...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = in3120.BrainDeadTokenizer()\n corpus = in3120.InMemoryCorpus(data_path(\"cran.xml\"))\n engine = in3120.SuffixArray(corpus, [\"body\"], normalizer, tokenizer)\n options = {\"debug\": False, \"hit_count\": 5}\n print(\"Enter a prefix phrase query and find matching documents.\")\n print(f\"Lookup options are {options}.\")\n print(\"Returned scores are occurrence counts.\")\n simple_repl(\"query\", lambda q: list(engine.evaluate(q, options)))\n\n\ndef repl_b_2():\n print(\"Building trie from MeSH corpus...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = 
in3120.BrainDeadTokenizer()\n corpus = in3120.InMemoryCorpus(data_path(\"mesh.txt\"))\n dictionary = in3120.Trie()\n dictionary.add((normalizer.normalize(normalizer.canonicalize(d[\"body\"])) for d in corpus), tokenizer)\n engine = in3120.StringFinder(dictionary, tokenizer)\n print(\"Enter some text and locate words and phrases that are MeSH terms.\")\n simple_repl(\"text\", lambda t: list(engine.scan(normalizer.normalize(normalizer.canonicalize(t)))))\n\n\ndef repl_c():\n print(\"Indexing English news corpus...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = in3120.BrainDeadTokenizer()\n corpus = in3120.InMemoryCorpus(data_path(\"en.txt\"))\n index = in3120.InMemoryInvertedIndex(corpus, [\"body\"], normalizer, tokenizer)\n ranker = in3120.BrainDeadRanker()\n engine = in3120.SimpleSearchEngine(corpus, index)\n options = {\"debug\": False, \"hit_count\": 5, \"match_threshold\": 0.5}\n print(\"Enter a query and find matching documents.\")\n print(f\"Lookup options are {options}.\")\n print(f\"Tokenizer is {tokenizer.__class__.__name__}.\")\n print(f\"Ranker is {ranker.__class__.__name__}.\")\n simple_repl(\"query\", lambda q: list(engine.evaluate(q, options, ranker)))\n\n\ndef repl_d_1():\n print(\"Indexing MeSH corpus...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = in3120.ShingleGenerator(3)\n corpus = in3120.InMemoryCorpus(data_path(\"mesh.txt\"))\n index = in3120.InMemoryInvertedIndex(corpus, [\"body\"], normalizer, tokenizer)\n ranker = in3120.BrainDeadRanker()\n engine = in3120.SimpleSearchEngine(corpus, index)\n options = {\"debug\": False, \"hit_count\": 5, \"match_threshold\": 0.5}\n print(\"Enter a query and find matching documents.\")\n print(f\"Lookup options are {options}.\")\n print(f\"Tokenizer is {tokenizer.__class__.__name__}.\")\n print(f\"Ranker is {ranker.__class__.__name__}.\")\n simple_repl(\"query\", lambda q: list(engine.evaluate(q, options, ranker)))\n\n\ndef repl_d_2():\n print(\"Indexing English news corpus...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = in3120.BrainDeadTokenizer()\n corpus = in3120.InMemoryCorpus(data_path(\"en.txt\"))\n index = in3120.InMemoryInvertedIndex(corpus, [\"body\"], normalizer, tokenizer)\n ranker = in3120.BetterRanker(corpus, index)\n engine = in3120.SimpleSearchEngine(corpus, index)\n options = {\"debug\": False, \"hit_count\": 5, \"match_threshold\": 0.5}\n print(\"Enter a query and find matching documents.\")\n print(f\"Lookup options are {options}.\")\n print(f\"Tokenizer is {tokenizer.__class__.__name__}.\")\n print(f\"Ranker is {ranker.__class__.__name__}.\")\n simple_repl(\"query\", lambda q: list(engine.evaluate(q, options, ranker)))\n\n\ndef repl_e():\n print(\"Initializing naive Bayes classifier from news corpora...\")\n normalizer = in3120.BrainDeadNormalizer()\n tokenizer = in3120.BrainDeadTokenizer()\n languages = [\"en\", \"no\", \"da\", \"de\"]\n training_set = {language: in3120.InMemoryCorpus(data_path(f\"{language}.txt\")) for language in languages}\n classifier = in3120.NaiveBayesClassifier(training_set, [\"body\"], normalizer, tokenizer)\n print(f\"Enter some text and classify it into {languages}.\")\n print(f\"Returned scores are log-probabilities.\")\n simple_repl(\"text\", lambda t: list(classifier.classify(t)))\n\n\ndef main():\n repls = {\"a\": repl_a,\n \"b-1\": repl_b_1,\n \"b-2\": repl_b_2,\n \"c\": repl_c,\n \"d-1\": repl_d_1,\n \"d-2\": repl_d_2,\n \"e\": repl_e}\n targets = sys.argv[1:]\n if not targets:\n print(f\"{sys.argv[0]} [{'|'.join(key for key in 
repls.keys())}]\")\n else:\n for target in targets:\n if target in repls:\n repls[target.lower()]()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "tests/repl.py", "file_name": "repl.py", "file_ext": "py", "file_size_in_byte": 5674, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "typing.Callable", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "pprint.PrettyPrinter", "line_number": 16, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 24, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 26, "usage_type": "call"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 33, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 33, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadTokenizer", "line_number": 34, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 34, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 35, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 35, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryInvertedIndex", "line_number": 36, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 36, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 43, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 43, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadTokenizer", "line_number": 44, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 44, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 45, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 45, "usage_type": "name"}, {"api_name": "context.in3120.SuffixArray", "line_number": 46, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 46, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 56, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 56, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadTokenizer", "line_number": 57, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 57, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 58, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 58, "usage_type": "name"}, {"api_name": "context.in3120.Trie", "line_number": 59, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 59, "usage_type": "name"}, {"api_name": "context.in3120.StringFinder", "line_number": 61, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 61, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 68, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 68, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadTokenizer", "line_number": 69, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 69, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 70, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 70, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryInvertedIndex", "line_number": 71, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 71, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadRanker", 
"line_number": 72, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 72, "usage_type": "name"}, {"api_name": "context.in3120.SimpleSearchEngine", "line_number": 73, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 73, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 84, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 84, "usage_type": "name"}, {"api_name": "context.in3120.ShingleGenerator", "line_number": 85, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 85, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 86, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 86, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryInvertedIndex", "line_number": 87, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 87, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadRanker", "line_number": 88, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 88, "usage_type": "name"}, {"api_name": "context.in3120.SimpleSearchEngine", "line_number": 89, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 89, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 100, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 100, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadTokenizer", "line_number": 101, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 101, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 102, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 102, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryInvertedIndex", "line_number": 103, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 103, "usage_type": "name"}, {"api_name": "context.in3120.BetterRanker", "line_number": 104, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 104, "usage_type": "name"}, {"api_name": "context.in3120.SimpleSearchEngine", "line_number": 105, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 105, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadNormalizer", "line_number": 116, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 116, "usage_type": "name"}, {"api_name": "context.in3120.BrainDeadTokenizer", "line_number": 117, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 117, "usage_type": "name"}, {"api_name": "context.in3120.InMemoryCorpus", "line_number": 119, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 119, "usage_type": "name"}, {"api_name": "context.in3120.NaiveBayesClassifier", "line_number": 120, "usage_type": "call"}, {"api_name": "context.in3120", "line_number": 120, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "56631208", "text": "\"\"\"empty message\n\nRevision ID: 840daf4878a2\nRevises: 6fb829e3b6f1\nCreate Date: 2019-02-06 15:44:35.760937\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '840daf4878a2'\ndown_revision = '6fb829e3b6f1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('customer',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('reception_address', sa.String(), nullable=True),\n sa.Column('credit_card_number', sa.String(), nullable=True),\n sa.Column('discount', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('department',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('name', sa.String(), nullable=True),\n sa.Column('description', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('employee',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('department_id', sa.Integer(), nullable=True),\n sa.Column('account_number', sa.String(), nullable=True),\n sa.Column('charge', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['department_id'], ['department.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('account_number')\n )\n op.drop_constraint('user_name_key', 'user', type_='unique')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint('user_name_key', 'user', ['name'])\n op.drop_table('employee')\n op.drop_table('department')\n op.drop_table('customer')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/840daf4878a2_.py", "file_name": "840daf4878a2_.py", "file_ext": "py", "file_size_in_byte": 2079, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, 
"usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 47, "usage_type": "call"}, {"api_name": "alembic.op.drop_constraint", "line_number": 49, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 49, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 55, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 55, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 56, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 56, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 57, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 57, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 58, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "142022268", "text": "'''Module used to communicate and interact with player.\n import as c'''\n\nimport pygame\nfrom pygame.locals import *\nimport globvar as g\nimport utilities as u\n\n\nclass Interaction:\n '''Class representing an interaction with the player'''\n\n def __init__(self, interaction_type, question_str, text_str, min_char_count=0):\n self.type = interaction_type\n self.question_str = question_str\n self.text_str = text_str\n self.min_char_count = min_char_count\n\n def run(self, target, background, notification=False, insist_answer=False):\n inside_rect = (10, 10, 680, 355)\n line_pos = (130, 170)\n question_pos = (90, 75)\n question_size = (500, 100)\n\n if notification is False:\n text_pos = (110, 270)\n text_size = (500, 80)\n else:\n text_pos = (110, 150)\n text_size = (500, 200)\n\n yes_pos = (175, 170)\n no_pos = (435, 170)\n\n yes_rect = Rect((300, 340), (100, 60))\n no_rect = Rect((550, 340), (80, 60))\n\n 
enter_pos = (192, 210)\n esc_pos = (430, 210)\n char_count_pos = (353, 220)\n\n surface_size = (700, 375)\n surface_pos = (130, 172)\n\n surface = pygame.Surface(surface_size)\n if background is None:\n background = target.copy()\n overlay = pygame.Surface((g.WINDOWWIDTH, g.WINDOWHEIGHT))\n overlay.set_alpha(180)\n overlay.fill((0, 0, 0))\n background.blit(overlay, (0, 0))\n\n target.blit(background, (0, 0))\n\n # inside\n surface.fill((0, 0, 0))\n pygame.draw.rect(surface, (255, 255, 255), inside_rect)\n # question\n question = u.Text(self.question_str, g.BLACK,\n g.communications_option_font, question_size)\n question_surf = g.communications_option_font.render(\n self.question_str, True, g.BLACK)\n question_size = question_surf.get_size()\n question_len = question_size[0]\n question_x = (inside_rect[2] - question_len) // 2\n question.render(surface, (question_x, question_pos[1]))\n # text\n text = u.Text(self.text_str, g.BLACK, g.another, text_size)\n text.render(surface, text_pos)\n\n default_cursor, pointer_cursor = u.cursors()\n\n if notification is False:\n if self.min_char_count != 0:\n min_char_warning = u.Text('(nejméně ' + str(\n self.min_char_count) + ' znaky / znaků)', g.BLACK, g.communications_hint_font)\n min_char_warning.render(surface, char_count_pos)\n\n if self.type == 'text':\n # line\n line = u.Text('_ _ _ _ _ _ _ _ _ _ _',\n g.BLACK, g.communications_option_font)\n line.render(surface, line_pos)\n else:\n # options\n yes = u.Text('ANO', g.BLACK, g.communications_option_font)\n yes.render(surface, yes_pos)\n no = u.Text('NE', g.BLACK, g.communications_option_font)\n no.render(surface, no_pos)\n\n # hints\n enter = u.Text('(ENTER)', g.BLACK, g.communications_hint_font)\n esc = u.Text('(ESCAPE)', g.BLACK, g.communications_hint_font)\n enter.render(surface, enter_pos)\n esc.render(surface, esc_pos)\n\n target.blit(surface, surface_pos)\n\n if notification is True:\n\n while True:\n u.checkForQuit()\n for event in pygame.event.get():\n if event.type == MOUSEBUTTONUP:\n x, y = event.pos\n if not Rect(surface_pos, surface_size).collidepoint((x, y)):\n return\n if event.type == KEYUP:\n if event.key == K_RETURN or event.key == K_ESCAPE:\n return\n\n pygame.display.update()\n g.FPSCLOCK.tick(g.FPS)\n pygame.event.pump()\n\n elif self.type == 'text':\n\n exit_dialogue = False\n user_input = ''\n last_chr = -1\n text_input = u.Text(user_input, g.BLACK,\n g.communications_option_font)\n\n while True:\n changed = False\n\n u.checkForQuit()\n for event in pygame.event.get():\n if event.type == KEYUP:\n changed = True\n # uppercase\n if pygame.key.get_mods() & KMOD_SHIFT or pygame.key.get_mods() & KMOD_CAPS:\n if pygame.key.name(event.key) in g.ALPHABET:\n user_input = user_input + \\\n chr(event.key).upper()\n last_chr += 1\n # lowercase\n elif pygame.key.name(event.key) in g.ALPHABET:\n user_input = user_input + chr(event.key)\n last_chr += 1\n # other\n elif event.key == K_SPACE:\n user_input = user_input + ' '\n last_chr += 1\n elif event.key == K_BACKSPACE:\n if last_chr != -1:\n user_input = user_input[:last_chr]\n last_chr -= 1\n elif event.key == K_RETURN:\n if len(user_input) >= self.min_char_count:\n return user_input\n if event.type == MOUSEBUTTONUP:\n x, y = event.pos\n if not Rect(surface_pos, surface_size).collidepoint((x, y)):\n return\n\n if changed is True:\n pygame.draw.rect(surface, (255, 255, 255),\n (line_pos, (400, 50)))\n line.render(surface, line_pos)\n text_input = u.Text(user_input, g.BLACK,\n g.communications_option_font)\n text_input.render(surface, 
line_pos)\n target.blit(surface, (130, 172))\n pygame.display.update()\n g.FPSCLOCK.tick(g.FPS)\n pygame.event.pump()\n\n else:\n while True:\n u.checkForQuit()\n for event in pygame.event.get():\n if event.type == KEYUP:\n if event.key == K_RETURN:\n return True\n elif event.key == K_ESCAPE:\n return False\n if event.type == MOUSEBUTTONUP:\n x, y = event.pos\n if insist_answer is False:\n if not Rect(surface_pos, surface_size).collidepoint((x, y)):\n return\n if yes_rect.collidepoint((x, y)):\n u.change_cursor(\n default_cursor, pointer_cursor, 'default')\n return True\n elif no_rect.collidepoint((x, y)):\n u.change_cursor(\n default_cursor, pointer_cursor, 'default')\n return False\n elif event.type == MOUSEMOTION:\n pointer = False\n x, y = event.pos\n if yes_rect.collidepoint((x, y)):\n pointer = True\n u.change_cursor(\n default_cursor, pointer_cursor, 'pointer')\n elif no_rect.collidepoint((x, y)):\n pointer = True\n u.change_cursor(\n default_cursor, pointer_cursor, 'pointer')\n if pointer is False:\n u.change_cursor(\n default_cursor, pointer_cursor, 'default')\n\n pygame.display.update()\n g.FPSCLOCK.tick(g.FPS)\n\n return user_input\n\n\ndef session(target, background, return_value, conditioned, dialogue_list, insist_answer=False):\n '''Runs several Interactions'''\n if background is None:\n background = target.copy()\n overlay = pygame.Surface((g.WINDOWWIDTH, g.WINDOWHEIGHT))\n overlay.set_alpha(180)\n overlay.fill((0, 0, 0))\n background.blit(overlay, (0, 0))\n target.blit(background, (0, 0))\n returned_values = []\n\n notification = True if return_value is None else False\n\n for dialogue in dialogue_list:\n returned_values.append(dialogue.run(\n target, background, notification, insist_answer))\n if conditioned is True and returned_values[0] is False:\n return\n if len(returned_values) > 0 and returned_values[-1] is None:\n return\n\n if return_value is None:\n return\n elif return_value == 'all':\n return returned_values\n else:\n return returned_values[return_value]\n\n\n# Tests\nif __name__ == '__main__':\n global DISPLAYSURF\n DISPLAYSURF = pygame.display.set_mode(\n (g.WINDOWWIDTH, g.WINDOWHEIGHT))\n DISPLAYSURF.fill((255, 255, 255))\n\n close = Interaction('yes_no', 'Odejít do hlavního menu?',\n 'Veškerý neuložený obsah bude smazán!\\nHerní postup se uloží po ukončení kapitoly.')\n start = Interaction('yes_no', 'Spustit novou hru',\n 'Váš dosavadní postup bude přepsán. Přejete si pokračovat?')\n name = Interaction('text', 'Pojmenuj svého hrdinu:', '', 4)\n\n print(session(DISPLAYSURF, None, 2, True, [start, name, close]))\n\n notif = Interaction(\n 'text', 'Konec hry', \"Dokončil jsi příběhovou kampaň. 
Pokud chceš začít novou hru, klikni na tlačítko 'Nová hra'.\")\n\n session(DISPLAYSURF, None, None, False, [notif])\n", "sub_path": "Game/communication.py", "file_name": "communication.py", "file_ext": "py", "file_size_in_byte": 10199, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pygame.Surface", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 48, "usage_type": "call"}, {"api_name": "globvar.WINDOWWIDTH", "line_number": 48, "usage_type": "attribute"}, {"api_name": "globvar.WINDOWHEIGHT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 59, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 59, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font", "line_number": 60, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font.render", "line_number": 61, "usage_type": "call"}, {"api_name": "globvar.communications_option_font", "line_number": 61, "usage_type": "attribute"}, {"api_name": "globvar.BLACK", "line_number": 62, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 68, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 68, "usage_type": "attribute"}, {"api_name": "globvar.another", "line_number": 68, "usage_type": "attribute"}, {"api_name": "utilities.cursors", "line_number": 71, "usage_type": "call"}, {"api_name": "utilities.Text", "line_number": 75, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 76, "usage_type": "attribute"}, {"api_name": "globvar.communications_hint_font", "line_number": 76, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 81, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 82, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font", "line_number": 82, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 86, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 86, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font", "line_number": 86, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 88, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 88, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font", "line_number": 88, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 92, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 92, "usage_type": "attribute"}, {"api_name": "globvar.communications_hint_font", "line_number": 92, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 93, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 93, "usage_type": "attribute"}, {"api_name": "globvar.communications_hint_font", "line_number": 93, "usage_type": "attribute"}, {"api_name": "utilities.checkForQuit", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 112, "usage_type": "attribute"}, {"api_name": "globvar.FPSCLOCK.tick", 
"line_number": 113, "usage_type": "call"}, {"api_name": "globvar.FPSCLOCK", "line_number": 113, "usage_type": "attribute"}, {"api_name": "globvar.FPS", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.event.pump", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 114, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 121, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 121, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font", "line_number": 122, "usage_type": "attribute"}, {"api_name": "utilities.checkForQuit", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.key.get_mods", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.key.name", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 133, "usage_type": "attribute"}, {"api_name": "globvar.ALPHABET", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.key.name", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 138, "usage_type": "attribute"}, {"api_name": "globvar.ALPHABET", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 158, "usage_type": "attribute"}, {"api_name": "utilities.Text", "line_number": 161, "usage_type": "call"}, {"api_name": "globvar.BLACK", "line_number": 161, "usage_type": "attribute"}, {"api_name": "globvar.communications_option_font", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 165, "usage_type": "attribute"}, {"api_name": "globvar.FPSCLOCK.tick", "line_number": 166, "usage_type": "call"}, {"api_name": "globvar.FPSCLOCK", "line_number": 166, "usage_type": "attribute"}, {"api_name": "globvar.FPS", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.event.pump", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 167, "usage_type": "attribute"}, {"api_name": "utilities.checkForQuit", "line_number": 171, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 172, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 172, "usage_type": "attribute"}, {"api_name": "utilities.change_cursor", "line_number": 184, "usage_type": "call"}, {"api_name": "utilities.change_cursor", "line_number": 188, "usage_type": "call"}, {"api_name": "utilities.change_cursor", "line_number": 196, "usage_type": "call"}, {"api_name": "utilities.change_cursor", "line_number": 200, "usage_type": "call"}, {"api_name": "utilities.change_cursor", "line_number": 203, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 206, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 206, "usage_type": "attribute"}, {"api_name": "globvar.FPSCLOCK.tick", "line_number": 207, "usage_type": "call"}, {"api_name": "globvar.FPSCLOCK", "line_number": 207, "usage_type": "attribute"}, {"api_name": "globvar.FPS", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 216, "usage_type": "call"}, {"api_name": 
"globvar.WINDOWWIDTH", "line_number": 216, "usage_type": "attribute"}, {"api_name": "globvar.WINDOWHEIGHT", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 244, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 244, "usage_type": "attribute"}, {"api_name": "globvar.WINDOWWIDTH", "line_number": 245, "usage_type": "attribute"}, {"api_name": "globvar.WINDOWHEIGHT", "line_number": 245, "usage_type": "attribute"}]} +{"seq_id": "122958924", "text": "import logging\nfrom gym.envs.registration import register\n\nlogger = logging.getLogger(__name__)\n\nregister(\n id='Qubit-v0',\n entry_point='gym_qubit.envs:TransmonEnv',\n timestep_limit=1000,\n reward_threshold=1.0,\n nondeterministic=False,\n)", "sub_path": "gym_qubit/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "gym.envs.registration.register", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "602885216", "text": "from pprint import pprint\n\nimport json\nimport constants as constants\n\nclass ReferenceUtil(object):\n\tdef GetAbilityReferenceDict(self, abilityRefPath):\n\t\t# Convert ability json file to ability json object\n\t\twith open(abilityRefPath, 'r', encoding='utf-8-sig') as json_file:\n\t\t\tabilityRefJson = json_file.read()\n\t\t\tabilityRefObj = json.loads(abilityRefJson)\n\t\t# Convert ability json object to ability dictionary\n\t\tabilityRefDict = {} # {5630, 'Phoenix-Ult'}\n\t\tfor abilityPair in abilityRefObj[constants.ABILITY_JSON_TITLE_KEY]:\n\t\t\tabilityRefDict[int(abilityPair[constants.REFERENCE_JSON_ID])] = abilityPair[constants.REFERENCE_JSON_NAME]\n\t\treturn abilityRefDict\n\n\tdef GetHeroReferenceDict(self, heroRefPath):\n\t\t# Convert hero json file to hero json object\n\t\twith open(heroRefPath, 'r', encoding='utf-8-sig') as json_file:\n\t\t\theroModelRefJson = json_file.read()\n\t\t\theroModelRefObj = json.loads(heroModelRefJson)\n\t\t# Convert hero json object to hero dictionary\n\t\theroRefDict = {} # {}\n\t\tfor heroPair in heroModelRefObj[constants.HERO_JSON_TITLE_KEY]:\n\t\t\theroRefDict[int(heroPair[constants.REFERENCE_JSON_ID])] = heroPair[constants.REFERENCE_JSON_NAME]\n\t\treturn heroRefDict\n", "sub_path": "reference_util.py", "file_name": "reference_util.py", "file_ext": "py", "file_size_in_byte": 1169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "constants.ABILITY_JSON_TITLE_KEY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "constants.REFERENCE_JSON_ID", "line_number": 15, "usage_type": "attribute"}, {"api_name": "constants.REFERENCE_JSON_NAME", "line_number": 15, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "constants.HERO_JSON_TITLE_KEY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "constants.REFERENCE_JSON_ID", "line_number": 26, "usage_type": "attribute"}, {"api_name": "constants.REFERENCE_JSON_NAME", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "64330095", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies 
= [\n        ('claims', '0004_invoice_taxes'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='invoice',\n            name='client_type',\n            field=models.CharField(default=b'EC', max_length=2, verbose_name=b'Bill To', choices=[(b'EC', b'Customer'), (b'CD', b'Custom')]),\n        ),\n        migrations.AlterField(\n            model_name='invoice',\n            name='notes',\n            field=models.CharField(max_length=255, null=True, verbose_name=b'Note in Header', blank=True),\n        ),\n    ]\n", "sub_path": "claims/migrations/0005_auto_20151221_0904.py", "file_name": "0005_auto_20151221_0904.py", "file_ext": "py", "file_size_in_byte": 702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "549900055", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThe BBMQ server accepts two kinds of connections: one from producers and one from consumers.\n\nEach topic has exactly one queue; a topic is essentially an abstraction over a queue. The name 'Topic' is inspired by Apache Kafka.\n\nProducer: publisher of messages\nConsumer: subscriber of messages\n\nThe connection thread and the main thread communicate through a simple queue. 
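For illustration only, here is a hypothetical client-side handshake (not part of this module; 'some_topic' is a made-up topic name, and Python 2 is assumed to match the Queue import below):\n\n    import socket\n    import settings\n\n    sock = socket.socket()\n    sock.connect((socket.gethostname(), settings.PORT))\n    # ConnectionThread parses this dict with ast.literal_eval\n    sock.send(str({'type': settings.CLIENT_PUBLISHER, 'topic': 'some_topic'}))\n    ack = sock.recv(1024)  # SERVER_ACKNOWLEDGEMENT, sent once a handler thread spawns\n\n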
Whenever a new connection is established, its details are stored in this queue, however many new connections arrive.\n\n\"\"\"\n\nimport socket\nimport logging, logging.config\nimport threading\nimport sys, os\nimport ast\nimport traceback\nimport Queue\nimport datetime\nimport signal\n\n# --------------------------Custom imports------------------------------------------\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport settings\nfrom bbmq import BBMQ\nfrom partition_messages import Message\nfrom message import BaseMessage\n\nUSE_DB = settings.USE_DB\n\nif USE_DB:\n    import models\n    from models import ModelManager\n    from models import Queue as QueueModel\n    from models import Message as MessageModel\n\nLOG_FILEPATH = settings.LOG_FILEPATH\nLOG_LEVEL = settings.LOG_LEVEL\nSERVER_MAX_QUEUED_CON = settings.SERVER_MAX_QUEUED_CON\nTOPICS = settings.TOPICS\nCLIENT_PUBLISHER = settings.CLIENT_PUBLISHER\nCLIENT_SUBSCRIBER = settings.CLIENT_SUBSCRIBER\nMAX_MESSAGE_SIZE = settings.MAX_MESSAGE_SIZE\nSERVER_ACKNOWLEDGEMENT = settings.SERVER_ACKNOWLEDGEMENT\nCLIENT_SHUTDOWN_SIGNAL = settings.CLIENT_SHUTDOWN_SIGNAL\nCONSUMER_REQUEST_WORD = settings.CONSUMER_REQUEST_WORD\nINVALID_PROTOCOL = settings.INVALID_PROTOCOL\nEMPTY_QUEUE_MESSAGE = settings.EMPTY_QUEUE_MESSAGE\nPRODUCER_ACK_MESSAGE = settings.PRODUCER_ACK_MESSAGE\nCLOSE_CONNECTION_SIGNAL = settings.CLOSE_CONNECTION_SIGNAL\n\nHEAD = settings.HEAD\nTAIL = settings.TAIL\n\nPARTITION_SIZE = settings.PARTITION_SIZE\n\nlogging.config.dictConfig(settings.LOGGING)\nlogger = logging.getLogger(\"bbmq_server_module\")\n\n\nclass ProducerThread(threading.Thread):\n    \"\"\"\n    Producer thread: receives messages from one connected producer and publishes them to the topic queue\n    \"\"\"\n    def __init__(self, producer_socket, inbound_socket_address, queue, topic_name):\n        \"\"\"\n        initialize the thread. During initialization of this thread, it must confirm to the\n        producer that communication can now start\n        :param producer_socket:\n        :param inbound_socket_address:\n        :param queue:\n        :param topic_name:\n        \"\"\"\n        threading.Thread.__init__(self)\n        self.logger = logging.getLogger(\"ProducerThread\")\n        self.logger.debug(\"Initializing Producer Thread for socket address: {}\".format(\n            inbound_socket_address))\n        self.socket = producer_socket\n        self.queue = queue\n        self.topic_name = topic_name\n        self.socket.send(SERVER_ACKNOWLEDGEMENT)\n        if USE_DB:\n            self.session = ModelManager.create_session(models.engine)\n            self.queue_object = self.session.query(QueueModel).filter(QueueModel.name == topic_name).first()\n\n    def run(self):\n        \"\"\"\n        run the thread. 
called when the start() method of Thread super class is called\n :return:\n \"\"\"\n msg = None\n msg_body = None\n try:\n while True:\n try:\n # The Queue will only store the message and thats all.\n msg = BaseMessage(message=\"\")\n msg_body = BaseMessage(message=\"\")\n while True:\n part = self.socket.recv(PARTITION_SIZE)\n msg.append(part)\n self.logger.debug(\"message now: \")\n self.logger.debug(msg)\n\n has_tail, message_tail = msg.has_message_tail()\n has_head, message_head = msg.has_message_head()\n\n if has_tail:\n self.logger.debug(\"TAIL received for message\")\n msg_body.append(message_tail)\n break\n elif has_head:\n self.logger.debug(\"HEAD received for message\")\n\n if msg_body.equals(CLIENT_SHUTDOWN_SIGNAL):\n logger.info(\"CLIENT_SHUTDOWN_SIGNAL recieved\")\n logger.info(\"Closing the connection with the producer\")\n\n self.logger.debug(\"Packetizing CLOSE_CONNECTION_SIGNAL\")\n close_con_signal = Message(CLOSE_CONNECTION_SIGNAL)\n for packet in close_con_signal:\n self.socket.send(packet)\n del(close_con_signal)\n break\n else:\n self.logger.debug(\"Received payload\")\n\n if USE_DB:\n self.queue_object.message.append(MessageModel(is_fetched=False, content=msg_body,\n publish_timestamp=datetime.datetime.utcnow(),\n consumed_timestamp=datetime.datetime.utcnow()))\n ModelManager.commit_session(self.session)\n self.logger.info(\"Written to database\")\n\n self.logger.debug(\"Publishing to queue\")\n\n # The message is simply added to the queue\n self.logger.debug(\"Enqueuing message: \" )\n self.logger.debug(msg_body)\n self.queue.add_message(msg_body)\n\n self.logger.info(\"Sending producer acknowledgement\")\n\n self.logger.debug(\"Packetizing PRODUCER_ACK_MESSAGE\")\n producer_ack_message = Message(PRODUCER_ACK_MESSAGE)\n for packet in producer_ack_message:\n self.socket.send(packet)\n\n except Exception:\n stack = traceback.format_exc()\n self.logger.error(stack)\n raise Exception\n\n except Exception:\n self.logger.error(\"Socket Error. Check the logs to know more\")\n exc_type, exc_val, exc_tb = sys.exc_info()\n stack = traceback.format_exc()\n self.logger.error(stack)\n traceback.print_exception(exc_type, exc_val, exc_tb)\n\n finally:\n self.logger.debug(\"Deleting msg_body and msg if exists\")\n if msg:\n del(msg)\n if msg_body:\n del(msg_body)\n\n if USE_DB:\n self.logger.info(\"Closing database session\")\n ModelManager.close_session(self.session)\n\n self.logger.info(\"Closing socket: {} for queue: {}\".format(self.socket,\n self.topic_name))\n self.socket.close()\n\n self.logger.info(\"Killing Producer Thread for socket: {} and queue: {}\".format(\n self.socket, self.topic_name))\n\n\nclass ConsumerThread(threading.Thread):\n \"\"\"\n Connection thread will be waiting for connections from producers or consumers\n \"\"\"\n def __init__(self, consumer_socket, inbound_socket_address, queue, topic_name):\n \"\"\"\n initialize the thread\n :param consumer_socket:\n :param inbound_socket_address:\n :param queue:\n :param topic_name:\n \"\"\"\n threading.Thread.__init__(self)\n self.logger = logging.getLogger(\"ConsumerThread\")\n self.logger.debug(\"Initializing Consumer Thread for socket address: {}\".format(\n inbound_socket_address))\n self.socket = consumer_socket\n self.queue = queue\n self.topic_name = topic_name\n self.socket.send(SERVER_ACKNOWLEDGEMENT)\n\n def run(self):\n \"\"\"\n run the thread. 
called when the start() method of Thread super class is called\n :return:\n \"\"\"\n msg = None\n msg_body = None\n try:\n while True:\n try:\n msg = BaseMessage(message=\"\")\n msg_body = BaseMessage(message=\"\")\n\n while True:\n part = self.socket.recv(PARTITION_SIZE)\n msg.append(part)\n has_tail, msg_tail = msg.has_message_tail()\n has_head, msg_head = msg.has_message_head()\n\n if has_tail:\n self.logger.debug(\"TAIL received for message\")\n msg_body.append(msg_tail)\n break\n elif has_head:\n self.logger.debug(\"HEAD received for message\")\n\n if msg_body.equals(CLIENT_SHUTDOWN_SIGNAL):\n self.logger.info(\"CLIENT_SHUTDOWN_SIGNAL recieved\")\n # the close connection signal has to be sent using packets\n packets = Message(CLOSE_CONNECTION_SIGNAL)\n self.logger.info(\"Sending CLOSE_CONNECTION_SIGNAL\")\n self.logger.debug(\"Packetizing CLOSE_CONNECTION_SIGNAL\")\n for packet in packets:\n self.socket.send(packet)\n break\n\n if msg_body.equals(CONSUMER_REQUEST_WORD):\n self.logger.debug(\"Received request for new message\")\n self.logger.debug(\"Fetching from queue\")\n queue_message = self.queue.fetch_message(block=True)\n queue_message = Message(message=str(queue_message))\n self.logger.info(\"Dequeued message: \" + str(queue_message))\n self.logger.debug(\"Packetizing message from queue\")\n for packet in queue_message:\n self.socket.send(packet)\n\n # TODO: Add response from client after receiving message\n # TODO: Store the message id of the message in the queue for proper replacement\n\n if USE_DB:\n self.logger.info(\"Updating database\")\n self.logger.info(\"Starting session\")\n self.session = ModelManager.create_session(models.engine)\n self.queue_object = self.session.query(QueueModel).filter(QueueModel.name ==\n self.topic_name).first()\n\n message_objs = self.session.query(MessageModel).filter(MessageModel.content.ilike(str(\n queue_message))).all()\n\n for message_obj in message_objs:\n if not message_obj.is_fetched:\n message_obj.is_fetched = True\n break\n ModelManager.commit_session(self.session)\n self.logger.info(\"Database updated\")\n self.logger.info(\"Closing database session\")\n ModelManager.close_session(self.session)\n\n else:\n self.socket.send(HEAD)\n self.socket.send(INVALID_PROTOCOL)\n self.socket.send(TAIL)\n\n except Exception:\n stack = traceback.format_exc()\n self.logger.error(stack)\n raise Exception\n\n except Exception:\n self.logger.error(\"Socket Error. 
Check the logs to know more\")\n exc_type, exc_val, exc_tb = sys.exc_info()\n stack = traceback.format_exc()\n self.logger.error(stack)\n traceback.print_exception(exc_type, exc_val, exc_tb)\n\n finally:\n self.logger.debug(\"Deleting msg and msg_body if exists\")\n if msg:\n del(msg)\n if msg_body:\n del(msg_body)\n\n if USE_DB:\n if self.session:\n self.logger.info(\"Closing database session\")\n ModelManager.close_session(self.session)\n\n self.logger.info(\"Closing socket: {} for queue: {}\".format(self.socket,\n self.topic_name))\n self.socket.close()\n\n self.logger.info(\"Killing Consumer Thread for socket: {} and queue: {}\".format(\n self.socket, self.topic_name))\n\n\nclass ConnectionThread(threading.Thread):\n \"\"\"\n Connection thread will be waiting for connections from producers or consumers\n \"\"\"\n\n def __init__(self, server_socket, connection_queue, topics):\n \"\"\"\n initialize the thread\n :param server_socket:\n :param connection_queue:\n :param topics: list of available topics that clients can publish/subscribe to\n \"\"\"\n threading.Thread.__init__(self)\n self.logger = logging.getLogger(\"ConnectionThread\")\n self.sock = server_socket\n self.connection_queue = connection_queue\n self.topics = topics\n\n def run(self):\n \"\"\"\n run the thread. called when the start() method of Thread super class is called\n :return:\n \"\"\"\n while True:\n client_metadata, socket_connection, inbound_socket_address = self.connect()\n\n # client_metadata is a string representation of a dictionary containing 2 fields\n # one for \"type\" which can be a producer or consumer and another being\n # \"topic\" specifying the topic the client wants to publish/subscribe\n try:\n client_type = ast.literal_eval(client_metadata)[\"type\"]\n client_topic = ast.literal_eval(client_metadata)[\"topic\"]\n if client_topic not in self.topics:\n self.logger.info(\"Client '{}' has subscribed to a non-existing\"\n \" topic {}\".format(inbound_socket_address, client_topic))\n socket_connection.close()\n continue\n if client_type == CLIENT_PUBLISHER:\n self.logger.info(\"Client is a producer and will publish to queue:\"\n \" {}\".format(client_topic))\n elif client_type == CLIENT_SUBSCRIBER:\n self.logger.info(\"Client is a consumer and will subscribe to queue:\"\n \" {}\".format(client_topic))\n else:\n self.logger.info(\"Client type not defined. Closing the connection\")\n socket_connection.close()\n continue\n self.logger.debug(\"Client data pushed to connection queue\")\n self.connection_queue.put({\n \"client_type\": client_type,\n \"client_topic\": client_topic,\n \"socket\": socket_connection,\n \"inbound_socket_address\": inbound_socket_address\n })\n\n except Exception:\n self.logger.error(\"Error in Connection Thread. 
Check the logs for the\"\n \" Traceback\")\n exc_type, exc_val, exc_tb = sys.exc_info()\n stack = traceback.format_exc()\n self.logger.error(stack)\n traceback.print_exception(exc_type, exc_val, exc_tb)\n\n def join(self, timeout=None):\n \"\"\"\n join the thread after closing the socket\n :param timeout:\n :return:\n \"\"\"\n self.logger.info(\"Closing Server socket\")\n self.sock.close()\n threading.Thread.join()\n\n def connect(self):\n \"\"\"\n connect to the socket\n :return:\n \"\"\"\n # the return value of accept() is a tuple c, addr where c is a new socket object\n # usable to send and receive data on the other end of the connection and addr is the\n # address bound to the socket at the other end of the connection\n self.logger.info(\"Waiting for connection from clients\")\n socket_connection, inbound_socket_address = self.sock.accept()\n # client_type can be a producer or a consumer\n client_metadata = socket_connection.recv(1024)\n self.logger.info(\"Connection received from client: {}\".format(inbound_socket_address))\n return client_metadata, socket_connection, inbound_socket_address\n\n\nclass BBMQServer(object):\n \"\"\"\n BBMQ server to connect to\n \"\"\"\n\n def __init__(self):\n \"\"\"\n initialize the instance of BBMQ. create the socket, bind the hostname and port with\n the socket and listen for the connections to the socket\n \"\"\"\n self.logger = logging.getLogger(\"bbmq_server_module\")\n self.sock = socket.socket()\n self.hostname = socket.gethostname()\n self.port = settings.PORT\n self.sock.bind((self.hostname, self.port))\n self.sock.listen(SERVER_MAX_QUEUED_CON)\n self.topics = {}\n self.connection_thread = None\n self.connection_queue = Queue.Queue()\n # store the instances of all the threads.\n self.all_client_threads = {\n \"connection_threads\":[],\n \"producer_threads\": [],\n \"consumer_threads\": []\n }\n\n def create_topic(self, topic_name):\n \"\"\"\n create a new topic with the name. 
returns -1 if the topic is already available\n :param topic_name:\n :return:\n \"\"\"\n if topic_name in self.topics.keys():\n return -1\n self.logger.info(\"creating topic: {}\".format(topic_name))\n self.topics[topic_name] = {\n \"queue\": None,\n \"producers\": [],\n \"consumers\": []\n }\n return 0\n\n def get_topic_queue(self, topic_name):\n \"\"\"\n gets the queue instance for a topic\n :param topic_name:\n :return:\n \"\"\"\n if topic_name not in self.topics.keys():\n return -1\n return self.topics[topic_name][\"queue\"]\n\n def update_topic(self, topic_name, producer, consumer):\n \"\"\"\n update the topic with new producers and consumers\n :param topic_name:\n :param producers: tuple ()\n :param consumers: tuple ()\n :return:\n \"\"\"\n if producer == None:\n self.topics[topic_name][\"consumers\"].append(consumer)\n else:\n self.topics[topic_name][\"producers\"].append(producer)\n\n def create_queue(self):\n \"\"\"\n create an custom queue instance and return it\n :return:\n \"\"\"\n queue = BBMQ()\n return queue\n\n def spawn_connection_thread(self):\n \"\"\"\n This method will spawn a thread to listen for new connections from new producers or\n consumers\n :return:\n \"\"\"\n self.logger.debug(\"Starting connection thread\")\n self.connection_thread = ConnectionThread(self.sock, self.connection_queue,\n self.topics.keys())\n self.all_client_threads[\"connection_threads\"].append(self.connection_thread)\n self.connection_thread.start()\n\n def spawn_producer_thread(self, producer_socket, inbound_socket_address, queue,\n topic_name):\n \"\"\"\n spawns a producer thread to publish to the queue\n :param inbound_socket_address:\n :param queue:\n :return:\n \"\"\"\n producer_thread = ProducerThread(producer_socket, inbound_socket_address, queue,\n topic_name)\n self.logger.debug(\"Starting producer thread for socket: {} and queue: {}\".format(\n inbound_socket_address, queue))\n self.all_client_threads[\"producer_threads\"].append(producer_thread)\n producer_thread.start()\n\n def spawn_consumer_thread(self, consumer_socket, inbound_socket_address, queue,\n topic_name):\n \"\"\"\n spawns a consumer thread to subscribe to the queue\n :param inbound_socket_address:\n :param queue:\n :return:\n \"\"\"\n consumer_thread = ConsumerThread(consumer_socket, inbound_socket_address, queue,\n topic_name)\n self.logger.debug(\"Starting consumer thread for socket: {} and queue: {}\".format(\n inbound_socket_address, queue))\n self.all_client_threads[\"consumer_threads\"].append(consumer_thread)\n consumer_thread.start()\n\n def join_connection_thread(self):\n \"\"\"\n join the connection thread\n :return:\n \"\"\"\n self.logger.debug(\"Joining Connection thread\")\n self.connection_thread.join()\n", "sub_path": "bbmq/server/bbmq_server.py", "file_name": "bbmq_server.py", "file_ext": "py", "file_size_in_byte": 21218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 28, "usage_type": "call"}, {"api_name": "settings.USE_DB", "line_number": 35, "usage_type": "attribute"}, {"api_name": "settings.LOG_FILEPATH", "line_number": 43, "usage_type": "attribute"}, {"api_name": "settings.LOG_LEVEL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "settings.SERVER_MAX_QUEUED_CON", "line_number": 45, "usage_type": "attribute"}, {"api_name": "settings.TOPICS", "line_number": 46, "usage_type": "attribute"}, {"api_name": "settings.CLIENT_PUBLISHER", "line_number": 47, "usage_type": "attribute"}, {"api_name": "settings.CLIENT_SUBSCRIBER", "line_number": 48, "usage_type": "attribute"}, {"api_name": "settings.MAX_MESSAGE_SIZE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "settings.SERVER_ACKNOWLEDGEMENT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "settings.CLIENT_SHUTDOWN_SIGNAL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "settings.CONSUMER_REQUEST_WORD", "line_number": 52, "usage_type": "attribute"}, {"api_name": "settings.INVALID_PROTOCOL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "settings.EMPTY_QUEUE_MESSAGE", "line_number": 54, "usage_type": "attribute"}, {"api_name": "settings.PRODUCER_ACK_MESSAGE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "settings.CLOSE_CONNECTION_SIGNAL", "line_number": 56, "usage_type": "attribute"}, {"api_name": "settings.HEAD", "line_number": 58, "usage_type": "attribute"}, {"api_name": "settings.TAIL", "line_number": 59, "usage_type": "attribute"}, {"api_name": "settings.PARTITION_SIZE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging.config.dictConfig", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 63, "usage_type": "attribute"}, {"api_name": "settings.LOGGING", "line_number": 63, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 64, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 67, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 80, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 80, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 81, "usage_type": "call"}, {"api_name": "models.ModelManager.create_session", "line_number": 89, "usage_type": "call"}, {"api_name": "models.ModelManager", "line_number": 89, "usage_type": "name"}, {"api_name": "models.engine", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.Queue", "line_number": 90, "usage_type": "argument"}, {"api_name": "models.Queue.name", "line_number": 90, "usage_type": "attribute"}, {"api_name": "message.BaseMessage", "line_number": 103, "usage_type": "call"}, {"api_name": "message.BaseMessage", "line_number": 104, "usage_type": "call"}, {"api_name": "partition_messages.Message", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Message", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "attribute"}, {"api_name": "models.ModelManager.commit_session", "line_number": 138, "usage_type": "call"}, {"api_name": "models.ModelManager", 
"line_number": 138, "usage_type": "name"}, {"api_name": "partition_messages.Message", "line_number": 151, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 156, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 162, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 163, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 165, "usage_type": "call"}, {"api_name": "models.ModelManager.close_session", "line_number": 176, "usage_type": "call"}, {"api_name": "models.ModelManager", "line_number": 176, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 186, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 198, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 198, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 199, "usage_type": "call"}, {"api_name": "message.BaseMessage", "line_number": 217, "usage_type": "call"}, {"api_name": "message.BaseMessage", "line_number": 218, "usage_type": "call"}, {"api_name": "partition_messages.Message", "line_number": 236, "usage_type": "call"}, {"api_name": "partition_messages.Message", "line_number": 247, "usage_type": "call"}, {"api_name": "models.ModelManager.create_session", "line_number": 259, "usage_type": "call"}, {"api_name": "models.ModelManager", "line_number": 259, "usage_type": "name"}, {"api_name": "models.engine", "line_number": 259, "usage_type": "attribute"}, {"api_name": "models.Queue", "line_number": 260, "usage_type": "argument"}, {"api_name": "models.Queue.name", "line_number": 260, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 263, "usage_type": "argument"}, {"api_name": "models.Message.content.ilike", "line_number": 263, "usage_type": "call"}, {"api_name": "models.Message.content", "line_number": 263, "usage_type": "attribute"}, {"api_name": "models.ModelManager.commit_session", "line_number": 270, "usage_type": "call"}, {"api_name": "models.ModelManager", "line_number": 270, "usage_type": "name"}, {"api_name": "models.ModelManager.close_session", "line_number": 273, "usage_type": "call"}, {"api_name": "models.ModelManager", "line_number": 273, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 281, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 287, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 288, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 290, "usage_type": "call"}, {"api_name": "models.ModelManager.close_session", "line_number": 302, "usage_type": "call"}, {"api_name": "models.ModelManager", "line_number": 302, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 312, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 324, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 324, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 325, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 342, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 343, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 370, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 371, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 373, "usage_type": "call"}, {"api_name": "threading.Thread.join", "line_number": 383, "usage_type": 
"call"}, {"api_name": "threading.Thread", "line_number": 383, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 411, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 412, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 413, "usage_type": "call"}, {"api_name": "settings.PORT", "line_number": 414, "usage_type": "attribute"}, {"api_name": "Queue.Queue", "line_number": 419, "usage_type": "call"}, {"api_name": "bbmq.BBMQ", "line_number": 471, "usage_type": "call"}]} +{"seq_id": "639465008", "text": "#!/usr/bin/env python3\n# vim: set fileencoding=utf-8 :\nfrom __future__ import print_function\nimport sys\nimport gc\nimport resource\nimport re\nimport logging\nimport time\nimport os\nimport codecs\nimport itertools\nfrom datetime import timedelta\n\nfrom optparse import OptionParser\n\nimport numpy as np\n\nfrom scipy import sparse as sp\n\nfrom sklearn.datasets import load_files\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nfrom lexicon import Lexicon\n\nimport nltk\nfrom nltk.corpus import mac_morpho\n\nparser = OptionParser(usage=\"%prog [options] \")\nparser.add_option(\"-e\", \"--encoding\", dest=\"encoding\", default=\"latin_1\", help=\"Dataset encoding\")\nparser.add_option(\"-r\", \"--initial-ranking\", dest=\"ranking_method\", default=\"cosine_similarity\", help=\"Initial ranking method (cosine_similarity, accuracy) Default: cosine_similarity\")\n\nif sys.stdout.encoding == None:\n\tprint(\"Fixing stdout encoding...\")\n\timport codecs\n\timport locale\n\t# Wrap sys.stdout into a StreamWriter to allow writing unicode.\n\tsys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)\n\n(options, args) = parser.parse_args()\n\n#if len(args) == 0:\n\t#parser.print_help()\n\t#sys.exit()\n\n\nclass SequentialSelection():\n\n\tdef __init__(self,tagger, tokenizer):\n\t\tself.tagger = tagger\n\t\tself.tokenizer = tokenizer\n\t\tself.corpus, self.allowedDocs = self.getDocs(\"/home/markinhos/datasets/atribuna/\")\n\n\tdef select_grammar(self, ranking):\n\t\tind = 0\n\t\tresp = 'N'\n\t\tresult = []\n\t\twhile resp != 'S' and resp != 's':\n\t\t\tif ind < len(ranking):\n\t\t\t\tprint(\"Analise as gramaticas a seguir:\")\n\t\t\t\ttkn = self.tokenizer.findall(ranking[ind][2])\n\t\t\t\tidxs = ranking[ind][3]\n\t\t\t\ttag_phrase = [self.tagger.tag([w])[0][1] for w in tkn]\n\t\t\t\tprint(\"[Pos = \"+str(ranking[ind][0])+\"]\\n\"+str(tkn[:idxs[0]])+('\\033[1m'+str(tkn[idxs[0]:idxs[1]]))+('\\033[0m'+str(tkn[idxs[1]:])))\n\t\t\t\tprint(ranking[ind][1],\" => \",str(tag_phrase[:idxs[0]])+('\\033[1m'+str(tag_phrase[idxs[0]:idxs[1]]))+('\\033[0m'+str(tag_phrase[idxs[1]:])))\n\t\t\t\tresp = input(\"Elas sao compativeis? Sim ou Nao { S | N } : \")\n\t\t\t\tif resp == 'S'or resp == 's':\n\t\t\t\t\tfor x,y in enumerate(ranking):\n\t\t\t\t\t\tif x >= ind:\n\t\t\t\t\t\t\tresult.append(y)\n\t\t\t\t\tfor x,y in enumerate(ranking):\n\t\t\t\t\t\tif x < ind:\n\t\t\t\t\t\t\tresult.append(y)\n\t\t\t\telse:\n\t\t\t\t\tind += 1\n\t\t\telse:\n\t\t\t\tprint(\"Fim da lista. 
Indice resetado\")\n\t\t\t\tind = 0\n\t\treturn result\n\t\n\tdef search_db_samples(self, grams):\n\t\tdbdata = []\n\t\tfor pos, phrase in enumerate(self.corpus.split(\".\")):\n\t\t\ttag_phrase = [self.tagger.tag([w])[0][1] for w in self.tokenizer.findall(phrase)]\n\t\t\t# iterate over a copy: deleting from the list while enumerating it skips elements\n\t\t\tfor gcan in list(grams):\n\t\t\t\tidxs = self.contains(gcan, tag_phrase)\n\t\t\t\tif idxs is not None:\n\t\t\t\t\tgrams.remove(gcan)\n\t\t\t\t\tdbdata.append([pos, list(gcan), phrase, idxs])\n\t\treturn dbdata\n\n\tdef readDocument(self, source):\n\t\twith codecs.open(source, \"r\", encoding='iso-8859-1') as document:\n\t\t\treturn document.read()\n\n\tdef getDocs(self, resources):\n\t\tdocs = os.listdir(resources)\n\t\tallowedDocs = []\n\t\tcorpus = []\n\t\tfor doc in docs:\n\t\t\tif not doc[-1] == '~':\n\t\t\t\tallowedDocs.append(doc)\n\t\t\t\tdocument = self.readDocument(\"{0}/{1}\".format(resources, doc))\n\t\t\t\tcorpus.append(document)\n\t\treturn \" \".join(corpus), allowedDocs\n\n\tdef contains(self, small, big):\n\t\tfor i in range(len(big)-len(small)+1):\n\t\t\tfor j in range(len(small)):\n\t\t\t\tif big[i+j] != small[j]:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\treturn i, i+len(small)\n\t\treturn None\n\nclass ElapsedFormatter():\n\t\n\tdef __init__(self):\n\t\tself.start_time = time.time()\n\t\n\tdef format(self, record):\n\t\telapsed_seconds = record.created - self.start_time\n\t\t#using timedelta here for convenient default formatting\n\t\telapsed = timedelta(seconds = elapsed_seconds)\n\t\treturn \"[%s][RAM: %.2f MB] %s\" % (str(elapsed)[:-3], (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024), record.getMessage())\n\n#add custom formatter to root logger for simple demonstration\nhandler = logging.StreamHandler()\nhandler.setFormatter(ElapsedFormatter())\nlogging.getLogger().addHandler(handler)\n\nlog = logging.getLogger('main')\nlog.setLevel(logging.DEBUG)\n\nwith open(\"input.txt\") as f:\n\ttext = f.readlines()\n\tinputs = [i.replace(\"\\n\", \"\").lower().split(\";\") for i in text]\n\tlog.info(inputs)\n\n#dataset_folder = args[0]\n\nlexicon = Lexicon(\"Portuguese (Brazil)/Dela/\")\n\ndef get_candidates(sentences):\n\tcandidates_simple = set()\n\tcandidates_med = set()\n\tcandidates_full = set()\n\ttokenizer = re.compile('\\w+')\n\tfor s in sentences:\n\t\tsent_words = tokenizer.findall(s)\n\t\tpos_full = []\n\t\tpos_med = []\n\t\tpos_simple = []\n\t\tfor w in sent_words:\n\t\t\tlemmas = lexicon.get_lemmas(w)\n\t\t\tpos_full += [set([p[1] for p in lemmas])]\n\t\t\tpos_med += [set([p[1].split(\":\")[0] for p in lemmas])]\n\t\t\tpos_simple += [set([p[1].split(\":\")[0].split(\"+\")[0] for p in lemmas])]\n\t\t\t#print(w, lemmas)\n\t\t\t#print(pos_med)\n\t\t\t#print(pos_simple)\n\t\t\n\t\tif len(candidates_simple) == 0:\n\t\t\t#print(\"TESTE\",pos_simple)\n\t\t\tcandidates_simple = set(itertools.product(*pos_simple))\n\t\t\tcandidates_med = set(itertools.product(*pos_med))\n\t\t\tcandidates_full = set(itertools.product(*pos_full))\n\t\telse:\n\t\t\tcandidates_simple = candidates_simple.intersection(set(itertools.product(*pos_simple)))\n\t\t\tcandidates_med = candidates_med.intersection(set(itertools.product(*pos_med)))\n\t\t\tcandidates_full = candidates_full.intersection(set(itertools.product(*pos_full)))\n\t\t#print(\"ITERTOOLS\")\n\t\t#print(candidates_simple)\n\treturn candidates_simple, candidates_med, candidates_full\n\nsentences = [s[1] for s in inputs]\n\nlog.info(\"Loading Mac-Morpho Tagged Sents...\")\ntsents = list(mac_morpho.tagged_sents())\n\n\ndef simplify_tag(t):\n\tif \"+\" in t:\n\t\tt = t[t.index(\"+\")+1:]\n\t\n\tif t == \"ART\":\n\t\treturn 
\"DET\"\n\t\n\treturn t\n\nlog.info(\"Simplifyng POS Tags...\")\ntsents = [[(w.lower(),simplify_tag(t)) for (w,t) in sent] for sent in tsents if sent]\n\ntrain = tsents\ntest = tsents[:300]\nlog.info(\"Training POS Taggers...\")\ntagger0 = nltk.DefaultTagger('N')\ntagger1 = nltk.UnigramTagger(train, backoff=tagger0)\ntagger2 = nltk.BigramTagger(train, backoff=tagger1)\n\n#log.info(\"Evaluate tagger\")\n#print(tagger2.evaluate(test))\n\n#log.info(\"TAGSET\")\n#tags = [simplify_tag(tag) for (word,tag) in mac_morpho.tagged_words()]\n#fd = nltk.FreqDist(tags)\n#print(fd.keys())\n\ntokenizer = re.compile('\\w+')\nfor input_id, s in enumerate(sentences):\n\tlog.info(\"Sentence: %s\" % (s))\n\tcandidates_simple, candidates_med, candidates_full = get_candidates([s]) \n\t#print(candidates_simple)\n\ttagged_sent = [tagger2.tag([w])[0][1] for w in tokenizer.findall(s)]\n\t#print(s, tagged_sent)\n\tcandidates_simple = np.array(list(candidates_simple))\n\ttagged_sent = np.array(tagged_sent)\n\tgram_acc = candidates_simple == tagged_sent\n\t#print(gram_acc)\n\tgram_acc = gram_acc.astype(np.float64).sum(axis=1) / gram_acc.shape[1]\n\t#print(gram_acc)\n\t\n\tlog.info(\"Vectorizing...\")\n\tcount_vect = CountVectorizer(dtype=np.float64, token_pattern='\\w+')\n\tX = [\" \".join(tokens) for tokens in candidates_simple]\n\t#print(X)\n\tX_vect = count_vect.fit_transform(X)\n\t#print(X_vect.todense())\n\ttagged_sent_vect = count_vect.transform([\" \".join(tagged_sent)])[0]\n\t#print(tagged_sent_vect)\n\t#print(X[0])\n\t#print(\" \".join(tagged_sent))\n\t#print(tagged_sent_vect.todense())\n\tlog.info(\"(%d, %d)\" % (X_vect.shape[0],X_vect.shape[1]))\n\t\n\tgram_sim = cosine_similarity(X_vect, tagged_sent_vect)\n\t#print(gram_sim)\n\t\n\tif options.ranking_method == \"cosine_similarity\":\n\t\tlog.info(\"Using cosine_similarity ranking...\")\n\t\tgram_rank = gram_sim\n\telif options.ranking_method == \"accuracy\":\n\t\tlog.info(\"Using accuracy ranking...\")\n\t\tgram_rank = gram_acc\n\telse:\n\t\tlog.warning(\"Unknown ranking method %s ignored, using cosine_similarity\")\n\t\tgram_rank = gram_sim\n\t\n\ttop_idx = np.argmax(gram_rank)\n\ttop_gram = candidates_simple[top_idx]\n\n\tcpcand = []\n\tsorted_grams_idx = np.argsort(-gram_rank, axis=0)\n\tfor i, gram_idx in enumerate(sorted_grams_idx):\n\t\tcpcand.append(list(candidates_simple[gram_idx][0]))\n\t\n\tprint([(x) for x in cpcand])\n\n\tselection_strategy = SequentialSelection(tagger2,tokenizer)\n\tsamples = selection_strategy.search_db_samples(cpcand)\n\tgram_rank = selection_strategy.select_grammar(samples)\n\n\tlog.info(\"%s: Writing results...\" % (inputs[input_id]))\n\t'''with open(\"gram-%d.txt\" % (input_id), 'w') as f:\n\t\tf.write(\"%s\\n\" % (inputs[input_id]))\n\t\tf.write(\"Meta: %s\\n\" % (\" \".join(tagged_sent)))\n\t\tsorted_grams_idx = np.argsort(-gram_rank, axis=0)\n\t\t#print(\"sorted_grams_idx\",sorted_grams_idx)\n\t\tfor i, gram_idx in enumerate(sorted_grams_idx):\n\t\t\tgram = candidates_simple[gram_idx][0]\n\t\t\t#print(gram_idx, gram)\n\t\t\tf.write(\"%d: %s - %.03f\\n\" % (i, \" \".join(gram), gram_rank[gram_idx]))\n\t'''\n\twith open(\"gram-%d.txt\" % (input_id), 'w') as f:\n\t\tf.write(\"%s\\n\" % (inputs[input_id]))\n\t\tf.write(\"Meta: %s\\n\" % (\" \".join(tagged_sent)))\n\t\tfor i, gram in enumerate(gram_rank):\n\t\t\ttkn = tokenizer.findall(gram[2])\n\t\t\tf.write(str(i)+\": [pos \"+str(gram[0])+\"] \"+str(gram[1])+\" => - \"+str(tkn[(gram[3])[0]:(gram[3])[1]])+\"\\n\")\n\nlog.info(\"Finished\")", "sub_path": "inc-gram.py", 
"file_name": "inc-gram.py", "file_ext": "py", "file_size_in_byte": 8973, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "optparse.OptionParser", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 39, "usage_type": "attribute"}, {"api_name": "codecs.getwriter", "line_number": 39, "usage_type": "call"}, {"api_name": "locale.getpreferredencoding", "line_number": 39, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 94, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 98, "usage_type": "call"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 125, "usage_type": "call"}, {"api_name": "resource.getrusage", "line_number": 126, "usage_type": "call"}, {"api_name": "resource.RUSAGE_SELF", "line_number": 126, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 134, "usage_type": "attribute"}, {"api_name": "lexicon.Lexicon", "line_number": 143, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 149, "usage_type": "call"}, {"api_name": "lexicon.get_lemmas", "line_number": 156, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 166, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 167, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 168, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 170, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 171, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 172, "usage_type": "call"}, {"api_name": "nltk.corpus.mac_morpho.tagged_sents", "line_number": 180, "usage_type": "call"}, {"api_name": "nltk.corpus.mac_morpho", "line_number": 180, "usage_type": "name"}, {"api_name": "nltk.DefaultTagger", "line_number": 198, "usage_type": "call"}, {"api_name": "nltk.UnigramTagger", "line_number": 199, "usage_type": "call"}, {"api_name": "nltk.BigramTagger", "line_number": 200, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 221, "usage_type": "attribute"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 225, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "480770290", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport scipy.io as sio\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = sio.loadmat('CaseDataAll.mat')\nmarket_data = pd.read_csv('market.csv')\neom = data[\"a_EOM\"]\nmon = data[\"Mon_Yield\"]\nsize = data[\"Mon_SizeAll\"]\nmarket_return = market_data['Mkt_Rf']\nrisk_free = market_data['rf']\n\nfor x in 
eom:\n x[0] = int(x[0] / 100) # change the month in eom\n\nsize = np.array(size)\nave_size = []\nskews = []\nkurts = []\nfor i in range(60, np.array(size).shape[0]):\n a = pd.Series(size[i])\n ave_size.append(a.mean())\n skews.append(a.skew())\n kurts.append(a.kurt())\n\ndf2 = pd.DataFrame({'month': np.arange(0, np.array(size).shape[0] - 60), 'skew': skews})\n\n# Draw Plot\nplt.figure(figsize=(16, 10), dpi=80)\nplt.plot('month', 'skew', data=df2, color='tab:red')\n\n# Decoration\nplt.ylim(0, 30)\nxtick_location = df2.index.tolist()[::12]\nxtick_labels = [(x // 12) + 2005 for x in df2.month.tolist()[::12]]\nplt.xticks(ticks=xtick_location, labels=xtick_labels, rotation=0, fontsize=12, horizontalalignment='center',\n alpha=.7)\nplt.yticks(fontsize=12, alpha=.7)\nplt.grid(axis='both', alpha=.3)\n\n# Remove borders\nplt.gca().spines[\"top\"].set_alpha(0.0)\nplt.gca().spines[\"bottom\"].set_alpha(0.3)\nplt.gca().spines[\"right\"].set_alpha(0.0)\nplt.gca().spines[\"left\"].set_alpha(0.3)\nplt.gca().set(xlabel='Year', ylabel='Monthly Skew of Size')\nplt.savefig('figure2.png')", "sub_path": "figure2.py", "file_name": "figure2.py", "file_ext": "py", "file_size_in_byte": 1415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scipy.io.loadmat", "line_number": 7, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 7, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", 
"line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "361581330", "text": "import argparse\nimport json\nimport sys\n\nimport redis\n\nfrom pystdlib.uishim import get_selection\nfrom pystdlib.shell import term_create_window, tmux_create_window\nfrom pystdlib import shell_cmd\n\n\nparser = argparse.ArgumentParser(description=\"Execute command over SSH.\")\nparser.add_argument(\"--choices\", dest=\"show_choices\", action=\"store_true\",\n default=False, help=\"show predefined command choices\")\nparser.add_argument(\"--ignore-tmux\", dest=\"ignore_tmux\", action=\"store_true\",\n default=False, help=\"open connection in new terminal window rather than tmux pane\")\n\nargs = parser.parse_args()\n\nr = redis.Redis(host='localhost', port=6379, db=0)\nextra_hosts_data = json.loads(r.get(\"net/extra_hosts\"))\n\nhost = get_selection(extra_hosts_data.keys(), \"ssh to\", case_insensitive=True, lines=10, font=\"@wmFontDmenu@\")\n\n\nif host:\n host_meta = extra_hosts_data[host]\n host_vpn = host_meta.get(\"vpn\", None)\n if host_vpn:\n shell_cmd(f\"vpnctl --start {host_vpn}\")\n ssh_user = host_meta.get(\"user\", None)\n ssh_port = host_meta.get(\"port\", None)\n cmd = f\"ssh{' -l ' + ssh_user if ssh_user else ''}{' -p ' + str(ssh_port) if ssh_port else ''} {host_meta['ips'][0]}\"\n if args.show_choices:\n command_choices = json.loads(r.get(\"net/command_choices\"))\n choice = get_selection(command_choices, \"execute\", case_insensitive=True, lines=5, font=\"@wmFontDmenu@\")\n if choice:\n cmd += f\" -t '{choice}'\"\n else:\n sys.exit(1)\n\n if args.ignore_tmux:\n term_create_window(cmd, term_cmd=[\"@defaultTerminal@\", \"-e\"])\n else:\n result = tmux_create_window(cmd, session_name=host_meta.get(\"tmux\", \"@tmuxDefaultSession@\"),\n window_title=\"ssh :: {host}\")\n if not result:\n term_create_window(cmd, term_cmd=[\"@defaultTerminal@\", \"-e\"])\n", "sub_path": "modules/localnfra/networking/scripts/sshmenu.py", "file_name": "sshmenu.py", "file_ext": "py", "file_size_in_byte": 1880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "pystdlib.uishim.get_selection", "line_number": 23, "usage_type": "call"}, {"api_name": "pystdlib.shell_cmd", "line_number": 30, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "pystdlib.uishim.get_selection", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "pystdlib.shell.term_create_window", "line_number": 43, "usage_type": "call"}, {"api_name": "pystdlib.shell.tmux_create_window", "line_number": 45, "usage_type": "call"}, {"api_name": "pystdlib.shell.term_create_window", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "477285979", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom Web import db\nimport hashlib\nfrom datetime import datetime\nfrom Web.models import *\nfrom decimal import *\nimport xlrd\n\nif __name__ == '__main__':\n db.drop_all()\n db.create_all()\n\n # 系统菜单\n # m = 
Menu(name='用户管理',link='#',url='',pid=1,published=True,order=0)\n menus = [\n Menu(name='系统管理', link='fa fa-th-larg', url='#', pid=0, published=True, order=0),\n Menu(name='用户管理', link='', url='/user', pid=1, published=True, order=0),\n Menu(name='角色管理', link='', url='#', pid=1, published=True, order=0),\n Menu(name='菜单管理', link='', url='#', pid=1, published=True, order=0),\n Menu(name='权限管理', link='', url='#', pid=1, published=True, order=0),\n Menu(name='会员管理', link='fa', url='#', pid=0, published=True, order=0),\n Menu(name='人力资源', link='', url='#', pid=1, published=True, order=0)\n ]\n for m in menus:\n db.session.add(m)\n\n # 系统用户\n u = User(name='admin', email='@', nickname='管理员', password=hashlib.new(\"md5\", \"admin\".encode(\"utf-8\")).hexdigest(),\n reg_time=datetime.now(), status=0, deleted=False)\n db.session.add(u)\n\n #\n Categorys = [\n AssetCategory(name='医疗设备', code='01'),\n AssetCategory(name='医疗家具', code='02'),\n AssetCategory(name='办公家具', code='03'),\n AssetCategory(name='电子及办公设备', code='04'),\n AssetCategory(name='办公车辆', code='05')\n ]\n for c in Categorys:\n db.session.add(c)\n\n db.session.commit()\n\n types = [\n ManagerType(name='在帐资产'),\n ManagerType(name='在帐资产(行政)'),\n ManagerType(name='报废资产'),\n ManagerType(name='暂存资产'),\n ManagerType(name='实物资产'),\n ManagerType(name='临时资产'),\n ManagerType(name='实物报废')\n\n ]\n for t in types:\n db.session.add(t)\n db.session.commit()\n\n excel = xlrd.open_workbook(u\"资产清单.xls\")\n\n sheetDepartment = excel.sheet_by_name(u\"部门\")\n sheetSupplier = excel.sheet_by_name(u\"供货商\")\n sheetDetail = excel.sheet_by_name(u\"固定资产清单\")\n\n for iRow in range(sheetDepartment.nrows):\n d = Department(name=sheetDepartment.cell(iRow, 0).value)\n db.session.add(d)\n db.session.commit()\n\n for iRow in range(sheetSupplier.nrows):\n c = Supplier(name=sheetSupplier.cell(iRow, 0).value)\n db.session.add(c)\n\n for iRow in range(4, sheetDetail.nrows):\n\n if sheetDetail.cell(iRow, 1).ctype == 0:\n print(sheetDetail.cell(iRow, 0).value)\n break\n supplier = Supplier.query.filter(Supplier.name == sheetDetail.cell(iRow, 2).value).first()\n print(sheetDetail.cell(iRow, 1).ctype)\n # print None if supplier is None else supplier.id\n print(iRow)\n asset = Asset(code=sheetDetail.cell(iRow, 1).value,\n name=sheetDetail.cell(iRow, 5).value,\n supplier_id=None if supplier is None else supplier.id,\n manager_type_id=ManagerType.query.filter(\n ManagerType.name == sheetDetail.cell(iRow, 3).value).first().id,\n asset_category_id=AssetCategory.query.filter(\n AssetCategory.name == sheetDetail.cell(iRow, 4).value).first().id,\n purchase_date=xlrd.xldate.xldate_as_datetime(sheetDetail.cell(iRow, 7).value, 0).date(),\n original_value=Decimal(sheetDetail.cell(iRow, 8).value),\n depreciation_year=int(sheetDetail.cell(iRow, 9).value),\n department_id=Department.query.filter(\n Department.name == sheetDetail.cell(iRow, 10).value).first().id,\n position=sheetDetail.cell(iRow, 11).value,\n remark=sheetDetail.cell(iRow, 12).value\n )\n\n db.session.add(asset)\n # print Supplier.query.filter(Supplier.name == sheetDetail.cell(iRow, 2).value).first().id\n # print xlrd.xldate.xldate_as_datetime(sheetDetail.cell(iRow, 7).value, 0).date()\n\n db.session.commit()\n", "sub_path": "db_init.py", "file_name": "db_init.py", "file_ext": "py", "file_size_in_byte": 4262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Web.db.drop_all", "line_number": 12, "usage_type": "call"}, {"api_name": "Web.db", "line_number": 
12, "usage_type": "name"}, {"api_name": "Web.db.create_all", "line_number": 13, "usage_type": "call"}, {"api_name": "Web.db", "line_number": 13, "usage_type": "name"}, {"api_name": "Web.db.session.add", "line_number": 27, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 27, "usage_type": "name"}, {"api_name": "hashlib.new", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "Web.db.session.add", "line_number": 32, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 32, "usage_type": "name"}, {"api_name": "Web.db.session.add", "line_number": 43, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 43, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 43, "usage_type": "name"}, {"api_name": "Web.db.session.commit", "line_number": 45, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 45, "usage_type": "name"}, {"api_name": "Web.db.session.add", "line_number": 58, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 58, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 58, "usage_type": "name"}, {"api_name": "Web.db.session.commit", "line_number": 59, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 59, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 59, "usage_type": "name"}, {"api_name": "xlrd.open_workbook", "line_number": 61, "usage_type": "call"}, {"api_name": "Web.db.session.add", "line_number": 69, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 69, "usage_type": "name"}, {"api_name": "Web.db.session.commit", "line_number": 70, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 70, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 70, "usage_type": "name"}, {"api_name": "Web.db.session.add", "line_number": 74, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 74, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 74, "usage_type": "name"}, {"api_name": "xlrd.xldate.xldate_as_datetime", "line_number": 92, "usage_type": "call"}, {"api_name": "xlrd.xldate", "line_number": 92, "usage_type": "attribute"}, {"api_name": "Web.db.session.add", "line_number": 101, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 101, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 101, "usage_type": "name"}, {"api_name": "Web.db.session.commit", "line_number": 105, "usage_type": "call"}, {"api_name": "Web.db.session", "line_number": 105, "usage_type": "attribute"}, {"api_name": "Web.db", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "102554322", "text": "import logging\nimport os\nfrom collections import namedtuple\nimport numpy as np\nimport pandas as pd\nimport fenics as fe\nimport scipy as sp\nfrom netCDF4 import Dataset, num2date\nfrom metpy.units import units\nimport metpy.calc.thermo as thermo\nfrom siphon.simplewebservice.wyoming import WyomingUpperAir\nfrom letkf_forecasting import __version__\nfrom letkf_forecasting.optical_flow import optical_flow\nfrom letkf_forecasting.letkf_io 
import (\n return_analysis_ensemble,\n return_single_time,\n extract_components,\n save_netcdf,\n read_coords,\n)\nfrom letkf_forecasting.random_functions import (\n perturb_irradiance,\n eig_2d_covariance,\n perturb_winds,\n)\nfrom letkf_forecasting.advection import (\n advect_5min_ensemble,\n remove_divergence_ensemble,\n noise_fun,\n advect_5min_single,\n remove_divergence_single\n)\nfrom letkf_forecasting.assimilation_accessories import (\n ensemble_creator,\n assimilation_position_generator\n)\nfrom letkf_forecasting.assimilation import (\n assimilate_sat_to_wind,\n assimilate_wrf,\n reduced_enkf,\n)\n\n\ndef set_up_param_dict(*, date, io, flags, advect_params, ens_params,\n pert_params, sat2sat, sat2wind, wrf, opt_flow):\n param_dict = date.copy()\n param_dict.update(io)\n param_dict.update(advect_params)\n param_dict.update(ens_params)\n param_dict.update(pert_params)\n for adict in [flags, sat2sat, sat2wind, wrf, opt_flow]:\n temp = adict.copy()\n name = temp['name'] + '_'\n del temp['name']\n keys = list(temp.keys())\n for k in keys:\n temp[name + k] = temp.pop(k)\n param_dict.update(temp)\n param_dict['git_version'] = __version__\n return param_dict\n\n\ndef dict2nt(adict, aname):\n nt = namedtuple(aname, adict.keys())(**adict)\n return nt\n\n\ndef calc_system_variables(*, coords, advect_params, flags, pert_params):\n dx = (coords.we[1] - coords.we[0])*1000\n dy = (coords.sn[1] - coords.sn[0])*1000 # dx, dy in m not km\n max_horizon = pd.Timedelta(advect_params['max_horizon'])\n ci_crop_shape = np.array([coords.sn_crop.size,\n coords.we_crop.size],\n dtype='int')\n U_crop_shape = np.array([coords.sn_crop.size,\n coords.we_stag_crop.size],\n dtype='int')\n V_crop_shape = np.array([coords.sn_stag_crop.size,\n coords.we_crop.size],\n dtype='int')\n U_crop_size = U_crop_shape[0]*U_crop_shape[1]\n V_crop_size = V_crop_shape[0]*V_crop_shape[1]\n wind_size = U_crop_size + V_crop_size\n num_of_horizons = int((max_horizon/15).seconds/60)\n sys_vars = {'dx': dx, 'dy': dy,\n 'num_of_horizons': num_of_horizons,\n 'max_horizon': max_horizon,\n 'ci_crop_shape': ci_crop_shape,\n 'U_crop_shape': U_crop_shape,\n 'V_crop_shape': V_crop_shape,\n 'U_crop_size': U_crop_size,\n 'V_crop_size': V_crop_size,\n 'wind_size': wind_size}\n if flags['div']:\n mesh = fe.RectangleMesh(fe.Point(0, 0),\n fe.Point(int(V_crop_shape[1] - 1),\n int(U_crop_shape[0] - 1)),\n int(V_crop_shape[1] - 1),\n int(U_crop_shape[0] - 1))\n FunctionSpace_wind = fe.FunctionSpace(mesh, 'P', 1)\n sys_vars['FunctionSpace_wind'] = FunctionSpace_wind\n if flags['perturbation']:\n rf_eig, rf_vectors = eig_2d_covariance(\n x=coords.we_crop, y=coords.sn_crop,\n Lx=pert_params['Lx'],\n Ly=pert_params['Ly'], tol=pert_params['tol'])\n rf_approx_var = (\n rf_vectors * rf_eig[None, :] * rf_vectors).sum(-1).mean()\n sys_vars['rf_eig'] = rf_eig\n sys_vars['rf_vectors'] = rf_vectors\n sys_vars['rf_approx_var'] = rf_approx_var\n if flags['perturb_winds']:\n rf_eig, rf_vectors = eig_2d_covariance(\n coords.we_crop, coords.sn_crop,\n Lx=pert_params['Lx_wind'],\n Ly=pert_params['Ly_wind'], tol=pert_params['tol_wind'])\n rf_approx_var = (\n rf_vectors * rf_eig[None, :] * rf_vectors).sum(-1).mean()\n rf_eig = rf_eig*pert_params['Lx_wind']**2\n sys_vars['rf_eig_wind'] = rf_eig\n sys_vars['rf_vectors_wind'] = rf_vectors\n sys_vars['rf_approx_var_wind'] = rf_approx_var\n sys_vars = dict2nt(sys_vars, 'sys_vars')\n return sys_vars\n\n\ndef calc_assim_variables(*, sys_vars, advect_params, flags, sat2sat, sat2wind,\n wrf):\n assim_vars = {}\n if 
flags['assim_sat2sat']:\n assim_pos, assim_pos_2d, full_pos_2d = (\n assimilation_position_generator(sys_vars.ci_crop_shape,\n sat2sat['grid_size']))\n noise_init = noise_fun(sys_vars.ci_crop_shape)\n assim_vars['assim_pos'] = assim_pos\n assim_vars['assim_pos_2d'] = assim_pos_2d\n assim_vars['full_pos_2d'] = full_pos_2d\n assim_vars['noise_init'] = noise_init\n if flags['assim_sat2wind']:\n assim_pos_sat2wind, assim_pos_2d_sat2wind, full_pos_2d_sat2wind = (\n assimilation_position_generator(sys_vars.ci_crop_shape,\n sat2wind['grid_size']))\n assim_vars['assim_pos_sat2wind'] = assim_pos_sat2wind\n assim_vars['assim_pos_2d_sat2wind'] = assim_pos_2d_sat2wind\n assim_vars['full_pos_2d_sat2wind'] = full_pos_2d_sat2wind\n assim_pos_U, assim_pos_2d_U, full_pos_2d_U = (\n assimilation_position_generator(sys_vars.U_crop_shape,\n sat2wind['grid_size']))\n assim_pos_V, assim_pos_2d_V, full_pos_2d_V = (\n assimilation_position_generator(sys_vars.V_crop_shape,\n sat2wind['grid_size']))\n assim_vars['assim_pos_U'] = assim_pos_U\n assim_vars['assim_pos_2d_U'] = assim_pos_2d_U\n assim_vars['full_pos_2d_U'] = full_pos_2d_U\n assim_vars['assim_pos_V'] = assim_pos_V\n assim_vars['assim_pos_2d_V'] = assim_pos_2d_V\n assim_vars['full_pos_2d_V'] = full_pos_2d_V\n if flags['assim_wrf']:\n assim_pos_U_wrf, assim_pos_2d_U_wrf, full_pos_2d_U_wrf = (\n assimilation_position_generator(sys_vars.U_crop_shape,\n wrf['grid_size']))\n assim_pos_V_wrf, assim_pos_2d_V_wrf, full_pos_2d_V_wrf = (\n assimilation_position_generator(sys_vars.V_crop_shape,\n wrf['grid_size']))\n assim_vars['assim_pos_U_wrf'] = assim_pos_U_wrf\n assim_vars['assim_pos_2d_U_wrf'] = assim_pos_2d_U_wrf\n assim_vars['full_pos_2d_U_wrf'] = full_pos_2d_U_wrf\n assim_vars['assim_pos_V_wrf'] = assim_pos_V_wrf\n assim_vars['assim_pos_2d_V_wrf'] = assim_pos_2d_V_wrf\n assim_vars['full_pos_2d_V_wrf'] = full_pos_2d_V_wrf\n assim_vars = dict2nt(assim_vars, 'assim_vars')\n return assim_vars\n\n\ndef return_wind_time(*, sat_time, coords):\n int_index_wind = coords.wind_times.get_loc(sat_time,\n method='pad')\n wind_time = coords.wind_times[int_index_wind]\n return wind_time\n\n\ndef return_ensemble(*, data_file_path, ens_params, coords, flags):\n sat_time = coords.sat_times[0]\n wind_time = return_wind_time(sat_time=sat_time, coords=coords)\n q = return_single_time(data_file_path, coords.sat_times_all,\n sat_time, [coords.sn_slice],\n [coords.we_slice], ['ci'])[0]\n if flags['radiosonde']:\n station = 'TUS'\n df = WyomingUpperAir.request_data(sat_time.date(), station)\n T = df['temperature'].values * units(df.units['temperature'])\n Td = df['dewpoint'].values * units(df.units['dewpoint'])\n u_wind = df['u_wind'].values * units(df.units['u_wind'])\n u_wind = u_wind.to(units.meter/units.second)\n v_wind = df['v_wind'].values * units(df.units['v_wind'])\n v_wind = v_wind.to(units.meter/units.second)\n rh = thermo.relative_humidity_from_dewpoint(T, Td)\n max_arg = np.argmax(rh)\n u_size = coords.we_stag_crop.size * coords.sn_crop.size\n v_size = coords.we_crop.size * coords.sn_stag_crop.size\n U = np.ones(u_size)*u_wind[max_arg]\n V = np.ones(v_size)*v_wind[max_arg]\n elif flags['opt_flow']:\n opt_flow_folder = os.path.split(data_file_path)[0]\n opt_flow_file = os.path.join(opt_flow_folder, 'data_opt_flow.nc')\n of_sat_time = coords.sat_times[1]\n U, V = return_single_time(opt_flow_file, coords.sat_times_all,\n of_sat_time,\n [coords.sn_slice, coords.sn_stag_slice],\n [coords.we_stag_slice, coords.we_slice],\n ['U_opt_flow', 'V_opt_flow'])\n\n time_step = 
(of_sat_time - sat_time).seconds\n U = U * (250 / time_step)\n V = V * (250 / time_step)\n U = U.clip(min=-50, max=50)\n V = V.clip(min=-50, max=50)\n else:\n U, V = return_single_time(data_file_path, coords.wind_times, wind_time,\n [coords.sn_slice, coords.sn_stag_slice],\n [coords.we_stag_slice, coords.we_slice],\n ['U', 'V'])\n U, V = smooth_winds(U, V)\n if flags['wrf_mean']:\n U = np.ones_like(U)*U.mean()\n V = np.ones_like(V)*V.mean()\n if flags['assim']:\n ensemble = ensemble_creator(\n q, U, V, CI_sigma=ens_params['ci_sigma'],\n wind_sigma=ens_params['winds_sigma'],\n ens_size=ens_params['ens_num'])\n else:\n ensemble = np.concatenate([U.ravel(), V.ravel(), q.ravel()])[:, None]\n shape = ensemble.shape\n ensemble = np.ma.compressed(ensemble).reshape(shape)\n return ensemble\n\n\ndef forecast_setup(*, data_file_path, date, io, advect_params, ens_params,\n pert_params, flags, sat2sat, sat2wind, wrf, opt_flow,\n results_file_path):\n param_dict = set_up_param_dict(\n date=date, io=io, advect_params=advect_params, ens_params=ens_params,\n pert_params=pert_params, flags=flags, sat2sat=sat2sat,\n sat2wind=sat2wind, wrf=wrf, opt_flow=opt_flow)\n coords = read_coords(data_file_path=data_file_path,\n advect_params=advect_params, flags=flags)\n sys_vars = calc_system_variables(\n coords=coords, advect_params=advect_params, flags=flags,\n pert_params=pert_params)\n if 'analysis_fore' in flags:\n if flags['analysis_fore']:\n sat_time = coords.sat_times[0]\n ensemble = return_analysis_ensemble(\n sat_time=sat_time, results_file_path=results_file_path)\n else:\n ensemble = return_ensemble(data_file_path=data_file_path,\n ens_params=ens_params,\n coords=coords, flags=flags)\n else:\n ensemble = return_ensemble(data_file_path=data_file_path,\n ens_params=ens_params,\n coords=coords, flags=flags)\n if flags['assim']:\n assim_vars = calc_assim_variables(sys_vars=sys_vars,\n advect_params=advect_params,\n flags=flags, sat2sat=sat2sat,\n sat2wind=sat2wind, wrf=wrf)\n else:\n assim_vars = None\n return param_dict, coords, sys_vars, assim_vars, ensemble\n\n\ndef preprocess(*, ensemble, flags, remove_div_flag, coords, sys_vars):\n if remove_div_flag and flags['div']:\n logging.debug('remove divergence')\n ensemble[:sys_vars.wind_size] = remove_divergence_ensemble(\n FunctionSpace=sys_vars.FunctionSpace_wind,\n wind_ensemble=ensemble[:sys_vars.wind_size],\n U_crop_shape=sys_vars.U_crop_shape,\n V_crop_shape=sys_vars.V_crop_shape, sigma=4)\n return ensemble\n\n\ndef forecast(*, ensemble, flags, coords, time_index, sat_time,\n sys_vars, advect_params, pert_params, assim_vars, workers):\n save_times = pd.date_range(sat_time,\n periods=(sys_vars.num_of_horizons + 1),\n freq='15min')\n save_times = save_times.tz_convert(None)\n if time_index + 1 < coords.sat_times.size:\n num_of_advect = int((\n coords.sat_times[time_index + 1] -\n coords.sat_times[time_index]).seconds/(60*15))\n else:\n num_of_advect = 0\n background = None\n logging.debug(f'15min steps to background: {num_of_advect}')\n ensemble_array = ensemble.copy()[None, :, :]\n cx = abs(ensemble[:sys_vars.U_crop_size]).max()\n cy = abs(ensemble[sys_vars.U_crop_size:\n sys_vars.wind_size]).max()\n T_steps = int(np.ceil((5*60)*(cx/sys_vars.dx\n + cy/sys_vars.dy)\n / advect_params['C_max']))\n dt = (5*60)/T_steps\n for m in range(sys_vars.num_of_horizons):\n logging.info(str(pd.Timedelta('15min')*(m + 1)))\n for n in range(3):\n if flags['perturb_winds']:\n ensemble[:sys_vars.wind_size] = perturb_winds(\n ensemble[:sys_vars.wind_size], sys_vars, 
pert_params)\n\n if flags['assim']:\n ensemble = advect_5min_ensemble(\n ensemble, dt, sys_vars.dx, sys_vars.dy,\n T_steps,\n sys_vars.U_crop_shape, sys_vars.V_crop_shape,\n sys_vars.ci_crop_shape, workers)\n else:\n ensemble[:, 0] = advect_5min_single(\n ensemble[:, 0], dt, sys_vars.dx, sys_vars.dy,\n T_steps,\n sys_vars.U_crop_shape, sys_vars.V_crop_shape,\n sys_vars.ci_crop_shape)\n ensemble[sys_vars.wind_size:] = (ensemble[sys_vars.wind_size:]\n .clip(min=0, max=1))\n if flags['perturbation']:\n ensemble[sys_vars.wind_size:] = perturb_irradiance(\n ensemble[sys_vars.wind_size:], sys_vars.ci_crop_shape,\n pert_params['edge_weight'],\n pert_params['pert_mean'],\n pert_params['pert_sigma'],\n sys_vars.rf_approx_var,\n sys_vars.rf_eig, sys_vars.rf_vectors)\n ensemble_array = np.concatenate(\n [ensemble_array, ensemble[None, :, :]],\n axis=0)\n if num_of_advect == (m + 1):\n background = ensemble.copy()\n return ensemble_array, save_times, background\n\n\ndef save(*, ensemble_array, coords, ens_params, param_dict, sys_vars,\n save_times, results_file_path, flags):\n U, V, ci = extract_components(\n ensemble_array, ens_params['ens_num'], sys_vars.num_of_horizons + 1,\n sys_vars.U_crop_shape, sys_vars.V_crop_shape, sys_vars.ci_crop_shape)\n save_netcdf(\n results_file_path, U, V, ci, param_dict,\n coords.we_crop, coords.sn_crop,\n coords.we_stag_crop, coords.sn_stag_crop,\n save_times, ens_params['ens_num'], flags)\n\n\ndef maybe_assim_sat2sat(*, ensemble, data_file_path, sat_time,\n coords, sys_vars, flags):\n if 'analysis_fore' in flags:\n if flags['analysis_fore']:\n return ensemble\n if flags['assim_sat2sat']:\n raise NotImplementedError\n else:\n q = return_single_time(data_file_path, coords.sat_times_all,\n sat_time, [coords.sn_slice], [coords.we_slice],\n ['ci'])[0]\n ensemble[sys_vars.wind_size:] = q.ravel()[:, None]\n return ensemble\n\n\ndef maybe_assim_sat2wind(*, ensemble, data_file_path, sat_time,\n coords, sys_vars, assim_vars, sat2wind,\n flags):\n if 'analysis_fore' in flags:\n if flags['analysis_fore']:\n return ensemble, False\n if flags['assim_sat2wind']:\n logging.debug('Assim sat2wind')\n q = return_single_time(data_file_path, coords.sat_times_all,\n sat_time, [coords.sn_slice], [coords.we_slice],\n ['ci'])[0]\n ensemble = assimilate_sat_to_wind(\n ensemble=ensemble,\n observations=q.ravel(),\n R_inverse_wind=1/sat2wind['sig']**2,\n wind_inflation=sat2wind['infl'],\n domain_shape=sys_vars.ci_crop_shape,\n U_shape=sys_vars.U_crop_shape,\n V_shape=sys_vars.V_crop_shape,\n localization_length_wind=sat2wind['loc'],\n assimilation_positions=assim_vars.assim_pos_sat2wind,\n assimilation_positions_2d=assim_vars.assim_pos_2d_sat2wind,\n full_positions_2d=assim_vars.full_pos_2d_sat2wind)\n div_sat2wind_flag = True\n else:\n div_sat2wind_flag = False\n return ensemble, div_sat2wind_flag\n\n\ndef maybe_assim_wrf(*, ensemble, data_file_path, sat_time,\n coords, sys_vars, assim_vars, wrf,\n ens_params, flags):\n if 'analysis_fore' in flags:\n if flags['analysis_fore']:\n return ensemble, False\n wind_time = return_wind_time(sat_time=sat_time, coords=coords)\n if sat_time == wind_time and not flags['radiosonde']:\n U, V = return_single_time(data_file_path, coords.wind_times,\n wind_time,\n [coords.sn_slice, coords.sn_stag_slice],\n [coords.we_stag_slice, coords.we_slice],\n ['U', 'V'])\n U, V = smooth_winds(U, V)\n if flags['wrf_mean']:\n U = np.ones_like(U)*U.mean()\n V = np.ones_like(V)*V.mean()\n div_wrf_flag = True\n if flags['assim_wrf']:\n logging.debug('Assim WRF')\n R_inverse = 
1/wrf['sig']**2\n            localization_length_wind = int(round(\n                wrf['loc'] / (coords.we[1] - coords.we[0])))\n            ensemble[:sys_vars.U_crop_size] = assimilate_wrf(\n                ensemble=ensemble[:sys_vars.U_crop_size],\n                observations=U.ravel(),\n                R_inverse=R_inverse,\n                wind_inflation=wrf['infl'],\n                wind_shape=sys_vars.U_crop_shape,\n                localization_length_wind=localization_length_wind,\n                assimilation_positions=assim_vars.assim_pos_U_wrf,\n                assimilation_positions_2d=assim_vars.assim_pos_2d_U_wrf,\n                full_positions_2d=assim_vars.full_pos_2d_U_wrf)\n\n            ensemble[sys_vars.U_crop_size:sys_vars.wind_size] = assimilate_wrf(\n                ensemble=ensemble[sys_vars.U_crop_size:\n                                  sys_vars.wind_size],\n                observations=V.ravel(),\n                R_inverse=R_inverse,\n                wind_inflation=wrf['infl'],\n                wind_shape=sys_vars.V_crop_shape,\n                localization_length_wind=localization_length_wind,\n                assimilation_positions=assim_vars.assim_pos_V_wrf,\n                assimilation_positions_2d=assim_vars.assim_pos_2d_V_wrf,\n                full_positions_2d=assim_vars.full_pos_2d_V_wrf)\n        elif not flags['opt_flow']:\n            logging.debug('replace WRF')\n            if ensemble.shape[1] > 1:\n                random_nums = np.random.normal(\n                    loc=0,\n                    scale=ens_params['winds_sigma'][0],\n                    size=ens_params['ens_num'])\n                ensemble[:sys_vars.U_crop_size] = (U.ravel()[:, None]\n                                                   + random_nums[None, :])\n                random_nums = np.random.normal(\n                    loc=0,\n                    scale=ens_params['winds_sigma'][1],\n                    size=ens_params['ens_num'])\n                ensemble[sys_vars.U_crop_size:\n                         sys_vars.wind_size] = (\n                             V.ravel()[:, None]\n                             + random_nums[None, :])\n            else:\n                ensemble[:sys_vars.U_crop_size] = U.ravel()[:, None]\n                ensemble[sys_vars.U_crop_size:\n                         sys_vars.wind_size] = V.ravel()[:, None]\n    else:\n        div_wrf_flag = False\n    return ensemble, div_wrf_flag\n\n\ndef smooth_winds(U, V):\n    U = sp.ndimage.filters.gaussian_filter(U, sigma=60)\n    V = sp.ndimage.filters.gaussian_filter(V, sigma=60)\n    return U, V\n\n\ndef return_opt_flow(*, coords, time_index, sat_time, data_file_path, sys_vars):\n    # retrieve OPT_FLOW vectors\n    wind_time = return_wind_time(sat_time=sat_time, coords=coords)\n    time0 = coords.sat_times[time_index - 1]\n    this_U, this_V = return_single_time(data_file_path, coords.wind_times,\n                                        wind_time,\n                                        [slice(None), slice(None)],\n                                        [slice(None), slice(None)],\n                                        ['U', 'V'])\n    image0 = return_single_time(data_file_path, coords.sat_times_all,\n                                time0, [slice(None)], [slice(None)],\n                                ['ci'])[0]\n    image1 = return_single_time(data_file_path, coords.sat_times_all,\n                                sat_time, [slice(None)], [slice(None)],\n                                ['ci'])[0]\n    u_opt_flow, v_opt_flow, pos = optical_flow(image0, image1,\n                                               time0, sat_time,\n                                               this_U, this_V)\n    if u_opt_flow.size == 0:\n        nothing = np.array([])\n        return nothing, nothing, nothing, nothing\n    del this_U, this_V, image0, image1\n    pos = pos*4  # optical flow done on coarse grid\n\n    # need to select only pos in crop domain; convert to crop\n    keep = np.logical_and(\n        np.logical_and(pos[:, 0] > coords.we_slice.start,\n                       pos[:, 0] < coords.we_slice.stop),\n        np.logical_and(pos[:, 1] > coords.sn_slice.start,\n                       pos[:, 1] < coords.sn_slice.stop))\n    pos = pos[keep]\n    u_opt_flow = u_opt_flow[keep]\n    v_opt_flow = v_opt_flow[keep]\n    pos[:, 0] -= coords.we_slice.start\n    pos[:, 1] -= coords.sn_slice.start\n    pos = pos.T\n    pos = pos[::-1]\n    u_opt_flow_flat_pos = np.ravel_multi_index(pos, sys_vars.U_crop_shape)\n    v_opt_flow_flat_pos = np.ravel_multi_index(pos, sys_vars.V_crop_shape)\n    return u_opt_flow, v_opt_flow, u_opt_flow_flat_pos, v_opt_flow_flat_pos\n\n\ndef maybe_assim_opt_flow(*, ensemble, data_file_path, sat_time, time_index,\n                         coords, sys_vars, flags, opt_flow):\n    if 
flags['assim_opt_flow']:\n div_opt_flow_flag = True\n logging.debug('calc opt_flow')\n returned = return_opt_flow(\n coords=coords, time_index=time_index, sat_time=sat_time,\n data_file_path=data_file_path, sys_vars=sys_vars)\n u_opt_flow, v_opt_flow = returned[:2]\n u_opt_flow_flat_pos, v_opt_flow_flat_pos = returned[2:]\n if u_opt_flow.size == 0:\n div_opt_flow_flag = False\n to_return = (ensemble, div_opt_flow_flag)\n return to_return\n logging.debug('assim opt_flow')\n x_temp = np.arange(sys_vars.U_crop_shape[1])*sys_vars.dx/1000 # in km\n y_temp = np.arange(sys_vars.U_crop_shape[0])*sys_vars.dy/1000\n x_temp, y_temp = np.meshgrid(x_temp, y_temp)\n ensemble[:sys_vars.U_crop_size] = reduced_enkf(\n ensemble=ensemble[:sys_vars.U_crop_size],\n observations=u_opt_flow, R_sig=opt_flow['sig'],\n flat_locations=u_opt_flow_flat_pos,\n inflation=opt_flow['infl'],\n localization=opt_flow['loc'],\n x=x_temp.ravel(), y=y_temp.ravel())\n x_temp = np.arange(sys_vars.V_crop_shape[1])*sys_vars.dx/1000\n y_temp = np.arange(sys_vars.V_crop_shape[0])*sys_vars.dy/1000\n x_temp, y_temp = np.meshgrid(x_temp, y_temp)\n ensemble[sys_vars.U_crop_size:\n sys_vars.wind_size] = reduced_enkf(\n ensemble=ensemble[sys_vars.U_crop_size:\n sys_vars.wind_size],\n observations=v_opt_flow, R_sig=opt_flow['sig'],\n flat_locations=v_opt_flow_flat_pos,\n inflation=opt_flow['infl'],\n localization=opt_flow['loc'],\n x=x_temp.ravel(), y=y_temp.ravel())\n to_return = (ensemble, div_opt_flow_flag, u_opt_flow, v_opt_flow,\n u_opt_flow_flat_pos, v_opt_flow_flat_pos)\n elif flags['opt_flow']:\n div_opt_flow_flag = True\n opt_flow_folder = os.path.split(data_file_path)[0]\n opt_flow_file = os.path.join(opt_flow_folder, 'data_opt_flow.nc')\n U, V = return_single_time(opt_flow_file, coords.sat_times_all,\n sat_time,\n [coords.sn_slice, coords.sn_stag_slice],\n [coords.we_stag_slice, coords.we_slice],\n ['U_opt_flow', 'V_opt_flow'])\n time_step = (sat_time - coords.sat_times[time_index - 1]).seconds\n U = U * (250 / time_step)\n V = V * (250 / time_step)\n U = U.clip(min=-50, max=50)\n V = V.clip(min=-50, max=50)\n ensemble[:sys_vars.U_crop_size] = U.ravel()[:, None]\n ensemble[sys_vars.U_crop_size:\n sys_vars.wind_size] = V.ravel()[:, None]\n else:\n div_opt_flow_flag = False\n to_return = (ensemble, div_opt_flow_flag)\n return to_return\n\n\ndef maybe_load_analysis(*, sat_time, results_file_path, flags,\n ensemble):\n if 'analysis_fore' in flags:\n if flags['analysis_fore']:\n ensemble = return_analysis_ensemble(\n sat_time=sat_time, results_file_path=results_file_path)\n return ensemble\n else:\n return ensemble\n else:\n return ensemble\n\n\ndef forecast_system(*, data_file_path, results_file_path,\n date, io, flags, advect_params, ens_params, pert_params,\n sat2sat, sat2wind, wrf, opt_flow, workers):\n param_dict, coords, sys_vars, assim_vars, ensemble = forecast_setup(\n data_file_path=data_file_path, date=date, io=io,\n flags=flags, advect_params=advect_params,\n ens_params=ens_params, pert_params=pert_params,\n sat2sat=sat2sat, sat2wind=sat2wind, wrf=wrf,\n opt_flow=opt_flow, results_file_path=results_file_path)\n remove_div_flag = True\n ensemble = preprocess(\n ensemble=ensemble, flags=flags,\n remove_div_flag=remove_div_flag,\n coords=coords, sys_vars=sys_vars)\n time_index = 0\n sat_time = coords.sat_times[time_index]\n ensemble_array, save_times, ensemble = forecast(\n ensemble=ensemble, sat_time=sat_time,\n flags=flags, coords=coords, time_index=time_index,\n sys_vars=sys_vars,\n advect_params=advect_params, 
pert_params=pert_params,\n        assim_vars=assim_vars, workers=workers)\n    save(ensemble_array=ensemble_array, coords=coords,\n         ens_params=ens_params, param_dict=param_dict,\n         sys_vars=sys_vars, save_times=save_times,\n         results_file_path=results_file_path,\n         flags=flags)\n    for time_index in range(1, coords.sat_times.size):\n        sat_time = coords.sat_times[time_index]\n        logging.info(str(sat_time))\n        ensemble = maybe_load_analysis(\n            sat_time=sat_time, flags=flags,\n            ensemble=ensemble, results_file_path=results_file_path)\n        ensemble = maybe_assim_sat2sat(\n            ensemble=ensemble, data_file_path=data_file_path,\n            sat_time=sat_time, coords=coords, sys_vars=sys_vars,\n            flags=flags)\n        ensemble, div_sat2wind_flag = maybe_assim_sat2wind(\n            ensemble=ensemble, data_file_path=data_file_path,\n            sat_time=sat_time, coords=coords, sys_vars=sys_vars,\n            assim_vars=assim_vars, sat2wind=sat2wind,\n            flags=flags)\n        ensemble, div_wrf_flag = maybe_assim_wrf(\n            ensemble=ensemble, data_file_path=data_file_path,\n            sat_time=sat_time, coords=coords, sys_vars=sys_vars,\n            assim_vars=assim_vars, wrf=wrf,\n            ens_params=ens_params,\n            flags=flags)\n        ensemble, div_opt_flow_flag = maybe_assim_opt_flow(\n            ensemble=ensemble, data_file_path=data_file_path,\n            sat_time=sat_time, time_index=time_index,\n            coords=coords, sys_vars=sys_vars,\n            flags=flags, opt_flow=opt_flow)\n        remove_div_flag = (div_sat2wind_flag\n                           or div_wrf_flag\n                           or div_opt_flow_flag)\n        ensemble = preprocess(\n            ensemble=ensemble, flags=flags,\n            remove_div_flag=remove_div_flag,\n            coords=coords, sys_vars=sys_vars)\n        ensemble_array, save_times, ensemble = forecast(\n            ensemble=ensemble, sat_time=sat_time,\n            flags=flags, coords=coords, time_index=time_index,\n            sys_vars=sys_vars,\n            advect_params=advect_params, pert_params=pert_params,\n            assim_vars=assim_vars, workers=workers)\n        save(ensemble_array=ensemble_array, coords=coords,\n             ens_params=ens_params, param_dict=param_dict,\n             sys_vars=sys_vars, save_times=save_times,\n             results_file_path=results_file_path,\n             flags=flags)\n", "sub_path": "letkf_forecasting/letkf_forecasting.py", "file_name": "letkf_forecasting.py", "file_ext": "py", "file_size_in_byte": 29803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "letkf_forecasting.__version__", "line_number": 59, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "fenics.RectangleMesh", "line_number": 95, "usage_type": "call"}, {"api_name": "fenics.Point", "line_number": 95, "usage_type": "call"}, {"api_name": "fenics.Point", "line_number": 96, "usage_type": "call"}, {"api_name": "fenics.FunctionSpace", "line_number": 100, "usage_type": "call"}, {"api_name": "letkf_forecasting.random_functions.eig_2d_covariance", "line_number": 103, "usage_type": "call"}, {"api_name": "letkf_forecasting.random_functions.eig_2d_covariance", "line_number": 113, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation_accessories.assimilation_position_generator", "line_number": 132, "usage_type": "call"}, {"api_name": "letkf_forecasting.advection.noise_fun", "line_number": 134, "usage_type": "call"}, {"api_name": 
"letkf_forecasting.assimilation_accessories.assimilation_position_generator", "line_number": 141, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation_accessories.assimilation_position_generator", "line_number": 147, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation_accessories.assimilation_position_generator", "line_number": 150, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation_accessories.assimilation_position_generator", "line_number": 160, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation_accessories.assimilation_position_generator", "line_number": 163, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 185, "usage_type": "call"}, {"api_name": "siphon.simplewebservice.wyoming.WyomingUpperAir.request_data", "line_number": 190, "usage_type": "call"}, {"api_name": "siphon.simplewebservice.wyoming.WyomingUpperAir", "line_number": 190, "usage_type": "name"}, {"api_name": "metpy.units.units", "line_number": 191, "usage_type": "call"}, {"api_name": "metpy.units.units", "line_number": 192, "usage_type": "call"}, {"api_name": "metpy.units.units", "line_number": 193, "usage_type": "call"}, {"api_name": "metpy.units.units.meter", "line_number": 194, "usage_type": "attribute"}, {"api_name": "metpy.units.units", "line_number": 194, "usage_type": "name"}, {"api_name": "metpy.units.units.second", "line_number": 194, "usage_type": "attribute"}, {"api_name": "metpy.units.units", "line_number": 195, "usage_type": "call"}, {"api_name": "metpy.units.units.meter", "line_number": 196, "usage_type": "attribute"}, {"api_name": "metpy.units.units", "line_number": 196, "usage_type": "name"}, {"api_name": "metpy.units.units.second", "line_number": 196, "usage_type": "attribute"}, {"api_name": "metpy.calc.thermo.relative_humidity_from_dewpoint", "line_number": 197, "usage_type": "call"}, {"api_name": "metpy.calc.thermo", "line_number": 197, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 207, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 226, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation_accessories.ensemble_creator", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.ma.compressed", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 235, "usage_type": "attribute"}, {"api_name": "letkf_forecasting.letkf_io.read_coords", "line_number": 246, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_analysis_ensemble", "line_number": 254, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 276, "usage_type": "call"}, {"api_name": "letkf_forecasting.advection.remove_divergence_ensemble", 
"line_number": 277, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 287, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 303, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 308, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 308, "usage_type": "call"}, {"api_name": "letkf_forecasting.random_functions.perturb_winds", "line_number": 311, "usage_type": "call"}, {"api_name": "letkf_forecasting.advection.advect_5min_ensemble", "line_number": 315, "usage_type": "call"}, {"api_name": "letkf_forecasting.advection.advect_5min_single", "line_number": 321, "usage_type": "call"}, {"api_name": "letkf_forecasting.random_functions.perturb_irradiance", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 336, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.extract_components", "line_number": 346, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.save_netcdf", "line_number": 349, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 364, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 378, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 379, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation.assimilate_sat_to_wind", "line_number": 382, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 416, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 419, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation.assimilate_wrf", "line_number": 423, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation.assimilate_wrf", "line_number": 434, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 448, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 454, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 472, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 472, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 473, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 473, "usage_type": "attribute"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 481, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 486, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 489, "usage_type": "call"}, {"api_name": "letkf_forecasting.optical_flow.optical_flow", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 505, "usage_type": "call"}, {"api_name": "numpy.ravel_multi_index", 
"line_number": 514, "usage_type": "call"}, {"api_name": "numpy.ravel_multi_index", "line_number": 515, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 523, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 536, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation.reduced_enkf", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 545, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 546, "usage_type": "call"}, {"api_name": "letkf_forecasting.assimilation.reduced_enkf", "line_number": 548, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 560, "usage_type": "call"}, {"api_name": "os.path", "line_number": 560, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 561, "usage_type": "call"}, {"api_name": "os.path", "line_number": 561, "usage_type": "attribute"}, {"api_name": "letkf_forecasting.letkf_io.return_single_time", "line_number": 562, "usage_type": "call"}, {"api_name": "letkf_forecasting.letkf_io.return_analysis_ensemble", "line_number": 585, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 623, "usage_type": "call"}]} +{"seq_id": "57362755", "text": "import json#cPickle as pickle\nimport cv2\nimport numpy as np\nfrom sys import stdout\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.models import Sequential\nfrom keras.callbacks import ModelCheckpoint\nfrom scipy.misc import imsave\nimport time\nfrom keras import backend as K\n\n\n\nfrom sklearn.datasets import load_files \nfrom keras.utils import np_utils\nimport numpy as np\nfrom glob import glob\n\n# define function to load train, test, and validation datasets\ndef load_dataset(path):\n data = load_files(path)\n dog_files = np.array(data['filenames'])\n dog_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return dog_files, dog_targets\n\n\n\n\nfrom keras.preprocessing import image \nfrom tqdm import tqdm\n\ndef path_to_tensor(img_path):\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\n return np.vstack(list_of_tensors)\n\n\n\n\n\ndef run(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors, test_targets):\n # Keep record of test accuracy. ########################################\n #accHistory = {}\n\n\n\n # Hyper parameter history. 
#############################################\n hpHistory = []\n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':2,\n 'l1_strides':1,\n 'l1_padding':'valid',\n 'l1_poolSize':2,\n 'l1_poolStrides':2,\n \n 'l2_filters':32,\n 'l2_kSize':4,\n 'l2_strides':1,\n 'l2_padding':'valid',\n 'l2_poolSize':2,\n 'l2_poolStrides':2,\n \n 'l3_filters':64,\n 'l3_kSize':8,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':2,\n 'l3_poolStrides':2}\n )\n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':4,\n 'l1_strides':1,\n 'l1_padding':'valid',\n 'l1_poolSize':2,\n 'l1_poolStrides':2,\n \n 'l2_filters':32,\n 'l2_kSize':4,\n 'l2_strides':1,\n 'l2_padding':'valid',\n 'l2_poolSize':2,\n 'l2_poolStrides':2,\n \n 'l3_filters':64,\n 'l3_kSize':4,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':2,\n 'l3_poolStrides':2}\n )\n \n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':4,\n 'l1_strides':2,\n 'l1_padding':'valid',\n 'l1_poolSize':2,\n 'l1_poolStrides':2,\n \n 'l2_filters':32,\n 'l2_kSize':4,\n 'l2_strides':2,\n 'l2_padding':'valid',\n 'l2_poolSize':2,\n 'l2_poolStrides':2,\n \n 'l3_filters':64,\n 'l3_kSize':4,\n 'l3_strides':2,\n 'l3_padding':'valid',\n 'l3_poolSize':2,\n 'l3_poolStrides':2}\n )\n \n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':4,\n 'l1_strides':4,\n 'l1_padding':'valid',\n 'l1_poolSize':2,\n 'l1_poolStrides':2,\n \n 'l2_filters':32,\n 'l2_kSize':4,\n 'l2_strides':2,\n 'l2_padding':'valid',\n 'l2_poolSize':2,\n 'l2_poolStrides':2,\n \n 'l3_filters':64,\n 'l3_kSize':4,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':2,\n 'l3_poolStrides':2}\n )\n \n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':8,\n 'l1_strides':1,\n 'l1_padding':'valid',\n 'l1_poolSize':4,\n 'l1_poolStrides':4,\n \n 'l2_filters':32,\n 'l2_kSize':4,\n 'l2_strides':1,\n 'l2_padding':'valid',\n 'l2_poolSize':4,\n 'l2_poolStrides':4,\n \n 'l3_filters':64,\n 'l3_kSize':2,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':4,\n 'l3_poolStrides':4}\n )\n \n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':8,\n 'l1_strides':1,\n 'l1_padding':'valid',\n 'l1_poolSize':4,\n 'l1_poolStrides':4,\n \n 'l2_filters':32,\n 'l2_kSize':8,\n 'l2_strides':1,\n 'l2_padding':'valid',\n 'l2_poolSize':4,\n 'l2_poolStrides':4,\n \n 'l3_filters':64,\n 'l3_kSize':8,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':4,\n 'l3_poolStrides':4}\n )\n \n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':8,\n 'l1_strides':1,\n 'l1_padding':'valid',\n 'l1_poolSize':4,\n 'l1_poolStrides':4,\n \n 'l2_filters':32,\n 'l2_kSize':8,\n 'l2_strides':1,\n 'l2_padding':'valid',\n 'l2_poolSize':2,\n 'l2_poolStrides':2,\n \n 'l3_filters':64,\n 'l3_kSize':8,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':2,\n 'l3_poolStrides':2}\n )\n \n \n hpHistory.append({ 'l1_filters':16,\n 'l1_kSize':4,\n 'l1_strides':1,\n 'l1_padding':'valid',\n 'l1_poolSize':2,\n 'l1_poolStrides':2,\n \n 'l2_filters':32,\n 'l2_kSize':4,\n 'l2_strides':1,\n 'l2_padding':'valid',\n 'l2_poolSize':2,\n 'l2_poolStrides':2,\n \n 'l3_filters':64,\n 'l3_kSize':4,\n 'l3_strides':1,\n 'l3_padding':'valid',\n 'l3_poolSize':2,\n 'l3_poolStrides':2,\n \n 'l4_filters':64,\n 'l4_kSize':4,\n 'l4_strides':1,\n 'l4_padding':'valid',\n 'l4_poolSize':2,\n 'l4_poolStrides':2}\n )\n\n \n \n\n\n # Loop through the different param settings. 
###########################\n\n for iSetting in range(len(hpHistory)):\n current_setting = hpHistory[iSetting]\n print('Testing setting {n:g} ***************************************************************************'.format(n = iSetting))\n startTime = time.time()\n print('Setting up model.')\n # Build the CNN. #######################################################\n model = Sequential()\n\n # First convolutional layer.\n model.add( Conv2D( filters = hpHistory[iSetting]['l1_filters'],\n kernel_size = hpHistory[iSetting]['l1_kSize'],\n strides = hpHistory[iSetting]['l1_strides'],\n padding = hpHistory[iSetting]['l1_padding'],\n activation = 'relu',\n input_shape=train_tensors[0].shape,\n name = 'conv_1'\n )\n )\n model.add( MaxPooling2D( pool_size = hpHistory[iSetting]['l1_poolSize'],\n strides = hpHistory[iSetting]['l1_poolStrides'],\n padding = hpHistory[iSetting]['l1_padding'],\n name = 'pool_1'\n )\n )\n\n # Second convolutional layer.\n if 'l2_kSize' in hpHistory[iSetting].keys():\n model.add( Conv2D( filters = hpHistory[iSetting]['l2_filters'],\n kernel_size = hpHistory[iSetting]['l2_kSize'],\n strides = hpHistory[iSetting]['l2_strides'],\n padding = hpHistory[iSetting]['l2_padding'],\n activation = 'relu',\n name = 'conv_2' ))\n model.add( MaxPooling2D( pool_size = hpHistory[iSetting]['l2_poolSize'],\n strides = hpHistory[iSetting]['l2_poolStrides'],\n padding = hpHistory[iSetting]['l2_padding'],\n name = 'pool_2' ))\n # Third convolutional layer.\n if 'l3_kSize' in hpHistory[iSetting].keys():\n model.add( Conv2D( filters = hpHistory[iSetting]['l3_filters'],\n kernel_size = hpHistory[iSetting]['l3_kSize'],\n strides = hpHistory[iSetting]['l3_strides'],\n padding = hpHistory[iSetting]['l3_padding'],\n activation = 'relu',\n name = 'conv_3' ))\n model.add( MaxPooling2D( pool_size = hpHistory[iSetting]['l3_poolSize'],\n strides = hpHistory[iSetting]['l3_poolStrides'],\n padding = hpHistory[iSetting]['l3_padding'],\n name = 'pool_3' ))\n \n # Fourth convolutional layer.\n if 'l4_kSize' in hpHistory[iSetting].keys():\n model.add( Conv2D( filters = hpHistory[iSetting]['l4_filters'],\n kernel_size = hpHistory[iSetting]['l4_kSize'],\n strides = hpHistory[iSetting]['l4_strides'],\n padding = hpHistory[iSetting]['l4_padding'],\n activation = 'relu',\n name = 'conv_4' ))\n model.add( MaxPooling2D( pool_size = hpHistory[iSetting]['l4_poolSize'],\n strides = hpHistory[iSetting]['l4_poolStrides'],\n padding = hpHistory[iSetting]['l4_padding'],\n name = 'pool_4' ))\n \n # Add global pooling layer.\n model.add( GlobalAveragePooling2D() )\n \n # Add classification layer.\n model.add( Dense(133, activation='softmax') )\n model.summary()\n\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\n\n\n \n # Train the model. #####################################################\n print('')\n print('Training model.')\n epochs = 5\n\n checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5', verbose=1, save_best_only=True)\n\n model.fit(train_tensors, train_targets, validation_data=(valid_tensors, valid_targets), epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)\n\n train_time = time.time() - startTime\n \n # Load the best weights.\n model.load_weights('saved_models/weights.best.from_scratch.hdf5')\n\n\n\n # Visualize the weights. 
###############################################\n print('')\n print('Creating weight images.')\n # dimensions of the generated pictures for each filter.\n img_width = train_tensors[0].shape[0]\n img_height = train_tensors[0].shape[1]\n\n # util function to convert a tensor into a valid image\n def deprocess_image(x):\n # normalize tensor: center on 0., ensure std is 0.1\n x -= x.mean()\n x /= (x.std() + K.epsilon())\n x *= 0.1\n\n # clip to [0, 1]\n x += 0.5\n x = np.clip(x, 0, 1)\n\n # convert to RGB array\n x *= 255\n if K.image_data_format() == 'channels_first':\n x = x.transpose((1, 2, 0))\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n # this is the placeholder for the input images\n input_img = model.input\n\n # get the symbolic outputs of each \"key\" layer (we gave them unique names).\n layer_dict = dict([(layer.name, layer) for layer in model.layers])\n\n def normalize(x):\n # utility function to normalize a tensor by its L2 norm\n return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())\n\n\n\n # The name of the layer we want to visualize.\n layer_names = []\n for name in layer_dict:\n if 'conv' in name:\n layer_names.append(name)\n #layer_name = 'conv_1'\n\n # Create weight images for each convolutional layer.\n for layer_name in layer_names:\n print(' Creating weight image for layer {n:s}'.format(n = layer_name))\n n_filters = layer_dict[layer_name].filters\n kept_filters = []\n for filter_index in range(n_filters):\n print(' Processing filter %d' % filter_index)\n start_time = time.time()\n\n # we build a loss function that maximizes the activation\n # of the nth filter of the layer considered\n layer_output = layer_dict[layer_name].output\n if K.image_data_format() == 'channels_first':\n loss = K.mean(layer_output[:, filter_index, :, :])\n else:\n loss = K.mean(layer_output[:, :, :, filter_index])\n\n # we compute the gradient of the input picture wrt this loss\n grads = K.gradients(loss, input_img)[0]\n\n # normalization trick: we normalize the gradient\n grads = normalize(grads)\n\n # this function returns the loss and grads given the input picture\n iterate = K.function([input_img], [loss, grads])\n\n # step size for gradient ascent\n step = 1.\n\n # we start from a gray image with some random noise\n if K.image_data_format() == 'channels_first':\n input_img_data = np.random.random((1, 3, img_width, img_height))\n else:\n input_img_data = np.random.random((1, img_width, img_height, 3))\n input_img_data = (input_img_data - 0.5) * 20 + 128\n\n # we run gradient ascent for 20 steps\n for i in range(30):\n loss_value, grads_value = iterate([input_img_data])\n input_img_data += grads_value * step\n\n #print('Current loss value:', loss_value)\n stdout.write('{r:s} Current loss value: {n:2.2f}'.format(r = '\\r', n = loss_value))\n stdout.flush()\n if loss_value <= 0.:\n # some filters get stuck to 0, we can skip them\n break\n print('')\n\n # Decode the resulting input image.\n img = deprocess_image(input_img_data[0])\n kept_filters.append((img, loss_value))\n end_time = time.time()\n print(' Filter %d processed in %ds' % (filter_index, end_time - start_time))\n\n\n # Create the image and save it.\n n = 8\n if n_filters <=36:\n n = 6\n if n_filters <= 25:\n n = 5\n if n_filters <= 16:\n n = 4\n if n_filters <= 9:\n n = 3\n if n_filters <=4:\n n = 2\n\n # The filters that have the highest loss are assumed to be better-looking. 
Sort by loss.\n kept_filters.sort(key=lambda x: x[1], reverse=True)\n\n # Build a black picture with enough space for all filter images.\n # Keep 5px margin between pictures.\n margin = 5\n width = n * img_width + (n - 1) * margin\n height = n * img_height + (n - 1) * margin\n stitched_filters = np.zeros((width, height, 3))\n\n # fill the picture with our saved filters\n for i in range(n):\n for j in range(n):\n try:\n img, loss = kept_filters[i * n + j]\n stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,\n (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img\n except IndexError:\n pass\n\n # Save the result to disk\n print(' Saving image.')\n cv2.imwrite('weightImages/hp{n:g}_{l:s}.png'.format(n = iSetting, l = layer_name), stitched_filters)\n\n\n\n # Test the CNN. ######################################################\n # get index of predicted dog breed for each image in test set\n dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]\n\n # Report test accuracy\n test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)\n print('')\n print('Test accuracy: %.4f%%' % test_accuracy)\n\n hpHistory[iSetting]['accuracy'] = test_accuracy\n hpHistory[iSetting]['time'] = train_time\n hpHistory[iSetting]['i'] = iSetting\n \n # Save the results.\n with open('results', 'w') as file:\n file.write(json.dumps(hpHistory))\n \n print('Done in {n:g} seconds.'.format(n = time.time() - startTime))\n print('')\n print('')\n \n\n\nif __name__ == \"__main__\":\n \n print('Loading data.')\n # load train, test, and validation datasets\n train_files, train_targets = load_dataset('dogImages/train')\n valid_files, valid_targets = load_dataset('dogImages/valid')\n test_files, test_targets = load_dataset('dogImages/test')\n\n # load list of dog names\n dog_names = [item[20:-1] for item in sorted(glob(\"dogImages/train/*/\"))]\n\n\n\n print('Preparing tensors.')\n from PIL import ImageFile \n ImageFile.LOAD_TRUNCATED_IMAGES = True \n\n # pre-process the data for Keras\n train_tensors = paths_to_tensor(train_files).astype('float32')/255\n valid_tensors = paths_to_tensor(valid_files).astype('float32')/255\n test_tensors = paths_to_tensor(test_files).astype('float32')/255\n\n\n print('Running.')\n run(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors, test_targets)", "sub_path": "step2.py", "file_name": "step2.py", "file_ext": "py", "file_size_in_byte": 20779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "sklearn.datasets.load_files", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 35, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 39, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 42, 
"usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 249, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 252, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 255, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 264, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 273, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 279, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 285, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 291, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 298, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 304, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling2D", "line_number": 310, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 313, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 325, "usage_type": "call"}, {"api_name": "time.time", "line_number": 329, "usage_type": "call"}, {"api_name": "keras.backend.epsilon", "line_number": 347, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 347, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 352, "usage_type": "call"}, {"api_name": "keras.backend.image_data_format", "line_number": 356, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 356, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 358, "usage_type": "call"}, {"api_name": "keras.backend.sqrt", "line_number": 369, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 369, "usage_type": "name"}, {"api_name": "keras.backend.mean", "line_number": 369, "usage_type": "call"}, {"api_name": "keras.backend.square", "line_number": 369, "usage_type": "call"}, {"api_name": "keras.backend.epsilon", "line_number": 369, "usage_type": "call"}, {"api_name": "time.time", "line_number": 387, "usage_type": "call"}, {"api_name": "keras.backend.image_data_format", "line_number": 392, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 392, "usage_type": "name"}, {"api_name": "keras.backend.mean", "line_number": 393, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 393, "usage_type": "name"}, {"api_name": "keras.backend.mean", "line_number": 395, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 395, "usage_type": "name"}, {"api_name": "keras.backend.gradients", "line_number": 398, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 398, "usage_type": "name"}, {"api_name": "keras.backend.function", "line_number": 404, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 404, "usage_type": "name"}, {"api_name": "keras.backend.image_data_format", "line_number": 410, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 410, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 411, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 413, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 422, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 422, "usage_type": "name"}, 
{"api_name": "sys.stdout.flush", "line_number": 423, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 423, "usage_type": "name"}, {"api_name": "time.time", "line_number": 432, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 457, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 480, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 490, "usage_type": "call"}, {"api_name": "time.time", "line_number": 492, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 507, "usage_type": "call"}, {"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 513, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 513, "usage_type": "name"}]} +{"seq_id": "233393608", "text": "###################################\n########batch_generator############\n###################################\nimport numpy as np\n\n\ndef chunker(seq, size=32):\n    # It will cut seq(a list) into lots of pieces, and len(every piece) = size\n    # e.g. chunker([1,2,3,4,5],2) = [1,2]->[3,4]->[5]\n    return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n\ndef batch_generator(list, batch_size=16):\n    # Arguments:\n    #     list:\n    #         Here should see this \"list\" as an abstract and basic parameter. \n    # Returns:\n    #     batch:\n    #         A subset of shuffled list, when this generator is called after a epoch's times, it will automatically reshuffle.\n    while True:\n        np.random.shuffle(list)\n        batches = chunker(list, batch_size)\n        for batch in batches:\n            yield batch\n\n###################################\n##################kfold############\n###################################\nfrom sklearn.model_selection import StratifiedKFold\n\ndef stratified_k_fold(x, y, k):\n    # Arguments:\n    #     x: data\n    #     y: annotation\n    #     k: splitting number\n    # Return:\n    #     split_result: splitting indices which is a list and every element is a tuple with 2 length\n    #                   tuple[0] means training set's index\n    #                   tuple[1] means testing set's index\n    skf = StratifiedKFold(n_splits=k)\n    split_result = list(skf.split(x, y))\n    return split_result\n\ndef get_a_fold(x, y, split_result, selected_fold_num):\n    # Arguments:\n    #     x: data\n    #     y: annotation\n    #     split_result: sklearn's results, splitting indices\n    #     selected_fold_num: selected part(1 ~ k)\n    # Return:\n    #     x_fold: real data, x_fold[0] training data, x_fold[1] means testing data\n    #     annotation_fold: real annotations data, y_fold[0] means training annotations,\n    #                      y_fold[1] means testing annotations\n    x_fold = []\n    x_fold.append([x[i] for i in split_result[selected_fold_num-1][0]])\n    x_fold.append([x[i] for i in split_result[selected_fold_num - 1][1]])\n\n    y_fold = []\n    y_fold.append([y[i] for i in split_result[selected_fold_num - 1][0]])\n    y_fold.append([y[i] for i in split_result[selected_fold_num - 1][1]])\n    return x_fold, y_fold\n\n\n", "sub_path": "StatisticalLearning/DataGenarator/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.random.shuffle", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", 
"line_number": 20, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "409449202", "text": "#!/usr/bin python3\nfrom collections import OrderedDict\nfrom teacher import PiggyParent\nimport sys\nimport time\n\nclass Piggy(PiggyParent):\n\n    '''\n    *************\n    SYSTEM SETUP\n    *************\n    '''\n\n    def __init__(self, addr=8, detect=True):\n        PiggyParent.__init__(self) # run the parent constructor\n\n        ''' \n        MAGIC NUMBERS <-- where we hard-code our settings\n        '''\n        self.LEFT_DEFAULT = 87\n        self.RIGHT_DEFAULT = 84\n        self.MIDPOINT = 1500\n        self.SAFE_DISTANCE = 250 \n        self.CLOSE_DISTANCE = 40 \n        self.set_motor_power(self.MOTOR_LEFT + self.MOTOR_RIGHT, 0)\n        self.load_defaults()\n    \n    def load_defaults(self):\n        \"\"\"Implements the magic numbers defined in constructor\"\"\"\n        self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n        self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n        self.set_servo(self.SERVO_1, self.MIDPOINT)\n    \n    def menu(self):\n        \"\"\"Displays menu dictionary, takes key-input and calls method\"\"\"\n        ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n        # Please feel free to change the menu and add options.\n        print(\"\\n *** MENU ***\") \n        menu = {\"n\": (\"Navigate\", self.nav),\n                \"d\": (\"Dance\", self.dance),\n                \"o\": (\"Obstacle count\", self.obstacle_count),\n                \"s\": (\"Shy\", self.shy),\n                \"f\": (\"Follow\", self.follow),\n                \"c\": (\"Calibrate\", self.calibrate),\n                \"q\": (\"Quit\", self.quit)\n                }\n        # loop and print the menu...\n        for key in sorted(menu.keys()):\n            print(key + \":\" + menu[key][0])\n        # store the user's answer\n        ans = str.lower(input(\"Your selection: \"))\n        # activate the item selected\n        menu.get(ans, [None, self.quit])[1]()\n\n    '''\n    ****************\n    STUDENT PROJECTS\n    ****************\n    '''\n\n    def dance(self):\n        \"\"\"A higher-ordered algorithm to make your robot dance\"\"\"\n        \n        if not self.safe_to_dance():\n            return False # SHUT THE DANCE DOWN\n        \n        for x in range(3):\n            self.strut()\n            self.right_twist()\n            self.strut()\n            self.left_twist()\n            self.backward_shimmey()\n            self.spinarama()\n            self.foward_shimmey()\n\n    def right_twist(self):\n        \"\"\"The robot turns in a right circle once\"\"\"\n        self.turn_by_deg(180)\n        #time.sleep(.1)\n        self.stop()\n        self.turn_by_deg(180)\n        #time.sleep(.1)\n        self.stop()\n\n    def left_twist(self):\n        \"\"\"Robot turns in a circle once to the left\"\"\"\n        self.turn_by_deg(-179)\n        #time.sleep(.1)\n        self.stop()\n        self.turn_by_deg(-179)\n        #time.sleep(.1)\n        self.stop()\n\n    def strut(self):\n        \"\"\"Robot is moving forward while looking right to left \"\"\"\n        self.fwd(left=50, right=50)\n        for x in range(2):\n            self.servo(1000)\n            time.sleep(.1) \n            self.servo(1500) # Look Straight\n            time.sleep(1)\n            self.servo(2000)\n            time.sleep(.1)\n            self.servo(1500)\n\n    def backward_shimmey(self):\n        \"\"\"Robot is moving backwards while moving his body left and right\"\"\"\n        for x in range(6):\n            self.right(primary=-70, counter=-30)\n            time.sleep(.5)\n            self.left(primary=-70, counter=-30)\n            time.sleep(.5)\n        self.stop()\n\n    def spinarama(self):\n        \"\"\"Robot moves in a circle to turn around and move forward\"\"\"\n        for x in range(6):\n            self.right(primary=-100, counter=-500)\n            time.sleep(3.5)\n            self.fwd()\n            time.sleep(1)\n        self.stop()\n\n    def foward_shimmey(self):\n        \"\"\"Robot moves forward while moving his body left and right\"\"\"\n        for x in range(6):\n            self.right(primary=60, counter=30)\n            time.sleep(.5)\n            self.left(primary=70, 
counter=30)\n            time.sleep(.5)\n            self.back()\n            time.sleep(2) \n        self.stop()\n\n\n    \n    \n    \n\n    def safe_to_dance(self):\n        \"\"\" Does a 360 distance check and returns true if safe \"\"\"\n        # check for all fail/early-termination conditions\n        for _ in range(4):\n            if self.read_distance() < 300:\n                print(\"NOT SAFE TO DANCE!\")\n                return False\n            else: \n                self.turn_by_deg(90) \n\n        #after all checks have been done. We deduce it's safe\n        print(\"SAFE TO DANCE!\")\n        return True\n\n        for x in range(3): \n            self.shake()\n\n    def shake(self):\n        self.deg_fwd(720)\n        self.stop()\n\n    def example_move(self):\n        \"\"\"this is an example dance move that should be replaced by student-created content\"\"\"\n        self.right() # start rotating right\n        time.sleep(1) # turn for a second\n        self.stop() # stop\n        self.servo(1000) # look right\n        time.sleep(.25) # give your head time to move\n        self.servo(2000) # look left\n\n    def scan(self):\n        \"\"\"Sweep the servo and populate the scan_data dictionary\"\"\"\n        for angle in range(self.MIDPOINT-400, self.MIDPOINT+401, 100):\n            self.servo(angle)\n            self.scan_data[angle] = self.read_distance()\n        #sort the scan data for easier analysis\n        self.scan_data = OrderedDict(sorted(self.scan_data.items()))\n    \n    # Robot will turn right or left based on data taken\n    def right_or_left(self):\n        \"\"\" Should I turn left or right?\n        Returns a 'r' or 'l' based on scan data\"\"\" \n        right_sum = 0\n        right_avg = 0\n        left_sum = 0\n        left_avg = 0\n        self.scan()\n        for angle in self.scan_data:\n            if angle < self.MIDPOINT:\n                right_sum += self.scan_data[angle]\n                right_avg += 1\n            else: \n                left_sum += self.scan_data[angle]\n                left_avg += 1\n\n        left_avg = left_sum / left_avg \n        right_avg = right_sum / right_avg\n\n        if left_avg > right_avg: \n            return 'l' \n        else:\n            return 'r'\n\n\n\n\n\n\n\n\n\n\n    def obstacle_count(self):\n        \"\"\"Does a 360 scan and returns the number of obstacles it sees\"\"\"\n        for x in range(6):\n            # do a scan of the area in front of the robot\n            self.scan()\n\n            see_an_object = False\n            count = 0 \n            # Do a scan and count the amount of objects in the way\n            for angle in self.scan_data:\n                dist = self.scan_data[angle]\n                if dist < self.SAFE_DISTANCE and not see_an_object: \n                    see_an_object = True\n                    count += 1\n                    print(\"~~~ I SEE SOMETHING!! ~~~\")\n                elif dist > self.SAFE_DISTANCE and see_an_object:\n                    see_an_object = False\n                    print(\"I guess the object ended\") \n                print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n            self.turn_by_deg(90)\n        print(\"\\nI saw %d objects\" % count) \n\n\n\n    def quick_check(self):\n        \"\"\" Moves the servo to three angles and performs a distance check \"\"\"\n        # loop three times and move the servo\n        for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n            self.servo(ang)\n            time.sleep(.01)\n            if self.read_distance() < self.SAFE_DISTANCE:\n                return False \n        # if the three-part check didn't freak out\n        return True\n\n    def turn_until_clear(self):\n        \"\"\" Rotate right until no obstacle is seen \"\"\"\n        print(\"Turning until clear\")\n        # make sure we're looking straight\n        self.servo(self.MIDPOINT)\n        while self.read_distance() < self.SAFE_DISTANCE + 200:\n            self.left(primary=40, counter=-40)\n            time.sleep(.05)\n        # stop motion before we end the method\n        self.turn_by_deg(25)\n        self.stop()\n\n\n\n    def nav(self):\n        \"\"\" Auto-pilot Program \"\"\"\n        print(\"-----------! NAVIGATION ACTIVATED !------------\\n\")\n        print(\"-------- [ Press CTRL + C to stop me ] --------\\n\")\n        print(\"-----------! 
NAVIGATION ACTIVATED !------------\\n\")\n \n \n exit_ang = self.get_heading() \n while True: \n if not self.quick_check(): \n self.stop()\n self.back()\n time.sleep(.5)\n self.stop() \n self.turn_to_deg(exit_ang)\n self.turn_until_clear() # biased toward one side\n else:\n self.fwd(right = 100, left = 100)\n\n # TODO: scan so we can decide left or right\n # TODO: average the right side of the scan dict\n # TODO: average the left side of the scan dict\n \n\n\n###########\n## MAIN APP\nif __name__ == \"__main__\": # only run this loop if this is the main file\n\n p = Piggy()\n\n if sys.version_info < (3, 0):\n sys.stdout.write(\"Sorry, requires Python 3.x\\n\")\n p.quit()\n\n try:\n while True: # app loop\n p.menu()\n\n except KeyboardInterrupt: # except the program gets interrupted by Ctrl+C on the keyboard.\n p.quit() \n", "sub_path": "student.py", "file_name": "student.py", "file_ext": "py", "file_size_in_byte": 9303, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "teacher.PiggyParent", "line_number": 7, "usage_type": "name"}, {"api_name": "teacher.PiggyParent.__init__", "line_number": 16, "usage_type": "call"}, {"api_name": "teacher.PiggyParent", "line_number": 16, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 102, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 129, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 131, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 177, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 243, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 256, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 275, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 294, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 295, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 295, "usage_type": "attribute"}]} +{"seq_id": "73269817", "text": "from os import listdir,path\nfrom pickle import load\nfrom face_recognition import load_image_file,face_locations,face_encodings\nimport face_training\nfrom collections import namedtuple\nimport time\n\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\nface = namedtuple('face', 'picname predictions distance neighbors')\n\ndef predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6,neighors_distance_threshold=0.7):\n faces_list = []\n with open(model_path, 'rb') as f: knn_clf = load(f)\n with open('Y_list.dict', 'rb') as F: Y_dict_mapping = load(F) \n X_img = load_image_file(X_img_path)\n X_face_locations = face_locations(X_img)\n if len(X_face_locations) == 0: \n faces_list.append(face(X_img_path,' ',' ',' '))\n return faces_list\n faces_encodings = face_encodings(X_img, known_face_locations=X_face_locations)\n closest_distances = 
knn_clf.kneighbors(faces_encodings, n_neighbors= len(Y_dict_mapping)) \n for n in range(len(closest_distances[0])):\n ni = []\n if closest_distances[0][0][n] <= distance_threshold:\n pri,dis = knn_clf.predict(faces_encodings)[n],1 - closest_distances[0][0][n]\n neighors_distance_threshold=0.7\n else :\n pri,dis = 'Unknown',0\n neighors_distance_threshold=0.8\n for j in range(len(closest_distances[0][n])):\n if closest_distances[0][n][j] <= neighors_distance_threshold:\n if not Y_dict_mapping[closest_distances[1][n][j]] == knn_clf.predict(faces_encodings)[n]:\n ni.append(Y_dict_mapping[closest_distances[1][n][j]])\n\n faces_list.append(face(X_img_path,pri,dis,ni))\n return tuple(faces_list)\n\n#if __name__ == \"__main__\":\n #print(\"Training KNN classifier...\")\n #face_training.train(\"knn_examples/train\", model_save_path=\"trained_knn_model.clf\")\n #print(\"Training complete!\")\n #t0 = time.time()\ndef main(impath=\"knn_examples/test\"):\n for image_file in listdir(impath):\n prediction = predict(path.join(impath, image_file), model_path=\"trained_knn_model.clf\")\n for i in prediction: \n if i.predictions == ' ': print ('There is no face in the Picture')\n elif len(i.neighbors) > 0 : print (i.picname,i.predictions,'---->',i.neighbors)\n elif len(i.neighbors) == 0: print (i.picname,i.predictions)\n", "sub_path": "servingstatic/face_detection_and_matching.py", "file_name": "face_detection_and_matching.py", "file_ext": "py", "file_size_in_byte": 2357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.namedtuple", "line_number": 9, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 13, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 14, "usage_type": "call"}, {"api_name": "face_recognition.load_image_file", "line_number": 15, "usage_type": "call"}, {"api_name": "face_recognition.face_locations", "line_number": 16, "usage_type": "call"}, {"api_name": "face_recognition.face_encodings", "line_number": 20, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "506119430", "text": "from bs4 import BeautifulSoup\nimport pandas as pd\nimport os\nimport lxml\nimport settings\n\n\ndef grade(name, points_per_test, comments, ok):\n#Grade Results\n results= {q[:-3]:ok.grade(q[:-3]) for q in os.listdir(\"tests\") if q.startswith('q')}\n\n #If running locally with lots of notebooks load the grades.\n df = pd.DataFrame()\n row=df.shape[0]\n df.loc[row,'student']=name #This is set in the last.\n #df.loc[row,'rcsid']=rcsid #This is set in the last.\n total_grade=0\n #This loops through the results\n for key, val in results.items():\n df.loc[row,key]=val.grade\n results_key=str(key)+\"-failed\"\n df.loc[row,key]=val.grade*points_per_test\n #We use beautiful soup to parse the tests.\n soup = BeautifulSoup(str(val.failed_tests), \"html.parser\")\n #There are multiple components, but the expected data seems most valuable.\n got = soup.get_text().split('\\\\n')[16:20]\n df.loc[row,results_key]=str(got)\n total_grade+=df.loc[row,key] #total grade\n df.loc[row,'total_grade']=total_grade\n df.loc[row,'comments']=comments\n\n if not os.path.isfile('grades.csv'):\n df.to_csv('grades.csv', index=False)\n else: # else it exists so append without writing the header\n df.to_csv('grades.csv', mode='a', 
header=False,index=False)\n return df\n", "sub_path": "notebooks/grade.py", "file_name": "grade.py", "file_ext": "py", "file_size_in_byte": 1334, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "633978450", "text": "from django.urls import path\nfrom api import views\n\n\nurlpatterns = [\n path('companies/', views.companies),\n path('companies//', views.company),\n path('companies//vacancies/', views.company_vacancies),\n path('vacancies/', views.vacancies),\n path('vacancies/', views.vacancy),\n path('vacancies/top_ten/', views.vacancies_top)\n]", "sub_path": "week11/hh_back/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "api.views.companies", "line_number": 6, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "api.views.company", "line_number": 7, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "api.views.company_vacancies", "line_number": 8, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "api.views.vacancies", "line_number": 9, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "api.views.vacancy", "line_number": 10, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "api.views.vacancies_top", "line_number": 11, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "237568197", "text": "from kivy.uix.behaviors import ButtonBehavior\nfrom kivy.uix.image import Image\nfrom screenfactory import ScreenFactory\n\nGAME_PREFIX = \"cellid\"\n\nclass CellScreen(ScreenFactory):\n def __init__(self, **kwargs):\n super(CellScreen, self).__init__(GAME_PREFIX, **kwargs)\n self.parse()\n\nclass CellButton(ButtonBehavior, Image):\n \"\"\"Determines the behavior of a cell part.\"\"\"\n def __init__(self, **kwargs):\n super(CellButton, self).__init__(**kwargs)\n self.is_current = True\n\n def on_press(self):\n if self.is_current:\n print(\"Correct\")\n self.toggle_current()\n else:\n print(\"Incorrect\")\n\n\n def toggle_current(self):\n self.is_current = not self.is_current", "sub_path": "minigame/cell.py", "file_name": "cell.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "screenfactory.ScreenFactory", 
"line_number": 7, "usage_type": "name"}, {"api_name": "kivy.uix.behaviors.ButtonBehavior", "line_number": 12, "usage_type": "name"}, {"api_name": "kivy.uix.image.Image", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "411601710", "text": "#%% INFO\n# Simple script to fetch block info from a Substrate node using:\n# https://github.com/paritytech/substrate-api-sidecar\n#\nimport requests\nimport json\nimport time\nimport pickle\nimport argparse\n\nclass Sync:\n\tdef __init__(self, endpoint, write, use_json, start_block, end_block, continue_sync, fprefix):\n\t\t# User inputs\n\t\tself.endpoint = endpoint\n\t\tself.write = write\n\t\tself.use_json = use_json\n\t\tself.start_block = start_block\n\t\tself.end_block = end_block\n\t\tself.continue_sync = continue_sync\n\t\tself.file_prefix = fprefix\n\n\t\t# Constructors\n\t\tself.blocks = []\n\n\t\tself.process_inputs()\n\t\n\tdef get_block(self, index: int):\n\t\treturn self.blocks[index]\n\n\tdef process_inputs(self):\n\t\tif self.end_block > 0:\n\t\t\tassert(self.end_block > self.start_block)\n\t\tif self.end_block == 0:\n\t\t\tself.end_block = self.get_chain_height()\n\n\t# Construct a path to some sidecar info.\n\tdef construct_url(self, path=None, param1=None, param2=None):\n\t\tbase_url = self.endpoint\n\t\tif path:\n\t\t\turl = base_url + '/' + str(path)\n\t\t\tif param1 or param1 == 0:\n\t\t\t\turl = url + '/' + str(param1)\n\t\t\t\tif param2 or param2 == 0:\n\t\t\t\t\turl = url + '/' + str(param2)\n\t\treturn url\n\n\t# Request some data from sidecar.\n\tdef sidecar_request(self, endpoint):\n\t\ttry:\n\t\t\tresponse = requests.get(endpoint)\n\t\texcept:\n\t\t\tprint('Unable to connect to sidecar.')\n\t\t\n\t\tdata = {}\n\t\tif response.ok:\n\t\t\tdata = json.loads(response.text)\n\t\telse:\n\t\t\terror_message = 'Response Error: {}'.format(response.status_code)\n\t\t\tprint(error_message)\n\t\t\tdata = { 'error' : error_message }\n\t\treturn data\n\n\t# Get the block number of the current finalized head.\n\tdef get_chain_height(self):\n\t\turl = self.construct_url('block')\n\t\tlatest_block = self.sidecar_request(url)\n\t\tif 'error' in latest_block.keys():\n\t\t\tprint('Warn! Bad response from client. Returning genesis block.')\n\t\t\treturn 0\n\t\tself.process_block(latest_block)\n\t\tchain_height = latest_block['number']\t\n\t\treturn chain_height\n\t\n\tdef fetch_block(self, number: int):\n\t\turl = self.construct_url('block', number)\n\t\tblock = self.sidecar_request(url)\n\t\tif 'error' in block.keys():\n\t\t\tprint('Warn! Bad response from client on block {}.'.format(number))\n\t\tself.process_block(block)\n\t\treturn block\n\n\t# A bunch of asserts to make sure we have a valid block. Make block number an int.\n\tdef process_block(self, block: dict, block_number=None):\n\t\tassert('number' in block.keys())\n\t\tblock['number'] = int(block['number'])\n\t\tassert('stateRoot' in block.keys())\n\t\tassert('onInitialize' in block.keys())\n\t\tassert('extrinsics' in block.keys())\n\t\tassert('onFinalize' in block.keys())\n\t\tif block_number:\n\t\t\tassert(int(block['number']) == block_number)\n\t\tif int(block['number']) % 2_000 == 0:\n\t\t\tself.print_block_info(block)\n\n\t# Print some info about a block. 
Mostly used to show that sync is progressing.\n\tdef print_block_info(self, block: dict):\n\t\tprint(\n\t\t\t'Block {:>9,} has state root {}'.format(\n\t\t\t\tint(block['number']), block['stateRoot']\n\t\t\t)\n\t\t)\n\n\t# Actually get blocks.\n\tdef sync(self, from_block=0, to_block=None):\n\t\tif not to_block:\n\t\t\tto_block = self.get_chain_height()\n\n\t\tfor block_number in range(from_block, to_block):\n\t\t\tblock = self.fetch_block(block_number)\n\t\t\tself.blocks.append(block)\n\n\t# Get the block number of the highest synced block.\n\tdef get_highest_synced(self):\n\t\thighest_synced = 0\n\t\tif len(self.blocks) > 0:\n\t\t\thighest_synced = self.blocks[-1]['number']\n\t\treturn highest_synced\n\n\t# The main logic about adding new blocks to the chain.\n\tdef add_new_blocks(self, highest_synced: int, chain_tip: int):\n\t\t# `highest_synced + 1` here because we only really want blocks with a child.\n\t\tif chain_tip == highest_synced + 1:\n\t\t\tprint('Chain synced at height {:,}'.format(chain_tip))\n\t\t\tself.sleep(10)\n\t\telif chain_tip > highest_synced + 1:\n\t\t\tself.sync(highest_synced + 1, chain_tip)\n\t\t\tself.sleep(1)\n\t\telif chain_tip < highest_synced + 1:\n\t\t\tprint('This is impossible, therefore somebody messed up.')\n\t\t\tself.sleep(10)\n\n\t# Wait, but if interrupted, exit.\n\tdef sleep(self, sec: int):\n\t\ttry:\n\t\t\ttime.sleep(sec)\n\t\texcept KeyboardInterrupt:\n\t\t\tself.write_and_exit()\n\n\t# Ask user if they want to save the block data and then exit.\n\tdef write_and_exit(self):\n\t\tsavedata = input('Do you want to save the block data? (y/N): ')\n\t\tif savedata.lower() == 'y':\n\t\t\tself.write_to_file()\n\t\texit()\n\n\t# Write all blocks to a single file.\n\tdef write_to_file(self):\n\t\tfname = input('Input a filename: ')\n\t\tif self.use_json:\n\t\t\tfn = fname + '.data'\n\t\t\twith open(fn, 'w') as f:\n\t\t\t\tjson.dump(self.blocks, f)\n\t\telse:\n\t\t\tfn = fname + '.pickle'\n\t\t\twith open(fn, 'wb') as f:\n\t\t\t\tpickle.dump(self.blocks, f)\n\n\t# Write a single block to a JSON file.\n\tdef write_block_to_file(self, block, reason='info'):\n\t\tfname = 'blocks/{}-{}-{}.json'.format(\n\t\t\tself.file_prefix,\n\t\t\tblock['number'],\n\t\t\treason\n\t\t)\n\t\twith open(fname, 'w') as f:\n\t\t\tjson.dump(block, f, indent=2)\n\n\t# Read blocks from a file.\n\tdef read_from_file(self, start_desired: int, end_desired: int):\n\t\tprint('Importing blocks...')\n\t\ttry:\n\t\t\tif self.use_json:\n\t\t\t\tfname = self.file_prefix + '.data'\n\t\t\t\twith open(fname, 'r') as f:\n\t\t\t\t\tself.blocks = json.load(f)\n\t\t\telse:\n\t\t\t\tfname = self.file_prefix + '.pickle'\n\t\t\t\twith open(fname, 'rb') as f:\n\t\t\t\t\tself.blocks = pickle.load(f)\n\t\texcept:\n\t\t\tprint('No data file.')\n\t\t\tself.blocks = []\n\t\tif self.blocks:\n\t\t\tprint('Imported {:,} blocks.'.format(len(self.blocks)))\n\t\t\tstart_block = self.blocks[0]['number']\n\t\t\tend_block = self.blocks[-1]['number']\n\t\t\tif start_block <= start_desired and end_block >= end_desired:\n\t\t\t\t# TODO: Prune to desired set.\n\t\t\t\tprint('Imported blocks {} to {}.'.format(start_block, end_block))\n\t\t\telse:\n\t\t\t\t# TODO: Return the partial set and sync around it.\n\t\t\t\tself.blocks = []\n\t\t\t\tprint('Block data exists but does not cover desired blocks.')\n\ndef parse_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t'-w', '--write-files',\n\t\thelp='Write blocks that have duplicate transactions to 
files.',\n\t\taction='store_true'\n\t)\n\tparser.add_argument(\n\t\t'-j', '--json',\n\t\thelp='Import blocks from JSON (plaintext) file. Slower than the default, pickle.',\n\t\taction='store_true'\n\t)\n\targs = parser.parse_args()\n\n\twrite = args.write_files\n\tuse_json = args.json\n\treturn (write, use_json)\n\nif __name__ == \"__main__\":\n\t(write, use_json) = parse_args()\n\tstart_block = 2349900\n\tmax_block = 0\n\n\tendpoint = 'http://127.0.0.1:8080'\n\tsyncer = Sync(endpoint, write, use_json, start_block, max_block, True, 'blockdata')\n\n\tif max_block == 0:\n\t\tmax_block = syncer.get_chain_height()\n\tprint('Starting sync from block {} to block {}'.format(start_block, max_block))\n\tblocks = syncer.sync(start_block, max_block)\n\t# blocks = read_from_file(0, 10)\n\n\tif syncer.continue_sync:\n\t\twhile True:\n\t\t\thighest_synced = syncer.get_highest_synced()\n\t\t\tchain_tip = syncer.get_chain_height()\n\t\t\tblocks = syncer.add_new_blocks(highest_synced, chain_tip)\n\telse:\n\t\tsyncer.write_and_exit()\n", "sub_path": "sync.py", "file_name": "sync.py", "file_ext": "py", "file_size_in_byte": 6835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 135, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 152, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 156, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 166, "usage_type": "call"}, {"api_name": "json.load", "line_number": 175, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 179, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "29452654", "text": "# Imports\nimport time\nimport datetime\nimport win32api\nimport socket\nimport _thread\n\nif socket.gethostname().find('.')>=0:\n hostname=socket.gethostname()\nelse:\n hostname=socket.gethostbyaddr(socket.gethostname())[0]\n\n# Functions\ndef main():\n\tidleflag = False\n\twork_start, work_stop = 0, 0\n\tidle_start, idle_stop = 0, 0\n\t\n\tx1 = win32api.GetLastInputInfo()\n\t\n\twhile True:\n\t\ttime.sleep(1)\n\t\tx2 = win32api.GetLastInputInfo()\n\t\tif x1 == x2:\n\t\t\tif not idleflag:\n\t\t\t\twork_stop = time.clock()\n\t\t\t\tidle_start = work_stop\n\t\t\t\twith open(datetime.datetime.now().strftime(\"%m%d\") + \"_\" + hostname + \"_idle.csv\", \"a\") as w:\n\t\t\t\t\tmsg = \"Idle Start,\" + str(datetime.datetime.now()) + \",Worked Duration,\" + str(work_stop-work_start) + \"\\n\"\n\t\t\t\t\tw.write(msg)\n\t\t\t\tw.close()\n\t\t\t\tidleflag = True\n\t\telse:\n\t\t\tif idleflag:\n\t\t\t\tidle_stop = time.clock()\n\t\t\t\twork_start = idle_stop\n\t\t\t\twith open(datetime.datetime.now().strftime(\"%m%d\") + \"_\" + hostname + \"_idle.csv\", \"a\") as w:\n\t\t\t\t\tmsg = \"Idle Stop,\" + str(datetime.datetime.now()) + \",Idle Duration,\" + str(idle_stop-idle_start) + \"\\n\"\n\t\t\t\t\tw.write(msg)\n\t\t\t\tw.close()\n\t\t\t\tidleflag = False\n\t\tx1 = x2\n\t\t\t\n# __main__()\nif __name__ == \"__main__\":\n\ttry:\n\t\t_thread.start_new_thread( main, () )\n\texcept:\n\t\tprint(\"Error: unable to start thread\")\n\t\t\n\twhile 1:\n\t\tpass", "sub_path": "Py-dle.py", "file_name": "Py-dle.py", "file_ext": "py", "file_size_in_byte": 1289, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "51", "api": [{"api_name": "socket.gethostname", "line_number": 8, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.gethostbyaddr", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 11, "usage_type": "call"}, {"api_name": "win32api.GetLastInputInfo", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "win32api.GetLastInputInfo", "line_number": 23, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "attribute"}, {"api_name": "_thread.start_new_thread", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "262590487", "text": "import numpy as np\nimport os.path as osp\nimport tensorflow as tf\nimport gym\nimport time\n\nfrom core import ReplayBuffer\nfrom spinup.algos.tf1.td3 import core\nfrom spinup.algos.tf1.td3.core import get_vars\nfrom spinup.user_config import DEFAULT_DATA_DIR\nfrom spinup.utils.logx import EpochLogger\nfrom spinup.utils.test_policy import load_policy_and_env\n\n\ndef td3(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=None, \n steps_per_epoch=10000, epochs=10000, replay_size=int(2.5e6), gamma=0.99, \n polyak=0.995, pi_lr=1e-4, q_lr=1e-4, batch_size=256, start_steps=10000, \n update_after=10000, update_every=50, act_noise=0.1, target_noise=0.1, \n noise_clip=0.5, policy_delay=2, num_test_episodes=50, max_ep_len=900, \n logger_kwargs=dict(), save_freq=1, sess=None, load_1vs1=\"\", num=0,\n render=False, test_env_fn=None, use_es=True):\n \n \"\"\"\n Twin Delayed Deep Deterministic Policy Gradient (TD3)\n\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_critic: A function which takes in placeholder symbols \n for state, ``x_ph``, and action, ``a_ph``, and returns the main \n outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``pi`` (batch, act_dim) | Deterministically computes actions\n | from policy given states.\n ``q1`` (batch,) | Gives one estimate of Q* for \n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``q2`` (batch,) | Gives another estimate of Q* for \n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``q1_pi`` (batch,) | Gives the composition of ``q1`` and \n | ``pi`` for states in ``x_ph``: \n | q1(x, pi(x)).\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_critic \n function you provided to TD3.\n\n seed (int): Seed for random number generators.\n\n 
steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target \n networks. Target networks are updated towards main networks \n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow \n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually \n close to 1.)\n\n pi_lr (float): Learning rate for policy.\n\n q_lr (float): Learning rate for Q-networks.\n\n batch_size (int): Minibatch size for SGD.\n\n start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. Helps exploration.\n\n update_after (int): Number of env interactions to collect before\n starting to do gradient descent updates. Ensures replay buffer\n is full enough for useful updates.\n\n update_every (int): Number of env interactions that should elapse\n between gradient descent updates. Note: Regardless of how long \n you wait between updates, the ratio of env steps to gradient steps \n is locked to 1.\n \n act_noise (float): Stddev for Gaussian exploration noise added to \n policy at training time. (At test time, no noise is added.)\n\n target_noise (float): Stddev for smoothing noise added to target \n policy.\n\n noise_clip (float): Limit for absolute value of target policy \n smoothing noise.\n\n policy_delay (int): Policy will only be updated once every \n policy_delay times for each update of the Q-networks.\n\n num_test_episodes (int): Number of episodes to test the deterministic\n policy at the end of each epoch.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n max_ep_ret = -1e6\n success_rate = 0\n\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n env, test_env_fn = env_fn(), test_env_fn if test_env_fn is not None else env_fn\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = env.action_space\n num_players = env.num_players\n assert num_players == 4\n assert num_players == test_env_fn().num_players\n \n # Define indexes to use based on usage of experience sharing\n es_1_idxs = [0, 1, 2] if use_es else [2]\n es_2_idxs = [0, 1, 3] if use_es else [3]\n es_rb_idxs = [0, 1, 2, 3] if use_es else [2, 3]\n\n if sess is None:\n sess = tf.Session()\n\n # Inputs to computation graph\n x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n with tf.variable_scope('player_1'):\n pi_1, q1_1, q2_1, q1_pi_1 = actor_critic(x_ph, a_ph, **ac_kwargs)\n with tf.variable_scope('player_2'):\n pi_2, q1_2, q2_2, q1_pi_2 = actor_critic(x_ph, a_ph, **ac_kwargs)\n \n # Target policy network\n with tf.variable_scope('target'):\n with tf.variable_scope('player_1'):\n pi_targ_1, 
_, _, _ = actor_critic(x2_ph, a_ph, **ac_kwargs)\n with tf.variable_scope('player_2'):\n pi_targ_2, _, _, _ = actor_critic(x2_ph, a_ph, **ac_kwargs)\n \n # Target Q networks\n with tf.variable_scope('target', reuse=True):\n\n # Target policy smoothing, by adding clipped noise to target actions\n epsilon_1 = tf.random_normal(tf.shape(pi_targ_1), stddev=target_noise)\n epsilon_1 = tf.clip_by_value(epsilon_1, -noise_clip, noise_clip)\n a2_1 = pi_targ_1 + epsilon_1\n a2_1 = tf.clip_by_value(a2_1, -act_limit, act_limit)\n \n epsilon_2 = tf.random_normal(tf.shape(pi_targ_2), stddev=target_noise)\n epsilon_2 = tf.clip_by_value(epsilon_2, -noise_clip, noise_clip)\n a2_2 = pi_targ_2 + epsilon_2\n a2_2 = tf.clip_by_value(a2_2, -act_limit, act_limit)\n\n # Target Q-values, using action from target policy\n with tf.variable_scope('player_1'):\n _, q1_targ_1, q2_targ_1, _ = actor_critic(x2_ph, a2_1, **ac_kwargs)\n with tf.variable_scope('player_2'):\n _, q1_targ_2, q2_targ_2, _ = actor_critic(x2_ph, a2_2, **ac_kwargs)\n \n\n # Experience buffer\n replay_buffer = {i: ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size) for i in es_rb_idxs}\n\n # Count variables\n var_counts = tuple(2 * core.count_vars(scope) for scope in ['main/player_1/pi', 'main/player_1/q1', \n 'main/player_1/q2', 'main/player_1/'])\n print('\\nNumber of parameters: \\t pi: %d, \\t q1: %d, \\t q2: %d, \\t total: %d\\n'%var_counts)\n\n # Bellman backup for Q functions, using Clipped Double-Q targets\n min_q_targ_1 = tf.minimum(q1_targ_1, q2_targ_1)\n min_q_targ_2 = tf.minimum(q1_targ_2, q2_targ_2)\n backup_1 = tf.stop_gradient(r_ph + gamma * (1 - d_ph) * min_q_targ_1)\n backup_2 = tf.stop_gradient(r_ph + gamma * (1 - d_ph) * min_q_targ_2)\n\n # TD3 losses\n pi_loss_1 = -tf.reduce_mean(q1_pi_1)\n q1_loss_1 = tf.reduce_mean((q1_1 - backup_1) ** 2)\n q2_loss_1 = tf.reduce_mean((q2_1 - backup_1) ** 2)\n q_loss_1 = q1_loss_1 + q2_loss_1\n \n pi_loss_2 = -tf.reduce_mean(q1_pi_2)\n q1_loss_2 = tf.reduce_mean((q1_2 - backup_2) ** 2)\n q2_loss_2 = tf.reduce_mean((q2_2 - backup_2) ** 2)\n q_loss_2 = q1_loss_2 + q2_loss_2\n\n # Separate train ops for pi, q\n pi_optimizer_1 = tf.train.AdamOptimizer(learning_rate=pi_lr)\n q_optimizer_1 = tf.train.AdamOptimizer(learning_rate=q_lr)\n \n pi_optimizer_2 = tf.train.AdamOptimizer(learning_rate=pi_lr)\n q_optimizer_2 = tf.train.AdamOptimizer(learning_rate=q_lr)\n\n train_pi_op_1 = pi_optimizer_1.minimize(pi_loss_1, var_list=(get_vars('main/player_1/pi')))\n train_pi_op_2 = pi_optimizer_2.minimize(pi_loss_2, var_list=(get_vars('main/player_2/pi')))\n \n train_q_op_1 = q_optimizer_1.minimize(q_loss_1, var_list=(get_vars('main/player_1/q')))\n train_q_op_2 = q_optimizer_2.minimize(q_loss_2, var_list=(get_vars('main/player_2/q')))\n\n sess.run(tf.global_variables_initializer())\n \n assert len(load_1vs1) == 2\n assert 2 == len(num)\n \n g1 = tf.Graph()\n with g1.as_default():\n __, _get_1v1_action_p1 = load_policy_and_env(osp.join(DEFAULT_DATA_DIR, load_1vs1[0]), num[0], sess=None)\n \n g2 = tf.Graph()\n with g2.as_default():\n __, _get_1v1_action_p2 = load_policy_and_env(osp.join(DEFAULT_DATA_DIR, load_1vs1[1]), num[1], sess=None)\n \n get_1v1_action_1 = lambda x: [_get_1v1_action_p1(x)]\n get_1v1_action_2 = lambda x: [_get_1v1_action_p2(x)]\n\n # Polyak averaging for target variables\n target_update_1 = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)\n for v_main, v_targ in zip(get_vars('main/player_1'), get_vars('target/player_1'))])\n \n target_update_2 = 
tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)\n for v_main, v_targ in zip(get_vars('main/player_2'), get_vars('target/player_2'))])\n\n # Initializing targets to match main variables\n target_init_1 = tf.group([tf.assign(v_targ, v_main)\n for v_main, v_targ in zip(get_vars('main/player_1'), get_vars('target/player_1'))])\n \n target_init_2 = tf.group([tf.assign(v_targ, v_main)\n for v_main, v_targ in zip(get_vars('main/player_2'), get_vars('target/player_2'))])\n\n sess.run([target_init_1, target_init_2])\n\n # Setup model saving\n logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph}, outputs={'pi_1': pi_1, 'q1_1': q1_1, 'q2_1': q2_1,\n 'pi_2': pi_2, 'q1_2': q1_2, 'q2_2': q2_2})\n\n def get_action(o, noise_scale, batch_size=1):\n a_1 = sess.run(pi_1, feed_dict={x_ph: o[::2].reshape(batch_size, -1)})\n a_2 = sess.run(pi_2, feed_dict={x_ph: o[1::2].reshape(batch_size, -1)})\n a = np.zeros((a_1.shape[0] + a_2.shape[0], a_1.shape[1]))\n a[::2] = a_1\n a[1::2] = a_2\n a += noise_scale * np.random.randn(batch_size, act_dim)\n return [np.ravel(x) for x in np.split(np.clip(a, -act_limit, act_limit), 2 * batch_size, axis=0)]\n\n def test_agent():\n success_rate = 0\n avg_ret = np.zeros(num_players)\n test_env = test_env_fn()\n max_ep_len = test_env.time_limit // test_env.control_timestep\n for j in range(num_test_episodes):\n o = test_env.reset()\n d, ep_ret, ep_ret_sparse, ep_len = False, np.zeros(num_players), np.zeros(num_players), 0\n \n vel_to_ball = []\n spread_out = []\n intercepted = []\n intercepted_5 = []\n intercepted_10 = []\n intercepted_15 = []\n received = []\n received_5 = []\n received_10 = []\n received_15 = []\n received_p = []\n received_p_5 = []\n received_p_10 = []\n received_p_15 = []\n \n for k in range(num_players):\n \n vel_to_ball.append([])\n spread_out.append([])\n intercepted.append([])\n intercepted_5.append([])\n intercepted_10.append([])\n intercepted_15.append([])\n received.append([])\n received_5.append([])\n received_10.append([])\n received_15.append([])\n received_p.append([])\n received_p_5.append([])\n received_p_10.append([])\n received_p_15.append([])\n \n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time (noise_scale=0)\n if j % 2 == 0:\n act_1 = get_1v1_action_1(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_2(o[1][np.r_[0:18, 24:30]])\n else:\n act_1 = get_1v1_action_2(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_1(o[1][np.r_[0:18, 24:30]])\n \n a = act_1 + get_action(np.array(o[2:]), 0, (num_players - 2) // 2)\n\n o, r, d, _ = test_env.step(a)\n if j == 0 and render:\n test_env.render()\n \n for k in range(num_players):\n \n test_obs = test_env.timestep.observation[k]\n _switch_k = (2 - k - 1) if ((k < 2) and (j % 2 == 1)) else k\n \n ep_ret[_switch_k] += r[k]\n ep_ret_sparse[_switch_k] += test_env.timestep.reward[k]\n vel_to_ball[_switch_k].append(test_obs['stats_vel_to_ball'])\n spread_out[_switch_k].append(test_obs['stats_teammate_spread_out'])\n intercepted[_switch_k].append(test_obs['stats_opponent_intercepted_ball'])\n intercepted_5[_switch_k].append(test_obs['stats_opponent_intercepted_ball_5m'])\n intercepted_10[_switch_k].append(test_obs['stats_opponent_intercepted_ball_10m'])\n intercepted_15[_switch_k].append(test_obs['stats_opponent_intercepted_ball_15m'])\n received[_switch_k].append(test_obs['stats_i_received_ball'])\n received_5[_switch_k].append(test_obs['stats_i_received_ball_5m'])\n received_10[_switch_k].append(test_obs['stats_i_received_ball_10m'])\n 
received_15[_switch_k].append(test_obs['stats_i_received_ball_15m'])\n received_p[_switch_k].append(test_obs['stats_i_received_pass'])\n received_p_5[_switch_k].append(test_obs['stats_i_received_pass_5m'])\n received_p_10[_switch_k].append(test_obs['stats_i_received_pass_10m'])\n received_p_15[_switch_k].append(test_obs['stats_i_received_pass_15m'])\n \n ep_len += 1\n success_rate += (ep_len <= max_ep_len and test_env.timestep.reward[0] < 0) / num_test_episodes\n avg_ret += ep_ret / num_test_episodes\n\n ep_ret_dict = {}\n for i in range(num_players):\n ep_ret_dict[f\"TestEpRet_P{i + 1}\"] = ep_ret[i]\n ep_ret_dict[f\"TestEpRetSparse_P{i + 1}\"] = ep_ret_sparse[i]\n ep_ret_dict[f\"TestEpStatsVelToBall_P{i + 1}\"] = np.mean(vel_to_ball[i])\n ep_ret_dict[f\"TestEpStatsTeamSpreadOut_P{i + 1}\"] = np.mean(spread_out[i])\n ep_ret_dict[f\"TestEpStatsOpIntercepted_P{i + 1}\"] = np.mean(intercepted[i])\n ep_ret_dict[f\"TestEpStatsOpIntercepted_5m_P{i + 1}\"] = np.mean(intercepted_5[i])\n ep_ret_dict[f\"TestEpStatsOpIntercepted_10m_P{i + 1}\"] = np.mean(intercepted_10[i])\n ep_ret_dict[f\"TestEpStatsOpIntercepted_15m_P{i + 1}\"] = np.mean(intercepted_15[i])\n ep_ret_dict[f\"TestEpStatsIReceived_P{i + 1}\"] = np.mean(received[i])\n ep_ret_dict[f\"TestEpStatsIReceived_5m_P{i + 1}\"] = np.mean(received_5[i])\n ep_ret_dict[f\"TestEpStatsIReceived_10m_P{i + 1}\"] = np.mean(received_10[i])\n ep_ret_dict[f\"TestEpStatsIReceived_15m_P{i + 1}\"] = np.mean(received_15[i])\n ep_ret_dict[f\"TestEpStatsIReceivedPass_P{i + 1}\"] = np.mean(received_p[i])\n ep_ret_dict[f\"TestEpStatsIReceivedPass_5m_P{i + 1}\"] = np.mean(received_p_5[i])\n ep_ret_dict[f\"TestEpStatsIReceivedPass_10m_P{i + 1}\"] = np.mean(received_p_10[i])\n ep_ret_dict[f\"TestEpStatsIReceivedPass_15m_P{i + 1}\"] = np.mean(received_p_15[i])\n\n logger.store(**ep_ret_dict, TestEpLen=ep_len)\n\n return success_rate, avg_ret\n\n start_time = time.time()\n o = env.reset()\n ep_ret, ep_len = np.zeros(env.num_players), 0\n total_steps = steps_per_epoch * epochs\n epoch = 0\n pkl_saved = False\n\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n # Define whether to switch 1vs1 players\n switch = epoch % 2 == 0\n\n # Until start_steps have elapsed, randomly sample actions\n # from a uniform distribution for better exploration. Afterwards, \n # use the learned policy (with some noise, via act_noise). 
\n        # Select actions for this step.\n        \n        if switch:\n            act_1 = get_1v1_action_1(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_2(o[1][np.r_[0:18, 24:30]])\n        else:\n            act_1 = get_1v1_action_2(o[0][np.r_[0:18, 18:24]]) + get_1v1_action_1(o[1][np.r_[0:18, 24:30]])\n\n        if t > start_steps:\n            a = act_1 + get_action(np.array(o[2:]), act_noise, (num_players - 2) // 2)\n        else:\n            a = act_1 + [env.action_space.sample() for _ in range(2, num_players)]\n\n        # Step the env\n        o2, r, d, _ = env.step(a)\n        ep_ret += np.array(r)\n        ep_len += 1\n\n        # Ignore the \"done\" signal if it comes from hitting the time\n        # horizon (that is, when it's an artificial terminal signal\n        # that isn't based on the agent's state)\n        d = False if ep_len==max_ep_len else d\n\n        # Store experience to replay buffer\n        if not switch:\n            [replay_buffer[j].store(o[j], a[j], r[j], o2[j], d) for j in es_rb_idxs]\n        else:\n            [replay_buffer[2 - j - 1].store(o[j], a[j], r[j], o2[j], d) for j in es_rb_idxs if j < 2]\n            [replay_buffer[j].store(o[j], a[j], r[j], o2[j], d) for j in range(2, num_players)]\n\n        # Super critical, easy to overlook step: make sure to update \n        # most recent observation!\n        o = o2\n\n        # End of trajectory handling\n        if d or (ep_len == max_ep_len):\n            ep_ret_dict = {f\"EpRet_P{i + 1}\": ep_ret[i] for i in range(env.num_players)}\n            logger.store(**ep_ret_dict, EpLen=ep_len)\n            reached = False\n            o, ep_ret, ep_len = env.reset(), np.zeros(env.num_players), 0\n\n        # Update handling\n        if t >= update_after and t % update_every == 0:\n            for j in range(update_every):\n                \n                batch_dicts = [replay_buffer[j].sample_batch(batch_size // len(es_1_idxs)) for j in es_1_idxs]\n                batch = {key: np.concatenate([batch_dicts[i][key] for i in range(len(es_1_idxs))], axis=0) for key in batch_dicts[0].keys()}\n                feed_dict = {x_ph: batch['obs1'],\n                             x2_ph: batch['obs2'],\n                             a_ph: batch['acts'],\n                             r_ph: batch['rews'],\n                             d_ph: batch['done']\n                            }\n                q_step_ops_1 = [q_loss_1, q1_1, q2_1, train_q_op_1]\n                outs_q_1 = sess.run(q_step_ops_1, feed_dict)\n                \n                if j % policy_delay == 0:\n                    # Delayed policy update\n                    outs_pi_1 = sess.run([pi_loss_1, train_pi_op_1, target_update_1], feed_dict)\n                \n                batch_dicts = [replay_buffer[j].sample_batch(batch_size // len(es_2_idxs)) for j in es_2_idxs]\n                batch = {key: np.concatenate([batch_dicts[i][key] for i in range(len(es_2_idxs))], axis=0) for key in batch_dicts[0].keys()}\n                feed_dict = {x_ph: batch['obs1'],\n                             x2_ph: batch['obs2'],\n                             a_ph: batch['acts'],\n                             r_ph: batch['rews'],\n                             d_ph: batch['done']\n                            }\n                q_step_ops_2 = [q_loss_2, q1_2, q2_2, train_q_op_2]\n                outs_q_2 = sess.run(q_step_ops_2, feed_dict)\n                logger.store(LossQ=outs_q_1[0] + outs_q_2[0], Q1Vals=outs_q_1[1] + outs_q_2[1], Q2Vals=outs_q_1[2] + outs_q_2[2])\n                \n                if j % policy_delay == 0:\n                    # Delayed policy update\n                    outs_pi_2 = sess.run([pi_loss_2, train_pi_op_2, target_update_2], feed_dict)\n                    logger.store(LossPi=outs_pi_1[0] + outs_pi_2[0])\n\n\n        # End of epoch wrap-up\n        if (t+1) % steps_per_epoch == 0:\n            epoch = (t+1) // steps_per_epoch\n\n            # Test the performance of the deterministic version of the agent.\n            act_suc_rate, act_avg_ret = test_agent()\n\n            # Save model\n            print(f\"Best Success Rate: {int(success_rate * 100)}, Episode Return: {np.round(max_ep_ret, 2)}\")\n            print(f\"Step Success Rate: {int(act_suc_rate * 100)}, Step Episode Return: {np.round(act_avg_ret, 2)}\", end=\". 
\")\n            if ((epoch % save_freq == 0) or (epoch == epochs)) and (act_suc_rate >= success_rate):\n                logger.save_state({'env': env}, None, not(pkl_saved))\n                if not pkl_saved:\n                    pkl_saved = True\n                    tf.get_default_graph().finalize()\n                    if g1 is not None: g1.finalize()\n                    if g2 is not None: g2.finalize()\n                success_rate = act_suc_rate\n                max_ep_ret = act_avg_ret\n                print(\"Saving model ...\")\n                print(f\"New Best Success Rate: {int(success_rate * 100)}, Average Episode Return: {np.round(max_ep_ret, 2)}\")\n\n            else:\n                print(\"\")\n\n            if (((epoch % save_freq == 0) or (epoch == epochs)) and (act_suc_rate >= 0.4)) or (epoch % (save_freq * 10) == 0):\n                logger.save_state({'env': env}, t)\n                print(\"Saving model ...\")\n\n            # Log info about epoch\n            if t >= update_after:\n                logger.log_tabular('Epoch', epoch)\n\n                for i in range(num_players):\n                    logger.log_tabular(f'EpRet_P{i + 1}', with_min_and_max=True)\n                    logger.log_tabular(f'TestEpRet_P{i + 1}', with_min_and_max=True)\n                    logger.log_tabular(f'TestEpRetSparse_P{i + 1}', with_min_and_max=True)\n                    logger.log_tabular(f'TestEpStatsVelToBall_P{i + 1}', with_min_and_max=True)\n                    logger.log_tabular(f\"TestEpStatsTeamSpreadOut_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsOpIntercepted_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsOpIntercepted_5m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsOpIntercepted_10m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsOpIntercepted_15m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceived_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceived_5m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceived_10m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceived_15m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceivedPass_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceivedPass_5m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceivedPass_10m_P{i + 1}\")\n                    logger.log_tabular(f\"TestEpStatsIReceivedPass_15m_P{i + 1}\")\n\n                logger.log_tabular('EpLen', with_min_and_max=True)\n                logger.log_tabular('TestEpLen', with_min_and_max=True)\n                logger.log_tabular('TotalEnvInteracts', t)\n                logger.log_tabular('TestEpSuccessRate', act_suc_rate)\n                logger.log_tabular('Q1Vals', with_min_and_max=True)\n                logger.log_tabular('Q2Vals', with_min_and_max=True)\n                logger.log_tabular('LossPi', average_only=True)\n                logger.log_tabular('LossQ', average_only=True)\n                logger.log_tabular('Time', time.time()-start_time)\n                logger.dump_tabular()\n\nif __name__ == '__main__':\n    import argparse\n    import dm_soccer2gym\n    from math import ceil\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--gamma', type=float, default=0.99)\n    parser.add_argument('--epochs', type=int, default=2000)\n    parser.add_argument(\"--gpu\", type=float, default=-1)\n    parser.add_argument(\"--reward\", type=str, default=\"sparse\")\n    parser.add_argument(\"--control_timestep\", type=float, default=0.05)\n    parser.add_argument(\"--time_limit\", type=float, default=45.)\n    parser.add_argument(\"--use_es\", type=bool, default=True)\n    args = parser.parse_args()\n\n    from spinup.utils.run_utils import setup_logger_kwargs\n    es_tag = \"es_\" if args.use_es else \"\"\n    logger_kwargs = setup_logger_kwargs(f'td3_soccer_2vs2_{es_tag}{args.reward}_{args.control_timestep}', data_dir=osp.join(DEFAULT_DATA_DIR, \"TD3/2vs2\"), datestamp=True)\n    if args.gpu > 0:\n        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu)\n\n        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n    else:\n        sess = None\n    \n    td3(lambda : dm_soccer2gym.make('2vs2', task_kwargs={\"rew_type\": 
args.reward, \"time_limit\": args.time_limit, \"disable_jump\": True, \n \"dist_thresh\": 0.03, 'control_timestep': args.control_timestep, 'observables': 'all'}), \n test_env_fn=lambda : dm_soccer2gym.make('2vs2', task_kwargs={\"rew_type\": \"simple_v2\", \"time_limit\": args.time_limit, \"disable_jump\": True, \n \"dist_thresh\": 0.03, 'control_timestep': 0.05, 'random_state': 69, 'observables': 'all'}),\n actor_critic=core.mlp_actor_critic_heads_v2,\n gamma=args.gamma, epochs=args.epochs,\n logger_kwargs=logger_kwargs,\n sess=sess, max_ep_len=ceil(args.time_limit / args.control_timestep),\n load_1vs1=[\"TD3/1vs1/2020-10-08_23-06-32_td3_soccer_1vs1_dense_0.05\",\n \"TD3/1vs1/2020-10-08_23-07-33_td3_soccer_1vs1_dense_0.05\"],\n num=[9389999, 8629999], use_es=args.use_es)\n", "sub_path": "spinup/algos/tf1/td3/td3_goal_2vs2.py", "file_name": "td3_goal_2vs2.py", "file_ext": "py", "file_size_in_byte": 27230, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "spinup.algos.tf1.td3.core.mlp_actor_critic", "line_number": 15, "usage_type": "attribute"}, {"api_name": "spinup.algos.tf1.td3.core", "line_number": 15, "usage_type": "name"}, {"api_name": "spinup.utils.logx.EpochLogger", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 144, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.placeholders", "line_number": 147, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core", "line_number": 147, "usage_type": "name"}, {"api_name": "tensorflow.variable_scope", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 180, "usage_type": "call"}, {"api_name": "core.ReplayBuffer", "line_number": 185, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.count_vars", "line_number": 188, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core", "line_number": 188, "usage_type": "name"}, {"api_name": "tensorflow.minimum", "line_number": 193, 
"usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.stop_gradient", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.stop_gradient", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 211, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 211, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 213, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 214, "usage_type": "attribute"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 216, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 217, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 219, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 220, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 222, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 227, "usage_type": "call"}, {"api_name": "spinup.utils.test_policy.load_policy_and_env", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "spinup.user_config.DEFAULT_DATA_DIR", "line_number": 229, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 229, "usage_type": "name"}, {"api_name": "tensorflow.Graph", "line_number": 231, "usage_type": "call"}, {"api_name": "spinup.utils.test_policy.load_policy_and_env", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 233, "usage_type": "call"}, {"api_name": "spinup.user_config.DEFAULT_DATA_DIR", "line_number": 233, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 233, "usage_type": "name"}, {"api_name": "tensorflow.group", "line_number": 239, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 239, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 240, "usage_type": "call"}, {"api_name": "tensorflow.group", "line_number": 242, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 242, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 243, "usage_type": "call"}, {"api_name": "tensorflow.group", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 246, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 247, "usage_type": "call"}, {"api_name": "tensorflow.group", "line_number": 249, 
"usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 249, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.get_vars", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 264, "usage_type": "attribute"}, {"api_name": "numpy.ravel", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 311, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 364, "usage_type": "call"}, {"api_name": "time.time", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 388, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 390, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 470, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 471, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 482, "usage_type": "call"}, {"api_name": "time.time", "line_number": 522, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 529, "usage_type": "call"}, {"api_name": "spinup.utils.run_utils.setup_logger_kwargs", "line_number": 541, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 541, "usage_type": "call"}, {"api_name": "spinup.user_config.DEFAULT_DATA_DIR", "line_number": 541, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 541, "usage_type": "name"}, {"api_name": "tensorflow.GPUOptions", "line_number": 543, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 545, 
"usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 545, "usage_type": "call"}, {"api_name": "dm_soccer2gym.make", "line_number": 549, "usage_type": "call"}, {"api_name": "dm_soccer2gym.make", "line_number": 551, "usage_type": "call"}, {"api_name": "spinup.algos.tf1.td3.core.mlp_actor_critic_heads_v2", "line_number": 553, "usage_type": "attribute"}, {"api_name": "spinup.algos.tf1.td3.core", "line_number": 553, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 556, "usage_type": "call"}]} +{"seq_id": "302040004", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom tools.tools_r.smt.smt_getcid import get_cid,get_prama\nfrom gm_work.items import GmWorkItem\nimport json\nfrom scrapy_redis.spiders import RedisSpider\n\n\nclass SmtGoodsSpider(RedisSpider):\n goods_num = 0\n name = 'smt_goods'\n allowed_domains = ['aliexpress.com']\n start_urls = ['http://m.aliexpress.com']\n redis_key = \"smt_goods:start_url\"\n seeds_file = r\"C:\\Users\\admin\\Desktop\\{smt_shopid_201910_有效}[店铺ID,卖家ID].txt\"\n def start_requests(self):\n yield scrapy.Request(url=\"https://www.baidu.com\",dont_filter=True)\n\n def try_again(self,response,max_num=5,priority_adjust=0):\n try_num = response.meta.get(\"try_num\", 0) + 1\n if try_num < max_num:\n retryreq = response.request.copy()\n retryreq.meta['try_num'] = try_num\n retryreq.dont_filter = True\n retryreq.priority = response.request.priority + priority_adjust\n return retryreq\n else:\n print(\"错误大于5次\")\n\n def parse(self,response):\n for i in self.from_file(self.seeds_file):\n i = i.strip()\n if \",\" in i:\n shop_id = i.split(\",\",1)[0]\n seller_id = i.split(\",\",1)[1]\n page_id = get_prama(get_cid())\n cookies = \"aefeMsite=amp--wRru0loiCNZjcQEqYc1Ew; ali_apache_id=11.180.122.26.1575437527682.392996.5; isg=BDEx-5kOyCf7m2SmkQaxvTBcQL0LtqIM-G1_rBNGL_giOlOMW256Y8wcWIj58j3I\"\n num = 0\n url = \"https://m.aliexpress.com/api/search/products/items?pageId={}&searchType=storeSearch&sellerAdminSeq={}&storeId={}&infiniteScroll=true&start={}&shipToCountry=US&__amp_source_origin=https%3A%2F%2Fm.aliexpress.com\"\n Referer_str = \"https://m.aliexpress.com/storesearch/list/.html?sortType=TC3_D&searchType=storeSearch&trace=store2mobilestoreNew&storeId={}\"\n Referer = Referer_str.format(shop_id)\n url = url.format(page_id,seller_id,shop_id,num)\n headers = self.get_headers()\n headers[\"Cookie\"] = cookies\n headers[\"Referer\"] = Referer\n meta = {\"page_id\":page_id,\n \"seller_id\":seller_id,\n \"shop_id\":shop_id}\n yield scrapy.Request(url=url,callback=self.get_detail,method=\"GET\",headers=headers,meta=meta)\n\n def get_detail(self, response):\n meta = response.meta\n json_str = response.text\n req_url = response.url\n seller_id = meta.get(\"seller_id\")\n shop_id = meta.get(\"shop_id\")\n page_id = meta.get(\"page_id\")\n if json_str.startswith('{\"'):\n item_s = GmWorkItem()\n item_s[\"source_code\"] = json_str\n yield item_s\n json_data = json.loads(json_str)\n # success = json_data.get(\"success\")\n data = json_data.get(\"data\")\n # nextUrl = data.get(\"nextUrl\")\n items = data.get(\"items\")\n # if not items:\n # print(\"item为空\",shop_id,req_url)\n\n\n trace = data.get(\"trace\")\n page = trace.get(\"page\")\n\n aem_count = int(page.get(\"aem_count\")) if page.get(\"aem_count\") else 0\n if aem_count:\n self.goods_num += aem_count\n if self.goods_num%100000==1:\n print(self.goods_num)\n\n\n for i in range(20, aem_count, 20):\n url = 
\"https://m.aliexpress.com/api/search/products/items?pageId={}&searchType=storeSearch&sellerAdminSeq={}&storeId={}&infiniteScroll=true&start={}&shipToCountry=US&__amp_source_origin=https%3A%2F%2Fm.aliexpress.com\"\n Referer_str = \"https://m.aliexpress.com/storesearch/list/.html?sortType=TC3_D&searchType=storeSearch&trace=store2mobilestoreNew&storeId={}\"\n cookies = \"aefeMsite=amp--wRru0loiCNZjcQEqYc1Ew; ali_apache_id=11.180.122.26.1575437527682.392996.5; isg=BDEx-5kOyCf7m2SmkQaxvTBcQL0LtqIM-G1_rBNGL_giOlOMW256Y8wcWIj58j3I\"\n\n Referer = Referer_str.format(shop_id)\n url = url.format(page_id,seller_id,shop_id,i)\n headers = self.get_headers()\n headers[\"Cookie\"] = cookies\n headers[\"Referer\"] = Referer\n meta = {\"page_id\": page_id,\n \"seller_id\": seller_id,\n \"shop_id\": shop_id}\n yield scrapy.Request(url=url, callback=self.get_detail, method=\"GET\", headers=headers, meta=meta)\n\n for good in items:\n item = GmWorkItem()\n goods_url = good.get(\"action\")\n averageStarStr = good.get(\"averageStarStr\")\n imgUrl = good.get(\"imgUrl\")\n\n price = good.get(\"price\")\n price1 = price.get(\"price\")\n price_currency = price1.get(\"currency\")\n price_value = price1.get(\"value\")\n productId = good.get(\"productId\")\n subject = good.get(\"subject\")\n\n item[\"shop_id\"] = shop_id\n item[\"seller_id\"] = seller_id\n item[\"goods_url\"] = goods_url\n item[\"average_score\"] = averageStarStr\n item[\"img_url\"] = imgUrl\n item[\"currency\"] = price_currency\n item[\"price\"] = price_value\n item[\"goods_id\"] = productId\n item[\"subject\"] = subject\n item[\"shop_id\"] = shop_id\n item[\"aem_count\"] = aem_count\n\n item[\"pipeline_level\"] = \"smt商品列表\"\n yield item\n else:\n yield self.try_again(response)\n\n def get_headers(self):\n headers = {\n \"Host\": \"m.vi.aliexpress.com\",\n \"Connection\": \"keep-alive\",\n \"Accept\": \"application/json\",\n \"AMP-Same-Origin\": \"true\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cookie\": \"isg=BLe25me9pv6r8iJyBJaO7Y-BRqvB1IxEwYs0IwllWgHzuNT6FU0TLoLWnlhDUGNW\"\n }\n return headers\n\n def from_file(self,file_name):\n with open(file_name,\"r\",encoding=\"utf-8\") as f:\n for i in f:\n yield i\n", "sub_path": "gm_work/gm_work/spiders/smt_goods.py", "file_name": "smt_goods.py", "file_ext": "py", "file_size_in_byte": 6532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scrapy_redis.spiders.RedisSpider", "line_number": 9, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 17, "usage_type": "call"}, {"api_name": "tools.tools_r.smt.smt_getcid.get_prama", "line_number": 36, "usage_type": "call"}, {"api_name": "tools.tools_r.smt.smt_getcid.get_cid", "line_number": 36, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 49, "usage_type": "call"}, {"api_name": "gm_work.items.GmWorkItem", "line_number": 59, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 94, "usage_type": "call"}, {"api_name": "gm_work.items.GmWorkItem", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "489008567", "text": "from __future__ import print_function\nimport os\nimport sys\nimport time\nimport json\nimport 
logging\nimport logging.config\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\nfrom pkg_resources import resource_filename\n\nfrom . import get_actions, notify\n\n# The error you get for a nonexistent file is different on py2 vs py3.\nif sys.version_info.major > 2:\n FileNotFound = FileNotFoundError\nelse:\n FileNotFound = IOError\n\ndef run_forever():\n \"\"\"\n Command-line interface.\n Every minute, send all the notifications for all the boards.\n \"\"\"\n \n # Parse command-line args\n parser = ArgumentParser()\n parser.add_argument('config_file', type=str,\n help='Python file to load configuration from')\n parser.add_argument('-d', type=str, dest='directory', default='.',\n help='Directory in which to save/read state')\n parser.add_argument('-i', type=int, dest='interval', default=60,\n help='Number of seconds to sleep between rounds')\n parser.add_argument('--debug', action='store_true',\n help=('Print actions and messages, and don\\'t actually'\n ' send to HipChat'))\n args = parser.parse_args()\n\n # Set up logging\n cfg_file = resource_filename(\n __name__, 'logging_debug.cfg' if args.debug else 'logging.cfg')\n logger = logging.getLogger(__name__)\n logging.config.fileConfig(cfg_file, disable_existing_loggers=False)\n \n # Load config file\n try:\n if sys.version_info.major > 2:\n import importlib\n config = importlib.machinery.SourceFileLoader(\n 'config', args.config_file).load_module()\n else:\n import imp\n with open(args.config_file) as f:\n config = imp.load_module('config', f, args.config_file,\n ('.py', 'r', imp.PY_SOURCE))\n\n except (FileNotFound, SyntaxError):\n logger.error('Unable to import file %s', args.config_file)\n sys.exit(1)\n\n if not config.MONITOR:\n logger.error('Nothing to monitor!')\n sys.exit(2)\n\n interval = max(0, args.interval)\n\n state_file = os.path.join(args.directory, 'last-actions.json')\n # Don't check back in time more than 20 minutes ago.\n a_while_ago = time.time() - 20*60\n last_action_times = defaultdict(lambda: a_while_ago)\n try:\n last_action_times.update(json.load(open(state_file)))\n except (FileNotFound, ValueError):\n logger.warning('Warning: no saved state found.')\n\n while True:\n # First get all the actions, to avoid doing it multiple times for the\n # same board.\n new_actions = {}\n for parameters in config.MONITOR:\n board_id = parameters['board_id']\n if board_id not in new_actions:\n (actions, new_last_time) = get_actions(\n config, last_action_times[board_id], board_id)\n new_actions[board_id] = actions\n last_action_times[board_id] = new_last_time\n\n # Then send all the HipChat notifications.\n for parameters in config.MONITOR:\n board_id = parameters['board_id']\n\n # Iterate over the actions, in reverse order because of chronology.\n # Defend against the most common type of failure: KeyError\n for action in reversed(new_actions[board_id]):\n try:\n notify(config, action, debug=args.debug, **parameters)\n except KeyError:\n logger.warn('unable to process action for notification: %r',\n action)\n\n # Save state to a file.\n with open(state_file, 'w') as f:\n json.dump(last_action_times, f)\n\n time.sleep(interval)\n", "sub_path": "trello_hipchat/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 3851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.version_info", "line_number": 15, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, 
{"api_name": "pkg_resources.resource_filename", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.config.fileConfig", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 47, "usage_type": "attribute"}, {"api_name": "importlib.machinery.SourceFileLoader", "line_number": 49, "usage_type": "call"}, {"api_name": "importlib.machinery", "line_number": 49, "usage_type": "attribute"}, {"api_name": "imp.load_module", "line_number": 54, "usage_type": "call"}, {"api_name": "imp.PY_SOURCE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 70, "usage_type": "call"}, {"api_name": "json.load", "line_number": 72, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "239386283", "text": "from bs4 import BeautifulSoup\nimport argparse as ap\nimport requests, os, pickle\n\n# Pull PostMates HTML\ndef pull_postmates_html(postmates_code):\n\turl = \"https://postmates.com/merchant/\" + postmates_code\n\tr = requests.get(url)\n\tencoding = r.encoding if 'charset' in r.headers.get('content-type', '').lower() else None\n\tsoup = BeautifulSoup(r.content, from_encoding=encoding) # BeautifulSoup produces HTML of webpage\n\treturn soup\n\ndef pull_items(soup):\n\toutput = []\n\titems = soup.find_all('h3', attrs={'class' : 'product-name css-1vuygjh e1tw3vxs3'})\n\tfor item in items:\n\t\tcategory = item.parent.parent.parent.parent.parent.find('h2', attrs={'class' : 'css-sqkt8s e1u06svg0'}).get_text()\n\t\t\n\t\t# remove popular items because they're repeats \n\t\tif \"Popular Items\" in category:\n\t\t\tcontinue\n\n\t\tname = item.get_text()\n\t\tdescription = item.parent.find('div', attrs={'class' : 'product-description css-1cwo7kl e1tw3vxs5'}).get_text()\n\t\tprice = item.parent.parent.find('div', attrs={'class' : 'css-1ju2yr7 e1tw3vxs4'}).find('span').get_text()\n\t\toutput.append([name, description, price, category, \"\"])\n\treturn output\n\n# Save menu_items as a file using pickle library (not necessarily human readable)\ndef write_menu(menu_items, postmates_code):\n\tscript_dir = os.path.abspath(os.path.join(__file__ ,\"../..\"))\n\tprint(\"Saving menu items at \" + script_dir + \"/output_menu_items/postmates/\" + postmates_code + \".txt\")\n\twith open(script_dir + \"/output_menu_items/postmates/\" + postmates_code + \".txt\", 'wb') as f:\n\t\tpickle.dump(menu_items, f)\n\nif __name__ == '__main__':\n\tparser = ap.ArgumentParser()\n\tparser.add_argument('-p', '--postmates_code', help='Postmates Restaurant Code', default='ej-sushi-chicago')\n\n\targs = vars(parser.parse_args())\n\tpostmates_code = args['postmates_code']\n\tsoup = pull_postmates_html(postmates_code)\n\toutput = pull_items(soup)\n\twrite_menu(output, postmates_code)", "sub_path": "scrape_postmates.py", "file_name": "scrape_postmates.py", "file_ext": "py", "file_size_in_byte": 1873, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "68212656", "text": "from django.http import Http404\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.cache import cache_page\n\nfrom wp_main.utilities import responses, utilities\nfrom wp_main.utilities.wp_logging import logger\n#from misc.models import wp_misc\nfrom misc import tools as misctools\n\n_log = logger('misc').log\n\n\n@cache_page(15 * 60)\n@csrf_protect\ndef view_index(request):\n \"\"\" Main index for Misc objects. \"\"\"\n \n miscobjs = misctools.get_visible_objects()\n context = {'request': request,\n 'extra_style_link_list': [utilities.get_browser_style(request),\n '/static/css/misc.min.css',\n '/static/css/highlighter.min.css'],\n 'miscobjects': miscobjs,\n }\n return responses.clean_response_req(\"misc/index.html\",\n context,\n request=request)\n \n\n@cache_page(15 * 60)\n@csrf_protect\ndef view_misc_any(request, identifier):\n \"\"\" View a specific misc item. \"\"\"\n \n misc = misctools.get_by_identifier(identifier)\n if not misc:\n # No misc item found by that identifier\n raise Http404()\n context = {'request': request,\n 'extra_style_link_list': [utilities.get_browser_style(request),\n '/static/css/misc.min.css',\n '/static/css/highlighter.min.css'],\n 'misc': misc,\n }\n return responses.clean_response_req('misc/misc.html',\n context,\n request=request)\n", "sub_path": "misc/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "wp_main.utilities.wp_logging.logger", "line_number": 10, "usage_type": "call"}, {"api_name": "misc.tools.get_visible_objects", "line_number": 18, "usage_type": "call"}, {"api_name": "misc.tools", "line_number": 18, "usage_type": "name"}, {"api_name": "wp_main.utilities.utilities.get_browser_style", "line_number": 20, "usage_type": "call"}, {"api_name": "wp_main.utilities.utilities", "line_number": 20, "usage_type": "name"}, {"api_name": "wp_main.utilities.responses.clean_response_req", "line_number": 25, "usage_type": "call"}, {"api_name": "wp_main.utilities.responses", "line_number": 25, "usage_type": "name"}, {"api_name": "django.views.decorators.cache.cache_page", "line_number": 13, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 14, "usage_type": "name"}, {"api_name": "misc.tools.get_by_identifier", "line_number": 35, "usage_type": "call"}, {"api_name": "misc.tools", "line_number": 35, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 38, "usage_type": "call"}, {"api_name": "wp_main.utilities.utilities.get_browser_style", "line_number": 40, "usage_type": "call"}, {"api_name": "wp_main.utilities.utilities", "line_number": 40, "usage_type": "name"}, {"api_name": "wp_main.utilities.responses.clean_response_req", "line_number": 45, "usage_type": "call"}, {"api_name": "wp_main.utilities.responses", 
"line_number": 45, "usage_type": "name"}, {"api_name": "django.views.decorators.cache.cache_page", "line_number": 30, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "530066595", "text": "import time, multiprocessing, queue\n\ndef sumPart(q, a1, a2):\n\ts1 = 0\n\tfor i in range(a1, a2 + 1):\n\t\ts1 += i\n\tq.put(s1)\n\nif __name__ == \"__main__\":\n\tthreadsCount = 2\n\tlimit = 2000000\n\tda = int(limit / threadsCount) + (1 if (limit % threadsCount) > 0 else 0)\n\tprint (da)\n\tthreads = []\n\tq = multiprocessing.Queue()\n\n\tstartTime = time.time()\n\n\ta = 0\n\tfor _ in range(threadsCount):\n\t\taNew = a + da\n\t\tt = multiprocessing.Process(target = sumPart, args = (q, a + 1, aNew))\n\t\tt.start()\n\t\tthreads.append(t)\n\t\ta = aNew\n\n\tfor i in range(len(threads)):\n\t\tthreads[i].join()\n\n\ts = 0\n\tfor i in range(threadsCount):\n\t\ts += q.get()\n\n\tprint (time.time() - startTime)\n\tprint (s)", "sub_path": "multiprocessingORthread/stMP.py", "file_name": "stMP.py", "file_ext": "py", "file_size_in_byte": 659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "multiprocessing.Queue", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "132052169", "text": "from lib.scrapy_table import Scrapy_Table\n\nurl=\"https://pt.wikipedia.org/wiki/C%C3%A2mara_Municipal_de_S%C3%A3o_Paulo\"\nurl_jato=\"https://pt.wikipedia.org/wiki/Lista_de_pessoas_envolvidas_na_Opera%C3%A7%C3%A3o_Lava_Jato\"\n\nsite_connect = Scrapy_Table(url)\nsite_jato = Scrapy_Table(url_jato)\n\ntables = tuple(site_connect.get_tables(5))\nlista_lava_jato = tuple(site_jato.get_tables(1))\n\nlista_investigados = ()\n\nfor investigados in lista_lava_jato[1:]:\n lista_investigados = lista_investigados + (investigados[0],)\n\nfor vereador in tables[1:]:\n if vereador[0] in lista_investigados:\n print(vereador)\n\n# vereador = \"Aécio Neves\"\n# if vereador in lista_investigados:\n# print(vereador)", "sub_path": "modulo1/Labs/myCode/ex7.py", "file_name": "ex7.py", "file_ext": "py", "file_size_in_byte": 697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "lib.scrapy_table.Scrapy_Table", "line_number": 6, "usage_type": "call"}, {"api_name": "lib.scrapy_table.Scrapy_Table", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "78999154", "text": "import numpy as np\n\ndef create_all_subject_connectivity_matrices(subjects):\n\n connectivity_matrices = []\n for subject in subjects:\n connectivity_matrices.append(np.load(subject))\n connectivity_matrices = np.array(connectivity_matrices)\n connectivity_matrices = np.swapaxes(connectivity_matrices, 0, -1)\n\n return connectivity_matrices\n\ndef norm_matrices(matrices, norm_type = 'scaling'):\n norm_matrices = matrices.copy()\n for s in range(matrices.shape[-1]):\n if norm_type == 'scaling':\n norm_matrices[norm_matrices==0] = np.nan\n norm_matrices[:,:,s] = norm_scaling(matrices[:,:,s])\n elif norm_type == 'fisher':\n norm_matrices[norm_matrices == 0] = np.nan\n norm_matrices[:,:,s] = fisher_transformation(matrices[:,:,s])\n elif norm_type == 'z-score':\n norm_matrices[norm_matrices == 0] = np.nan\n norm_matrices[:,:,s] = 
z_score(matrices[:,:,s])\n elif norm_type == 'rating':\n norm_matrices[:,:,s] = rating(matrices[:,:,s])\n\n return norm_matrices\n\ndef norm_scaling(matrix):\n norm_mat = (matrix - np.nanmin(matrix)) / (np.nanmax(matrix) - np.nanmin(matrix))\n return norm_mat\n\ndef fisher_transformation(matrix):\n matrix = np.arctanh(matrix)\n return matrix\n\ndef z_score(matrix):\n matrix = (matrix - np.nanmean(matrix)) / np.nanstd(matrix)\n return matrix\n\ndef rating(matrix):\n from scipy.stats import rankdata\n matrix = rankdata(matrix, method='dense').reshape(matrix.shape)-1\n return matrix", "sub_path": "Tractography/group_analysis.py", "file_name": "group_analysis.py", "file_ext": "py", "file_size_in_byte": 1543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "numpy.load", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.nanmin", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.arctanh", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.nanstd", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.stats.rankdata", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "77928708", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author: Mark\n# datetime: 2020/9/24 9:40\n# filename: _scorecard\n# software: PyCharm\n\nimport math\nimport numpy as np\nimport pandas as pd\nfrom pydantic import confloat\n\nfrom ..base import BaseEstimator\nfrom ..base import ModelMixin\nfrom . import SKlearnLogisticRegression\nfrom . 
import StatsLogisticRegression\n\n\nclass ScorecardModel(ModelMixin,\n BaseEstimator):\n \"\"\"\n Function: scorecard model\n Input: training feature table X_train\n training labels y_train\n Controls: label column: label_column\n logistic regression backend: is_sklearn_LR\n base score: basic_score\n points to double the odds: pdo\n P0: po\n p-value filtering threshold: p_value_threshold\n Output: scorecard model data\n \"\"\"\n label_column: str = 'label'\n is_sklearn_LR: bool = False\n basic_score: int = 600\n pdo: int = 20\n p0: confloat(ge=0, le=1) = 0.05\n p_value_threshold: confloat(ge=0, le=1) = 0.05\n\n p_value_df: pd.DataFrame = None\n\n def _calculate_scorecard(self, woe_df, model_df):\n # merge in the coefficient column\n cal_df = woe_df.merge(model_df.loc[:, ['column_name', 'coefficient']], on='column_name')\n # compute the scores\n cal_df['B'] = float(self.pdo) / math.log(2)\n cal_df['A'] = float(self.basic_score) + cal_df['B'] * math.log(float(self.p0))\n cal_df['score'] = round(\n cal_df.loc[:, 'A'] / model_df.shape[0] - cal_df.loc[:, 'B'] * cal_df.loc[:, 'coefficient'] * cal_df.loc[:,\n 'encoding'], 0)\n return cal_df\n\n def fit(self, X_train, y_train):\n \"\"\"\n Feature selection service\n :param X_train: dataset\n :return: scorecard model\n \"\"\"\n # run logistic regression to obtain the coefficient matrix\n if self.is_sklearn_LR:\n # fit the model and get the parameter matrix\n sklogistic = SKlearnLogisticRegression()\n sklogistic.fit(x_train=X_train, y_train=y_train)\n coefficient_matrix = sklogistic.model.coef_\n\n # organize the data\n column_df = pd.DataFrame({'column_name': X_train.columns.tolist()})\n coefficient_df = pd.DataFrame(coefficient_matrix).T\n coefficient_df.columns = ['coefficient']\n model_df = pd.concat([column_df, coefficient_df], axis=1).reset_index(drop=True)\n else:\n # fit a statsmodels logistic regression and get the parameter matrix\n # filter features by p-value\n # nested loops implement p-value checking with replacement\n # outer loop: iterate over the number of features\n filtered_col_list = X_train.columns.tolist()\n first_level_num = len(filtered_col_list)\n stop_flag = False\n for step_1 in range(first_level_num):\n # inner loop: p-value checking with replacement for the current number of features\n # add 1 because the first pass deletes no feature; with it, every deletion gets tried\n second_level_num = len(filtered_col_list) + 1\n # series of mean p-values per feature, updated in the inner loop\n p_values_series = pd.Series([0.0] * len(filtered_col_list), index=filtered_col_list)\n delete_list = []\n fit_cols_list = filtered_col_list.copy()\n for step_2 in range(second_level_num):\n # fit the data\n statslogistic = StatsLogisticRegression()\n statslogistic.fit(x_train=X_train[fit_cols_list], y_train=y_train)\n # model coefficients and p-values\n coefficient_matrix = statslogistic.model.params\n p_values = statslogistic.model.pvalues\n # stopping condition: every feature's p-value is below the given threshold\n if step_2 == 0 and p_values.apply(lambda x: x <= self.p_value_threshold).all() and (coefficient_matrix.apply(lambda x: x >= 0).all() or coefficient_matrix.apply(lambda x: x < 0).all()):\n stop_flag = True\n break\n else:\n # update the p-value series\n if step_2 == 0:\n p_values_series = p_values_series.add(p_values)\n else:\n _col = (set(p_values_series.index.tolist()) - set(p_values.index.tolist())).pop()\n fill_v = p_values_series.loc[_col]\n p_values_series = p_values_series.add(p_values, fill_value=fill_v) / 2\n # drop the feature with the largest p-value that has not been dropped yet\n sorted_col_list = p_values_series.sort_values(ascending=False).index.tolist()\n del_col = ''\n for col in sorted_col_list:\n if col not in delete_list:\n del_col = col\n delete_list.append(col)\n break\n # prepare the next iteration's feature set: remove this round's worst feature, with replacement\n if del_col:\n fit_cols_list = filtered_col_list.copy()\n fit_cols_list.remove(del_col)\n if stop_flag:\n break\n else:\n # drop the feature with the largest mean p-value\n sorted_col = p_values_series.sort_values(ascending=False).index.tolist()\n if sorted_col:\n filtered_col_list.remove(sorted_col[0])\n\n if len(filtered_col_list) == 0:\n raise Exception(\"No feature's P value is less than the p_value_threshold, please enlarge the threshold.\"\n 
\"\\nNo feature satisfies the p-value filter; please increase the p_value_threshold parameter.\")\n\n # organize the data\n model_df = pd.DataFrame()\n for i in range(len(coefficient_matrix.index)):\n model_df.loc[i, 'column_name'] = coefficient_matrix.index[i]\n model_df.loc[i, 'coefficient'] = coefficient_matrix[i]\n model_df.reset_index(drop=True, inplace=True)\n\n # save per-feature significance data: p-values\n self.p_value_df = p_values.copy(deep=True)\n self.p_value_df = self.p_value_df.to_frame().reset_index()\n self.p_value_df.columns = ['columns', 'p_value']\n\n # transform the training table\n woe_df = pd.DataFrame()\n for col in X_train[filtered_col_list].columns:\n temp_woe = X_train[col].unique().tolist()\n temp_woe_df = pd.DataFrame({'column_name': [col] * len(temp_woe), 'encoding': temp_woe})\n woe_df = pd.concat([woe_df, temp_woe_df], axis=0).reset_index(drop=True)\n\n # compute the scorecard\n result_df = self._calculate_scorecard(woe_df, model_df)\n # strip the binning suffix from each feature name\n result_df.loc[:, 'column_name'] = result_df.loc[:, 'column_name'].apply(\n lambda x: '_'.join(str(x).split('_')[:-1]))\n\n self.model = result_df\n\n return self\n\n def _in_area(self, area, value):\n # handle empty bins\n none_list = ['', ' ', 'None', 'nan', 'NaN', 'NULL']\n if str(area) in none_list:\n if str(value) in none_list:\n result = True\n else:\n result = False\n # match values against interval bins\n elif area.startswith('('):\n area = area.replace('(', '').replace(')', '').replace('[', '').replace(']', '').replace(' ', '')\n low_str, high_str = area.split(',')\n low_boundary = -np.inf if low_str == '-inf' else float(low_str)\n high_boundary = np.inf if high_str == 'inf' else float(high_str)\n if low_boundary < float(value) <= high_boundary:\n result = True\n else:\n result = False\n # match categorical values (unbinned data)\n else:\n if area == str(value):\n result = True\n else:\n result = False\n return result\n\n def _get_score(self, score_dict, value):\n for interval, score in score_dict.items():\n if self._in_area(interval, value):\n return score\n\n def predict(self, X_test):\n \"\"\"\n Compute scores from the model\n :param X_test: data\n :return: final scores\n \"\"\"\n score_card_df = self.model\n # filter the feature columns\n selected_cols = score_card_df['column_name'].unique().tolist()\n # handle empty values\n selected_cols = [item for item in selected_cols if item]\n columns_dict = {}\n for f_col in selected_cols:\n for col in X_test.columns:\n if f_col.startswith(col) or col.startswith(f_col):\n columns_dict[col] = f_col\n break\n filter_feature_df = X_test[columns_dict]\n for col in columns_dict.keys():\n # score groups for this feature\n _score = score_card_df.loc[columns_dict[col] == score_card_df['column_name'], ['binning', 'score']]\n # bin-to-score dictionary\n map_score_dict = dict(zip(_score['binning'].astype('str').tolist(), _score['score'].tolist()))\n # replace raw values with their scores\n filter_feature_df[col] = filter_feature_df[col].apply(lambda x: self._get_score(map_score_dict, x))\n # total score per record\n filter_feature_df['score'] = filter_feature_df[columns_dict.keys()].sum(1)\n\n filter_feature_df_final = filter_feature_df.loc[:, ['score']]\n return filter_feature_df_final\n", "sub_path": "mldesigntoolkit/mldesigntoolkit/modules/modeling/_scorecard.py", "file_name": "_scorecard.py", "file_ext": "py", "file_size_in_byte": 9866, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "base.ModelMixin", "line_number": 19, "usage_type": "name"}, {"api_name": "base.BaseEstimator", "line_number": 20, "usage_type": "name"}, {"api_name": "pydantic.confloat", "line_number": 37, "usage_type": "call"}, {"api_name": "pydantic.confloat", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 40, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 46, "usage_type": "call"}, {"api_name": "math.log", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "204739894", "text": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom .resource import Resource\n\n\nclass ManagedHostingEnvironment(Resource):\n \"\"\"Description of a managed hosting environment.\n\n :param id: Resource Id\n :type id: str\n :param name: Resource Name\n :type name: str\n :param kind: Kind of resource\n :type kind: str\n :param location: Resource Location\n :type location: str\n :param type: Resource type\n :type type: str\n :param tags: Resource tags\n :type tags: dict\n :param managed_hosting_environment_name: Name of the managed hosting\n environment\n :type managed_hosting_environment_name: str\n :param managed_hosting_environment_location: Location of the managed\n hosting environment e.g. \"West US\"\n :type managed_hosting_environment_location: str\n :param status: Current status of the managed hosting environment.\n Possible values include: 'Preparing', 'Ready', 'Deleting'\n :type status: str or :class:`ManagedHostingEnvironmentStatus\n `\n :param virtual_network: Description of the managed hosting environment's\n virtual network\n :type virtual_network: :class:`VirtualNetworkProfile\n `\n :param ipssl_address_count: Number of ip ssl addresses reserved for the\n managed hosting environment\n :type ipssl_address_count: int\n :param dns_suffix: DNS suffix of the managed hosting environment\n :type dns_suffix: str\n :param subscription_id: Subscription of the managed hosting environment\n (read only)\n :type subscription_id: str\n :param resource_group: Resource group of the managed hosting environment\n (read only)\n :type resource_group: str\n :param environment_is_healthy: True/false indicating whether the managed\n hosting environment is healthy\n :type environment_is_healthy: bool\n :param environment_status: Detailed message about with results of the\n last check of the managed hosting environment\n :type environment_status: str\n :param suspended: True/false indicating whether the managed hosting\n environment is suspended. The environment can be suspended e.g. 
when the\n management endpoint is no longer available\n (most likely because NSG blocked the incoming traffic)\n :type suspended: bool\n :param api_management_account: Resource id of the api management account\n associated with this managed hosting environment (read only)\n :type api_management_account: str\n \"\"\" \n\n _validation = {\n 'location': {'required': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'kind': {'key': 'kind', 'type': 'str'},\n 'location': {'key': 'location', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'managed_hosting_environment_name': {'key': 'properties.name', 'type': 'str'},\n 'managed_hosting_environment_location': {'key': 'properties.location', 'type': 'str'},\n 'status': {'key': 'properties.status', 'type': 'ManagedHostingEnvironmentStatus'},\n 'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},\n 'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},\n 'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},\n 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},\n 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},\n 'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},\n 'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},\n 'suspended': {'key': 'properties.suspended', 'type': 'bool'},\n 'api_management_account': {'key': 'properties.apiManagementAccount', 'type': 'str'},\n }\n\n def __init__(self, location, id=None, name=None, kind=None, type=None, tags=None, managed_hosting_environment_name=None, managed_hosting_environment_location=None, status=None, virtual_network=None, ipssl_address_count=None, dns_suffix=None, subscription_id=None, resource_group=None, environment_is_healthy=None, environment_status=None, suspended=None, api_management_account=None):\n super(ManagedHostingEnvironment, self).__init__(id=id, name=name, kind=kind, location=location, type=type, tags=tags)\n self.managed_hosting_environment_name = managed_hosting_environment_name\n self.managed_hosting_environment_location = managed_hosting_environment_location\n self.status = status\n self.virtual_network = virtual_network\n self.ipssl_address_count = ipssl_address_count\n self.dns_suffix = dns_suffix\n self.subscription_id = subscription_id\n self.resource_group = resource_group\n self.environment_is_healthy = environment_is_healthy\n self.environment_status = environment_status\n self.suspended = suspended\n self.api_management_account = api_management_account\n", "sub_path": "azure-mgmt-web/azure/mgmt/web/models/managed_hosting_environment.py", "file_name": "managed_hosting_environment.py", "file_ext": "py", "file_size_in_byte": 5538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "resource.Resource", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "313248666", "text": "# Copyright (C) 2014-2015 New York University\n# This file is part of ReproZip which is released under the Revised BSD License\n# See file LICENSE for full license details.\n\n\"\"\"Entry point for the reprounzip utility.\n\nThis contains :func:`~reprounzip.reprounzip.main`, which is the entry point\ndeclared to setuptools. 
It is also callable directly.\n\nIt dispatches to plugins registered through pkg_resources as entry point\n``reprounzip.unpackers``.\n\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport pickle\nimport platform\nfrom rpaths import PosixPath, Path\nimport sys\nimport tarfile\n\nfrom reprounzip.common import load_config as load_config_file\nfrom reprounzip.main import unpackers\nfrom reprounzip.unpackers.common import load_config, COMPAT_OK, COMPAT_MAYBE, \\\n    COMPAT_NO, shell_escape\nfrom reprounzip.utils import iteritems, hsize\n\n\ndef print_info(args):\n    \"\"\"Writes out some information about a pack file.\n    \"\"\"\n    pack = Path(args.pack[0])\n\n    # Loads config\n    runs, packages, other_files = config = load_config(pack)\n\n    pack_total_size = 0\n    pack_total_paths = 0\n    pack_files = 0\n    pack_dirs = 0\n    pack_symlinks = 0\n    pack_others = 0\n    tar = tarfile.open(str(pack), 'r:*')\n    for m in tar.getmembers():\n        if not m.name.startswith('DATA/'):\n            continue\n        pack_total_size += m.size\n        pack_total_paths += 1\n        if m.isfile():\n            pack_files += 1\n        elif m.isdir():\n            pack_dirs += 1\n        elif m.issym():\n            pack_symlinks += 1\n        else:\n            pack_others += 1\n    tar.close()\n\n    meta_total_paths = 0\n    meta_packed_packages_files = 0\n    meta_unpacked_packages_files = 0\n    meta_packages = len(packages)\n    meta_packed_packages = 0\n    for package in packages:\n        nb = len(package.files)\n        meta_total_paths += nb\n        if package.packfiles:\n            meta_packed_packages_files += nb\n            meta_packed_packages += 1\n        else:\n            meta_unpacked_packages_files += nb\n    nb = len(other_files)\n    meta_total_paths += nb\n    meta_packed_paths = meta_packed_packages_files + nb\n\n    if runs:\n        meta_architecture = runs[0]['architecture']\n        if any(r['architecture'] != meta_architecture\n               for r in runs):\n            logging.warning(\"Runs have different architectures\")\n        meta_distribution = runs[0]['distribution']\n        if any(r['distribution'] != meta_distribution\n               for r in runs):\n            logging.warning(\"Runs have different distributions\")\n        meta_distribution = ' '.join(t for t in meta_distribution if t)\n\n    current_architecture = platform.machine().lower()\n    current_distribution = platform.linux_distribution()[0:2]\n    current_distribution = ' '.join(t for t in current_distribution if t)\n\n    print(\"Pack file: %s\" % pack)\n    print(\"\\n----- Pack information -----\")\n    print(\"Compressed size: %s\" % hsize(pack.size()))\n    print(\"Unpacked size: %s\" % hsize(pack_total_size))\n    print(\"Total packed paths: %d\" % pack_total_paths)\n    if args.verbosity >= 3:\n        print(\"    Files: %d\" % pack_files)\n        print(\"    Directories: %d\" % pack_dirs)\n        print(\"    Symbolic links: %d\" % pack_symlinks)\n        if pack_others:\n            print(\"    Unknown (what!?): %d\" % pack_others)\n    print(\"\\n----- Metadata -----\")\n    if args.verbosity >= 3:\n        print(\"Total paths: %d\" % meta_total_paths)\n        print(\"Listed packed paths: %d\" % meta_packed_paths)\n    if packages:\n        print(\"Total software packages: %d\" % meta_packages)\n        print(\"Packed software packages: %d\" % meta_packed_packages)\n        if args.verbosity >= 3:\n            print(\"Files from packed software packages: %d\" %\n                  meta_packed_packages_files)\n            print(\"Files from unpacked software packages: %d\" %\n                  meta_unpacked_packages_files)\n    if runs:\n        print(\"Architecture: %s (current: %s)\" % (meta_architecture,\n                                                  current_architecture))\n        print(\"Distribution: %s (current: %s)\" % (\n              meta_distribution, current_distribution or \"(not Linux)\"))\n        print(\"Executions (%d):\" % len(runs))\n        for i, r in enumerate(runs):\n            cmdline = ' 
'.join(shell_escape(a) for a in r['argv'])\n if len(runs) > 1:\n print(\" %d: %s\" % (i, cmdline))\n else:\n print(\" %s\" % cmdline)\n if args.verbosity >= 2:\n print(\" input files: %s\" %\n \", \".join(r['input_files']))\n print(\" output files: %s\" %\n \", \".join(r['output_files']))\n print(\" wd: %s\" % r['workingdir'])\n if 'signal' in r:\n print(\" signal: %d\" % r['signal'])\n else:\n print(\" exitcode: %d\" % r['exitcode'])\n\n # Unpacker compatibility\n print(\"\\n----- Unpackers -----\")\n unpacker_status = {}\n for name, upk in iteritems(unpackers):\n if 'test_compatibility' in upk:\n compat = upk['test_compatibility']\n if callable(compat):\n compat = compat(pack, config=config)\n if isinstance(compat, (tuple, list)):\n compat, msg = compat\n else:\n msg = None\n unpacker_status.setdefault(compat, []).append((name, msg))\n else:\n unpacker_status.setdefault(None, []).append((name, None))\n for s, n in [(COMPAT_OK, \"Compatible\"), (COMPAT_MAYBE, \"Unknown\"),\n (COMPAT_NO, \"Incompatible\")]:\n if s != COMPAT_OK and args.verbosity < 2:\n continue\n if s not in unpacker_status:\n continue\n upks = unpacker_status[s]\n print(\"%s (%d):\" % (n, len(upks)))\n for upk_name, msg in upks:\n if msg is not None:\n print(\" %s (%s)\" % (upk_name, msg))\n else:\n print(\" %s\" % upk_name)\n\n\ndef showfiles(args):\n \"\"\"Writes out the input and output files.\n\n Works both for a pack file and for an extracted directory.\n \"\"\"\n pack = Path(args.pack[0])\n\n if not pack.exists():\n logging.critical(\"Pack or directory %s does not exist\", pack)\n sys.exit(1)\n\n if pack.is_dir():\n # Reads info from an unpacked directory\n runs, packages, other_files = load_config_file(pack / 'config.yml',\n canonical=True)\n # The '.reprounzip' file is a pickled dictionary, it contains the name\n # of the files that replaced each input file (if upload was used)\n with pack.open('rb', '.reprounzip') as fp:\n unpacked_info = pickle.load(fp)\n input_files = unpacked_info.get('input_files', {})\n\n print(\"Input files:\")\n for i, run in enumerate(runs):\n if len(runs) > 1:\n print(\" Run %d:\" % i)\n for input_name, path in iteritems(run['input_files']):\n print(\" %s (%s)\" % (input_name, path))\n if input_files.get(input_name) is not None:\n assigned = PosixPath(input_files[input_name])\n else:\n assigned = \"(original)\"\n print(\" %s\" % assigned)\n\n print(\"Output files:\")\n for i, run in enumerate(runs):\n if len(runs) > 1:\n print(\" Run %d:\" % i)\n for output_name, path in iteritems(run['output_files']):\n print(\" %s (%s)\" % (output_name, path))\n\n else: # pack.is_file()\n # Reads info from a pack file\n runs, packages, other_files = load_config(pack)\n\n print(\"Input files:\")\n for i, run in enumerate(runs):\n if len(runs) > 1:\n print(\" Run %d:\" % i)\n for input_name, path in iteritems(run['input_files']):\n print(\" %s (%s)\" % (input_name, path))\n\n print(\"Output files:\")\n for i, run in enumerate(runs):\n if len(runs) > 1:\n print(\" Run %d:\" % i)\n for output_name, path in iteritems(run['output_files']):\n print(\" %s (%s)\" % (output_name, path))\n\n\ndef setup_info(parser, **kwargs):\n \"\"\"Prints out some information about a pack\n \"\"\"\n parser.add_argument('pack', nargs=1,\n help=\"Pack to read\")\n parser.set_defaults(func=print_info)\n\n\ndef setup_showfiles(parser, **kwargs):\n \"\"\"Prints out input and output file names\n \"\"\"\n parser.add_argument('pack', nargs=1,\n help=\"Pack or directory to read from\")\n parser.set_defaults(func=showfiles)\n", 
"sub_path": "reprounzip/reprounzip/pack_info.py", "file_name": "pack_info.py", "file_ext": "py", "file_size_in_byte": 8750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rpaths.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "reprounzip.unpackers.common.load_config", "line_number": 36, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 85, "usage_type": "call"}, {"api_name": "platform.machine", "line_number": 88, "usage_type": "call"}, {"api_name": "platform.linux_distribution", "line_number": 89, "usage_type": "call"}, {"api_name": "reprounzip.utils.hsize", "line_number": 94, "usage_type": "call"}, {"api_name": "reprounzip.utils.hsize", "line_number": 95, "usage_type": "call"}, {"api_name": "reprounzip.unpackers.common.shell_escape", "line_number": 122, "usage_type": "call"}, {"api_name": "reprounzip.utils.iteritems", "line_number": 141, "usage_type": "call"}, {"api_name": "reprounzip.main.unpackers", "line_number": 141, "usage_type": "argument"}, {"api_name": "reprounzip.unpackers.common.COMPAT_OK", "line_number": 153, "usage_type": "name"}, {"api_name": "reprounzip.unpackers.common.COMPAT_MAYBE", "line_number": 153, "usage_type": "name"}, {"api_name": "reprounzip.unpackers.common.COMPAT_NO", "line_number": 154, "usage_type": "name"}, {"api_name": "reprounzip.unpackers.common.COMPAT_OK", "line_number": 155, "usage_type": "name"}, {"api_name": "rpaths.Path", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 176, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 177, "usage_type": "call"}, {"api_name": "reprounzip.common.load_config", "line_number": 181, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 186, "usage_type": "call"}, {"api_name": "reprounzip.utils.iteritems", "line_number": 193, "usage_type": "call"}, {"api_name": "rpaths.PosixPath", "line_number": 196, "usage_type": "call"}, {"api_name": "reprounzip.utils.iteritems", "line_number": 205, "usage_type": "call"}, {"api_name": "reprounzip.unpackers.common.load_config", "line_number": 210, "usage_type": "call"}, {"api_name": "reprounzip.utils.iteritems", "line_number": 216, "usage_type": "call"}, {"api_name": "reprounzip.utils.iteritems", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "225519986", "text": "import gym\nfrom gym.wrappers import Monitor\nimport itertools\nimport numpy as np\nimport os\nimport random\nimport sys\nimport tensorflow as tf\nimport time\n\nfrom lib import plotting\nfrom lib.dqn_utils import *\nfrom collections import deque, namedtuple\n\n# make enviroment\nenv = gym.envs.make(\"Breakout-v0\")\n\n# Atari Actions: 0 (noop), 1 (fire), 2 (left) and 3 (right) are valid actions\nVALID_ACTIONS = [0, 1, 2, 3]\n\nclass Estimator():\n\t\"\"\"\n\tQ-Value Estimator neural network.\n\n\tThis network is used for both the Q-Network and the Target Network.\n\t\"\"\"\n\tdef __init__(self, scope=\"estimator\", summaries_dir=None):\n\t\tself.scope = scope\n\t\t# Writes Tensorboard summaries to disk\n\t\tself.summary_writer = None\n\t\twith tf.variable_scope(scope):\n\t\t\t# Build the graph\n\t\t\tself._build_model()\n\t\t\tif summaries_dir:\n\t\t\t\tsummary_dir = os.path.join(summaries_dir, \"summaries_{}\".format(scope))\n\t\t\t\tif not 
os.path.exists(summary_dir):\n\t\t\t\t\tos.makedirs(summary_dir)\n\t\t\t\tself.summary_writer = tf.summary.FileWriter(summary_dir)\n\n\tdef _build_model( self ):\n\t\t\"\"\"\n\t\t\tbuild computation graph\n\t\t\"\"\"\n\n\t\t# Placeholders for our input\n\t\t# Our input are 4 RGB frames of shape 84, 84 each\n\t\tself.X_pl = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name=\"X\")\n\t\t# The TD target value\n\t\tself.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name=\"y\")\n\t\t# Integer id of which action was selected\n\t\tself.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name=\"actions\")\n\n\t\tX = tf.to_float(self.X_pl) / 255.0\n\t\tbatch_size = tf.shape(self.X_pl)[0]\n\n\t\t# Three convolutional layers\n\t\tconv1 = tf.contrib.layers.conv2d(X, 32, 8, 4, activation_fn=tf.nn.relu)\n\t\tconv2 = tf.contrib.layers.conv2d(conv1, 64, 4, 2, activation_fn=tf.nn.relu)\n\t\tconv3 = tf.contrib.layers.conv2d(conv2, 64, 3, 1, activation_fn=tf.nn.relu)\n\n\t\t# Fully connected layers\n\t\tflattened = tf.contrib.layers.flatten(conv3)\n\t\tfc1 = tf.contrib.layers.fully_connected(flattened, 512)\n\t\tself.predictions = tf.contrib.layers.fully_connected(fc1, len(VALID_ACTIONS))\n\n\t\t# Get the predictions for the chosen actions only\n\t\tgather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n\t\tself.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n\n\t\t# Calcualte the loss\n\t\tself.losses = tf.squared_difference( self.y_pl, self.action_predictions )\n\t\tself.loss = tf.reduce_mean( self.losses )\n\n\t\t# Optimizer Parameters from original paper\n\t\tself.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)\n\t\tself.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())\n\n\t\t# Summaries for Tensorboard\n\t\tself.summaries = tf.summary.merge([\n\t\t tf.summary.scalar(\"loss\", self.loss),\n\t\t tf.summary.histogram(\"loss_hist\", self.losses),\n\t\t tf.summary.histogram(\"q_values_hist\", self.predictions),\n\t\t tf.summary.scalar(\"max_q_value\", tf.reduce_max(self.predictions))\n\t\t])\n\n\n\tdef predict(self, sess, s):\n\t \"\"\"\n\t Predicts action values.\n\n\t Args:\n\t sess: Tensorflow session\n\t s: State input of shape [batch_size, 4, 160, 160, 3]\n\n\t Returns:\n\t Tensor of shape [batch_size, NUM_VALID_ACTIONS] containing the estimated \n\t action values.\n\t \"\"\"\n\t return sess.run(self.predictions, { self.X_pl: s })\n\n\tdef update(self, sess, s, a, y):\n\t\t\"\"\"\n\t\tUpdates the estimator towards the given targets.\n\n\t\tArgs:\n\t\t sess: Tensorflow session object\n\t\t s: State input of shape [batch_size, 4, 160, 160, 3]\n\t\t a: Chosen actions of shape [batch_size]\n\t\t y: Targets of shape [batch_size]\n\n\t\tReturns:\n\t\t The calculated loss on the batch.\n\t\t\"\"\"\n\t\tfeed_dict = { self.X_pl: s, self.y_pl: y, self.actions_pl: a }\n\t\tsummaries, global_step, _, loss = sess.run(\n\t\t\t[self.summaries, tf.contrib.framework.get_global_step(), self.train_op, self.loss], feed_dict)\n\t\tif self.summary_writer:\n\t\t self.summary_writer.add_summary(summaries, global_step)\n\t\treturn loss\n\ndef deep_q_learning(sess,\n\t env,\n\t q_estimator,\n\t target_estimator,\n\t state_processor,\n\t num_episodes,\n\t experiment_dir,\n\t replay_memory_size=500000,\n\t replay_memory_init_size=50000,\n\t update_target_estimator_every=10000,\n\t discount_factor=0.99,\n\t epsilon_start=1.0,\n\t epsilon_end=0.1,\n\t 
epsilon_decay_steps=500000,\n\t batch_size=32,\n\t record_video_every=50):\n\t\"\"\"\n\tQ-Learning algorithm for off-policy TD control using Function Approximation.\n\tFinds the optimal greedy policy while following an epsilon-greedy policy.\n\n\tArgs:\n\t sess: Tensorflow Session object\n\t env: OpenAI environment\n\t q_estimator: Estimator object used for the q values\n\t target_estimator: Estimator object used for the targets\n\t state_processor: A StateProcessor object\n\t num_episodes: Number of episodes to run for\n\t experiment_dir: Directory to save Tensorflow summaries in\n\t replay_memory_size: Size of the replay memory\n\t replay_memory_init_size: Number of random experiences to sample when initializing \n\t the replay memory.\n\t update_target_estimator_every: Copy parameters from the Q estimator to the \n\t target estimator every N steps\n\t discount_factor: Gamma discount factor\n\t epsilon_start: Chance to sample a random action when taking an action.\n\t Epsilon is decayed over time and this is the start value\n\t epsilon_end: The final minimum value of epsilon after decaying is done\n\t epsilon_decay_steps: Number of steps to decay epsilon over\n\t batch_size: Size of batches to sample from the replay memory\n\t record_video_every: Record a video every N episodes\n\n\tReturns:\n\t An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n\t\"\"\"\n\n\tTransition = namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n\n\t# The replay memory\n\treplay_memory = []\n\n\t# Keeps track of useful statistics\n\tstats = plotting.EpisodeStats(\n\t episode_lengths=np.zeros(num_episodes),\n\t episode_rewards=np.zeros(num_episodes))\n\n\t# Create directories for checkpoints and summaries\n\tcheckpoint_dir = os.path.join(experiment_dir, \"checkpoints\")\n\tcheckpoint_path = os.path.join(checkpoint_dir, \"model\")\n\tmonitor_path = os.path.join(experiment_dir, \"monitor\")\n\n\tif not os.path.exists(checkpoint_dir):\n\t os.makedirs(checkpoint_dir)\n\tif not os.path.exists(monitor_path):\n\t os.makedirs(monitor_path)\n\n\tsaver = tf.train.Saver()\n\t# Load a previous checkpoint if we find one\n\tlatest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n\tif latest_checkpoint:\n\t print(\"Loading model checkpoint {}...\\n\".format(latest_checkpoint))\n\t saver.restore(sess, latest_checkpoint)\n\n\t# Get the current time step\n\ttotal_t = sess.run(tf.contrib.framework.get_global_step())\n\n\t# The epsilon decay schedule\n\tepsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)\n\n\t# The policy we're following\n\tpolicy = make_epsilon_greedy_policy(\n\t q_estimator,\n\t len(VALID_ACTIONS))\n\n\t# Populate the replay memory with initial experience\n\tprint(\"Populating replay memory...\")\n\t############################################################\n\t# YOUR CODE 1 : Populate replay memory!\n\t# Hints : use function \"populate_replay_buffer\"\n\t# about 1 line code\n\treplay_memory = populate_replay_buffer( sess, env, state_processor, replay_memory_init_size, VALID_ACTIONS, Transition, policy )\n\t\n\t\n\n\t# Record videos\n\tenv = Monitor(env,\n\t directory=monitor_path,\n\t resume=True,\n\t video_callable=lambda count: count % record_video_every == 0)\n\n\tfor i_episode in range(num_episodes):\n\t\t# Save the current checkpoint\n\t\tsaver.save(tf.get_default_session(), checkpoint_path)\n\n\t\t# Reset the environment\n\t\tstate = env.reset()\n\t\tstate = state_process(sess, state_processor, 
state)\n\t\tloss = None\n\n\t\t# One step in the environment\n\t\tfor t in itertools.count():\n\t\t\t\n\t\t\t# Epsilon for this time step\n\t\t\tepsilon = epsilons[min(total_t, epsilon_decay_steps-1)]\n\n\t\t\t# Add epsilon to Tensorboard\n\t\t\tepisode_summary = tf.Summary()\n\t\t\tepisode_summary.value.add(simple_value=epsilon, tag=\"epsilon\")\n\t\t\tq_estimator.summary_writer.add_summary(episode_summary, total_t)\n\n\t\t\t###########################################################\n\t\t\t# YOUR CODE 2: Target network update\n\t\t\t# Hints : use function \"copy_model_parameters\"\n\t\t\tif total_t % update_target_estimator_every == 0:\n\t\t\t\tcopy_model_parameters(sess, q_estimator, target_estimator)\n\n\t\t\t# Print out which step we're on, useful for debugging.\n\t\t\tprint(\"\\rStep {} ({}) @ Episode {}/{}, loss: {} Memory Len {} \".format(\n\t\t\t\t\tt, total_t, i_episode + 1, num_episodes, loss, len(replay_memory)), end=\"\")\n\t\t\tsys.stdout.flush()\n\n\t\t\t##############################################\n\t\t\t# YOUR CODE 3: Take a step in the environment\n\t\t\t# Hints 1 : be careful to use function 'state_process' to process the RGB state\n\t\t\t# Hints 2 : you can see function \"populate_replay_buffer()\" \n\t\t\t#\t\t\t\tfor detail about how to TAKE A STEP \n\t\t\t# about 2 or 3 line codes\n\t\t\taction = np.random.choice(len(VALID_ACTIONS), p=policy(sess, state, epsilon))\n\t\t\tnext_state, reward, done, _ = env.step(VALID_ACTIONS[action])\n\t\t\tnext_state = state_processor.process(sess, next_state)\n\t\t\tnext_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)\n\n\t\t\t# If our replay memory is full, pop the first element\n\t\t\tif len(replay_memory) == replay_memory_size:\n\t\t\t\treplay_memory.pop(0)\n\n\t\t\t#############################\n\t\t\t# YOUR CODE 4: Save transition to replay memory\n\t\t\t# Hints : you can see function 'populate_replay_buffer' for detail\n\t\t\t# about 1 or 2 line codes\n\t\t\treplay_memory.append( Transition( state, action, reward, next_state, done ) )\n\t\t\t\n\n\t\t\t# Update statistics\n\t\t\tstats.episode_rewards[i_episode] += reward\n\t\t\tstats.episode_lengths[i_episode] = t\n\n\t\t\t#########################################################\n\t\t\t# YOUR CODE 5: Sample a minibatch from the replay memory, \n\t\t\t# hints: can use function \"random.sample( replay_memory, batch_size )\" to get minibatch\n\t\t\t# about 1-2 lines codes\n\t\t\tminibatch = np.array(random.sample(replay_memory, batch_size))\n\t\t\tstate_batch, action_batch, reward_batch, next_state_batch, done_batch = map(np.array, zip(*minibatch))\n\n\n\t\t\t###########################################################\n\t\t\t# YOUR CODE 6: use minibatch sample to calculate q values and targets\n\t\t\t# Hints 1 : use function 'q_estimator.predict' to get q values\n\t\t\t# Hints 2 : use function 'target_estimator.predict' to get targets values\n\t\t\t#\t\t\t\tremember 'targets = reward + gamma * max q( s, a' )'\n\t\t\t# about 2 line codes\n\t\t\t\n\t\t\tq = target_estimator.predict(sess,next_state_batch)\n\t\t\tdone_batch = np.invert(done_batch).astype(float) \n\t\t\ttargets = reward_batch + done_batch * discount_factor * np.max(q, axis = 1)\n\n\t\t\t################################################\n\t\t\t# YOUR CODE 7: Perform gradient descent update\n\t\t\t# hints : use function 'q_estimator.update'\n\t\t\t# about 1 line code\n\t\t\tloss = q_estimator.update(sess,state_batch, np.array(action_batch), targets)\n\t\t\tif done:\n\t\t\t\tbreak\n\t\t\tstate = 
next_state\n\t\t\ttotal_t += 1\n\n\t\t# Add summaries to tensorboard\n\t\tepisode_summary = tf.Summary()\n\t\tepisode_summary.value.add(simple_value=stats.episode_rewards[i_episode], node_name=\"episode_reward\", tag=\"episode_reward\")\n\t\tepisode_summary.value.add(simple_value=stats.episode_lengths[i_episode], node_name=\"episode_length\", tag=\"episode_length\")\n\t\tq_estimator.summary_writer.add_summary(episode_summary, total_t)\n\t\tq_estimator.summary_writer.flush()\n\n\t\tyield total_t, plotting.EpisodeStats(\n\t\t\tepisode_lengths=stats.episode_lengths[:i_episode+1],\n\t\t\tepisode_rewards=stats.episode_rewards[:i_episode+1])\n\n\tenv.close()\n\treturn stats\n\ntf.reset_default_graph()\n\n# Where we save our checkpoints and graphs\nexperiment_dir = os.path.abspath(\"./experiments/DQN\")\n\n# Create a glboal step variable\nglobal_step = tf.Variable(0, name='global_step', trainable=False)\n \n# Create estimators\nq_estimator = Estimator(scope=\"q\", summaries_dir=experiment_dir)\ntarget_estimator = Estimator(scope=\"target_q\")\n\n# State processor\nstate_processor = StateProcessor()\n\n# Run it!\nwith tf.Session() as sess:\n\tsess.run(tf.initialize_all_variables())\n\tfor t, stats in deep_q_learning(sess,\n\t\t\t\t\t\t\t\t\tenv,\n\t\t\t\t\t\t\t\t\tq_estimator=q_estimator,\n\t\t\t\t\t\t\t\t\ttarget_estimator=target_estimator,\n\t\t\t\t\t\t\t\t\tstate_processor=state_processor,\n\t\t\t\t\t\t\t\t\texperiment_dir=experiment_dir,\n\t\t\t\t\t\t\t\t\tnum_episodes=5000,\n\t\t\t\t\t\t\t\t\treplay_memory_size=200000,\n\t\t\t\t\t\t\t\t\treplay_memory_init_size=20000,\n\t\t\t\t\t\t\t\t\tupdate_target_estimator_every=10000,\n\t\t\t\t\t\t\t\t\tepsilon_start=1.0,\n\t\t\t\t\t\t\t\t\tepsilon_end=0.1,\n\t\t\t\t\t\t\t\t\tepsilon_decay_steps=200000,\n\t\t\t\t\t\t\t\t\tdiscount_factor=0.99,\n\t\t\t\t\t\t\t\t\tbatch_size=32):\n\t\tprint(\"\\nEpisode Reward: {} timeing: {}\".format(stats.episode_rewards[-1], time.time()))\n\tplot_episode_stats(stats)\n\n", "sub_path": "rl/RL - TOY - DQN and its siblings - tf & torch/dqn.py", "file_name": "dqn.py", "file_ext": "py", "file_size_in_byte": 12899, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "gym.envs.make", "line_number": 16, "usage_type": "call"}, {"api_name": "gym.envs", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.uint8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.to_float", "line_number": 53, "usage_type": "call"}, 
{"api_name": "tensorflow.shape", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.flatten", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.fully_connected", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.fully_connected", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.range", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.squared_difference", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.train.RMSPropOptimizer", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.framework.get_global_step", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_max", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.get_global_step", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 116, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 166, "usage_type": "call"}, {"api_name": "lib.plotting.EpisodeStats", "line_number": 172, "usage_type": "call"}, {"api_name": "lib.plotting", "line_number": 172, "usage_type": "name"}, {"api_name": 
"numpy.zeros", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 186, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.framework.get_global_step", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 197, "usage_type": "call"}, {"api_name": "gym.wrappers.Monitor", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.get_default_session", "line_number": 222, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 236, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 249, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 257, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 281, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 282, "usage_type": "attribute"}, {"api_name": "numpy.invert", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 300, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 307, "usage_type": "call"}, {"api_name": "lib.plotting.EpisodeStats", "line_number": 313, "usage_type": "call"}, {"api_name": "lib.plotting", "line_number": 313, "usage_type": "name"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path", "line_number": 323, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 326, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 336, "usage_type": "call"}, {"api_name": "tensorflow.initialize_all_variables", "line_number": 337, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 353, "usage_type": "call"}]} +{"seq_id": "135985996", "text": "# -*- coding: utf-8 -*\n# ----------------------------------------here we-import files---------------------------------------------------------------\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,redirect\nfrom datetime import timedelta\nfrom demoapp.forms import SignUpForms,LoginForm,PostForm,CommentForm,LikeForm\nfrom demoapp.models import UserModel, SessionToken, PostModel,CommentModel,LikeModel\nfrom django.contrib.auth.hashers import make_password,check_password\nfrom upload_to_win.settings import BASE_DIR\nfrom django.utils import timezone\nfrom imgurpython import ImgurClient\nimport yagmail\nimport ctypes\nimport tkMessageBox\nfrom django.contrib import messages\n# Create your views here.\ndef signup_view(request):\n #------------------------------here is the logic of the functions--------------------------------------------------------\n if request.method == 'POST':\n form = SignUpForms(request.POST)\n if form.is_valid():\n Username = form.cleaned_data['Username']\n Email =form.cleaned_data['Email']\n Name = form.cleaned_data['Name']\n Password = form.cleaned_data['Password']\n # insert data to db\n new_user = UserModel(Name=Name,Password=make_password(Password),Username=Username, Email=Email)\n new_user.save()\n # sending welcome Email To User That Have Signup Successfully\n message = \"Welcome to UPLOAD TO WIN. Your account is sucessfuly created on UPLOAD TO WIN\"\n yag = yagmail.SMTP('kumarrajenderkullu@gmail.com', 'luvmomdad11')\n yag.send(to=Email, subject='Upload to win', contents=message)\n ctypes.windll.user32.MessageBoxW(0, u\"You are Successfully Registered.\",\n u\"Done\", 0)\n # SUCCESSFULLY SEND EMAIL TO THE USER WHO HAS SIGNUP.\n\n #--------------------------here we give conditions which open success page or failed page ----------------------------------\n template_name = 'success.html'\n else:\n template_name = 'failed.html'\n else:\n form = SignUpForms()\n template_name = 'signup.html'\n\n return render(request, template_name, {'form':form})\n\n#-------------------------------------create a new function for login user---------------------------------------------------------\ndef login_view(request):\n #----------------------------------here is the function logic-----------------------------------------------------------------\n if request.method == 'GET':\n #Display Login Page\n login_form = LoginForm()\n template_name = 'login.html'\n #---------------------------------------Elif part---------------------------------------------------------------------------------\n elif request.method == 'POST':\n #Process The Data\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n #Validation Success\n Username = login_form.cleaned_data['Username']\n Password = login_form.cleaned_data['Password']\n #read Data From db\n user = UserModel.objects.filter(Username=Username).first()\n if user:\n #compare Password\n if check_password(Password, user.Password):\n token = SessionToken(user = user)\n token.create_token()\n token.save()\n response = redirect('feed/')\n response.set_cookie(key='session_token', value=token.session_token)\n return response\n #successfully Login\n\n template_name = 'login_success.html'\n tkMessageBox.showinfo(title=\"Greetings\", message=\"Hello World!\")\n else:\n\n #Failed\n template_name = 'login_fail.html'\n else:\n #user doesn't exist\n template_name = 'login_fail.html'\n else:\n #Validation Failed\n template_name = 
'login_fail.html'\n\n\n return render(request,template_name,{'login_form':login_form})\n\n#-------------------------------------------Create a new function for post --------------------------------------------------------------\ndef post_view(request):\n #-----------------------------------------here is the function logic------------------------------------------------------------\n user = check_validation(request)\n\n if user:\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n image = form.cleaned_data.get('image')\n caption = form.cleaned_data.get('caption')\n post = PostModel(user=user, image=image, caption=caption)\n post.save()\n\n path = str(BASE_DIR+\"//\"+post.image.url)\n\n client = ImgurClient('918e8552c6faccc', '38babe210df5ed9cde17605ac646b24a27f2b58a')\n post.image_url = client.upload_from_path(path,anon=True)['link']\n post.save()\n\n return redirect('/feed/')\n elif request.method == 'GET':\n return redirect('/logout/')\n else:\n form = PostForm()\n return render(request, 'post.html', {'form' : form})\n else:\n return redirect('/login/')\n\n#--------------------------------------------Create a new functions to show the all post of user--------------------------------------\ndef feed_view(request):\n user = check_validation(request)\n if user:\n #-------------------------------------here is the functions logic---------------------------------------------------------------\n\n posts = PostModel.objects.all().order_by('-created_on',)\n\n for post in posts:\n\n existing_like = LikeModel.objects.filter(post_id=post.id, user=user).first()\n if existing_like:\n post.has_liked = True\n\n\n return render(request, 'feed.html', {'posts': posts})\n else:\n\n return redirect('/login/')\n\n\n\n#----------------------------------------------Create a new functions to like the user post-------------------------------------------\ndef like_view(request):\n #-------------------------------------------here is the function logic------------------------------------------------------------\n user = check_validation(request)\n if user and request.method == 'POST':\n form = LikeForm(request.POST)\n if form.is_valid():\n post_id = form.cleaned_data.get('post').id\n existing_like = LikeModel.objects.filter(post_id=post_id, user=user).first()\n if not existing_like:\n like=LikeModel.objects.create(post_id=post_id, user=user)\n email = like.post.user.Email\n # sending welcome Email To User That Have Commented Successfully\n message = \"Hii!.. 
Someone Liked your Post on Upload To Win.\"\n yag = yagmail.SMTP('kumarrajenderkullu@gmail.com', 'luvmomdad11')\n yag.send(to=email, subject='Liked Your Post', contents=message)\n else:\n existing_like.delete()\n\n return redirect('/feed/')\n\n else:\n return redirect('/login/')\n\n#------------------------------------------------Create a new functions to comment on a user post---------------------------------------\ndef comment_view(request):\n #----------------------------------------------here is the function logic-------------------------------------------------------\n user = check_validation(request)\n if user and request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n post_id = form.cleaned_data.get('post').id\n comment_text = form.cleaned_data.get('comment_text')\n comment = CommentModel.objects.create(user=user, post_id=post_id, comment_text=comment_text)\n comment.save()\n email = comment.post.user.Email\n # sending welcome Email To User That Have Commented Successfully\n message = \"Hii! Someone Comented on your Post on Upload To Win.\"\n yag = yagmail.SMTP('kumarrajenderkullu@gmail.com', 'luvmomdad11')\n yag.send(to=email, subject='Liked Your Post', contents=message)\n # TODO: ADD MESSAGE TO INDICATE SUCCESS\n return redirect('/feed/')\n else:\n # TODO: ADD MESSAGE FOR FAILING TO POST COMMENT\n return redirect('/feed/')\n else:\n return redirect('/login')\n\n\n\n\n# -----------------------------------------------Create a functions for validating the session---------------------------------------------\ndef check_validation(request):\n #----------------------------------------------here is the function logic----------------------------------------------------------\n if request.COOKIES.get('session_token'):\n session = SessionToken.objects.filter(session_token=request.COOKIES.get('session_token')).first()\n if session:\n time_to_live = session.created_on + timedelta(days=1)\n if time_to_live > timezone.now():\n return session.user\n else:\n return None\n\ndef logout_view(request):\n return render(request,'logout.html')", "sub_path": "upload_to_win/upload_to_win/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9328, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "demoapp.forms.SignUpForms", "line_number": 21, "usage_type": "call"}, {"api_name": "demoapp.models.UserModel", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 28, "usage_type": "call"}, {"api_name": "yagmail.SMTP", "line_number": 32, "usage_type": "call"}, {"api_name": "ctypes.windll.user32.MessageBoxW", "line_number": 34, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 34, "usage_type": "attribute"}, {"api_name": "demoapp.forms.SignUpForms", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "demoapp.forms.LoginForm", "line_number": 53, "usage_type": "call"}, {"api_name": "demoapp.forms.LoginForm", "line_number": 58, "usage_type": "call"}, {"api_name": "demoapp.models.UserModel.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "demoapp.models.UserModel.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "demoapp.models.UserModel", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.auth.hashers.check_password", "line_number": 67, "usage_type": "call"}, 
{"api_name": "demoapp.models.SessionToken", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "tkMessageBox.showinfo", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}, {"api_name": "demoapp.forms.PostForm", "line_number": 99, "usage_type": "call"}, {"api_name": "demoapp.models.PostModel", "line_number": 103, "usage_type": "call"}, {"api_name": "upload_to_win.settings.BASE_DIR", "line_number": 106, "usage_type": "name"}, {"api_name": "imgurpython.ImgurClient", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "demoapp.forms.PostForm", "line_number": 116, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "demoapp.models.PostModel.objects.all", "line_number": 127, "usage_type": "call"}, {"api_name": "demoapp.models.PostModel.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "demoapp.models.PostModel", "line_number": 127, "usage_type": "name"}, {"api_name": "demoapp.models.LikeModel.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "demoapp.models.LikeModel.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "demoapp.models.LikeModel", "line_number": 131, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "demoapp.forms.LikeForm", "line_number": 148, "usage_type": "call"}, {"api_name": "demoapp.models.LikeModel.objects.filter", "line_number": 151, "usage_type": "call"}, {"api_name": "demoapp.models.LikeModel.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "demoapp.models.LikeModel", "line_number": 151, "usage_type": "name"}, {"api_name": "demoapp.models.LikeModel.objects.create", "line_number": 153, "usage_type": "call"}, {"api_name": "demoapp.models.LikeModel.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "demoapp.models.LikeModel", "line_number": 153, "usage_type": "name"}, {"api_name": "yagmail.SMTP", "line_number": 157, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 165, "usage_type": "call"}, {"api_name": "demoapp.forms.CommentForm", "line_number": 172, "usage_type": "call"}, {"api_name": "demoapp.models.CommentModel.objects.create", "line_number": 176, "usage_type": "call"}, {"api_name": "demoapp.models.CommentModel.objects", "line_number": 176, "usage_type": "attribute"}, {"api_name": "demoapp.models.CommentModel", "line_number": 176, "usage_type": "name"}, {"api_name": "yagmail.SMTP", "line_number": 181, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 184, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 187, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 189, "usage_type": "call"}, {"api_name": "demoapp.models.SessionToken.objects.filter", "line_number": 198, "usage_type": "call"}, {"api_name": "demoapp.models.SessionToken.objects", 
"line_number": 198, "usage_type": "attribute"}, {"api_name": "demoapp.models.SessionToken", "line_number": 198, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 200, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 201, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 201, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "650595375", "text": "# encoding: utf-8\n\n\"\"\"\nTämä on kesken\n\"\"\"\n\nimport os\nimport sys\nimport traceback\nimport uuid\nimport hashlib\nimport random\nimport string\nimport re\nimport datetime\nimport csv\n\nimport alusta_tietokanta\nimport couch\nfrom WhipAroundRegistry import WarApp\n\netunimet_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"etunimet.txt\")\nsukunimet_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"sukunimet.txt\")\n\netunimia = []\nwith open(etunimet_path) as f:\n etunimia = [line.rstrip() for line in f]\n\nsukunimia = []\nwith open(sukunimet_path) as f:\n sukunimia = [line.rstrip() for line in f]\n\norganisaatioita = [\n {'nimi': u\"Microapple\", 'ytunnus': '12345678'},\n {'nimi': u\"Googlesoft\", 'ytunnus': '12345679'},\n {'nimi': u\"Yahoogle\", 'ytunnus': '12445679'},\n {'nimi': u\"Twitbook\", 'ytunnus': '12545679'},\n {'nimi': u\"FaceIn\", 'ytunnus': '12645679'},\n {'nimi': u\"Företaget Ab\", 'ytunnus': '32345679'},\n {'nimi': u\"Yritys Oy\", 'ytunnus': '92345679'},\n {'nimi': u\"Firma Oy\", 'ytunnus': '82345679'},\n {'nimi': u\"Testiyhdistys Ry\", 'ytunnus': '66345679'}\n]\n\nTIEDOSTO = 'luvat_muokattu.csv'\n\n\ndef kasittele_rivi(rivi, tietokanta, ytunnusdict, numero, kayttaja):\n (\n luettu_organisaatio,\n luettu_alkupvm,\n luettu_loppupvm\n ) = rivi\n\n kerayksen_tyyppi = \"yleishyodyllinen\"\n keraystavat = [\"lipas\", \"tilisiirto\"]\n\n kerays = {\n \"_id\": str(uuid.uuid4()),\n \"kerayksen_numero\": numero,\n \"dokumenttityyppi\": \"kerays\",\n \"luoja\": kayttaja[\"_id\"],\n \"vastuuhenkilot\": [],\n \"nimi\": u\"Organisaation {} keräys\".format(luettu_organisaatio),\n \"kerayksen_tyyppi\": kerayksen_tyyppi,\n \"kuvaus\": u\"Tämä on esimerkkikeräys, joka on luotu vanhojen keräyslupien pohjalta\",\n \"epaonnistuminen\": \"\",\n \"luomisaika\": datetime.datetime.now().isoformat(' '),\n \"alkamispvm\": luettu_alkupvm,\n \"paattymispvm\": luettu_loppupvm,\n \"julkaistu\": True,\n \"keraystavat\": keraystavat,\n \"organisaatio\": {\n \"nimi\": luettu_organisaatio,\n \"ytunnus\": ytunnusdict[luettu_organisaatio]\n },\n \"yhteistyotahot\": None,\n \"rahallinen_tavoite\": 0,\n \"kustannusten_osuus\": 0,\n \"tilinumerot\": [],\n \"linkki\": \"http://www.example.example/\"\n }\n tietokanta.save_doc(kerays)\n\n\ndef luo_supertestikayttaja(tietokanta):\n hetu = \"000000-0000\"\n kayttaja_id = str(uuid.uuid4())\n kayttaja = {\n \"_id\": kayttaja_id,\n \"dokumenttityyppi\": \"kayttaja\",\n \"vetuma_etunimi\": \"Testaaja\",\n \"vetuma_sukunimi\": \"Test\",\n \"vetuma_hetu\": hashlib.sha1(hetu).hexdigest(),\n \"testi_hetu\": hetu,\n \"kayttaja_sahkoposti\": \"NONE\",\n \"organisaatiot\": {},\n \"istuntotunnus\": None,\n \"kirjautumisaika\": None,\n \"sivunlatausaika\": None\n }\n tietokanta.save_doc(kayttaja)\n return kayttaja\n\n\ndef tee_ytunnusdict(tiedostonimi):\n\n ytunnusdict = {}\n\n with open(tiedostonimi, 'rb') as csvfile:\n lukija = csv.reader(csvfile, delimiter=\";\")\n i = 1\n for rivi in lukija:\n ytunnusdict[rivi[0]] = str(i)\n i += 1\n\n return 
ytunnusdict\n\n\ndef tyhjenna_vanha_ja_alusta():\n tietokanta = couch.BlockingCouch(\"rahankeraysrekisteri\")\n\n tietokanta.delete_db()\n\n tietokanta = alusta_tietokanta.alusta_tietokanta()\n\n return tietokanta\n\n\ndef dummy_kayttajia(tietokanta):\n\n kayttajat = []\n\n for i in xrange(20):\n # Randomisti ei organisaatioita tai 1-2 organisaatiota\n organisaatiot = {}\n if random.choice([True, False]):\n organisaatio = random.choice(organisaatioita)\n organisaatiot[organisaatio[\"ytunnus\"]] = {\n \"nimi\": organisaatio[\"nimi\"],\n \"roolit\": [\"all\"]\n }\n if random.choice([True, False]):\n organisaatio = random.choice(organisaatioita)\n if not organisaatio[\"ytunnus\"] in organisaatiot:\n organisaatiot[organisaatio[\"ytunnus\"]] = {\n \"nimi\": organisaatio[\"nimi\"],\n \"roolit\": [\"all\"]\n }\n hetu = str(random.randint(1, 28)).zfill(2) + \\\n str(random.randint(1, 12)).zfill(2) + \\\n str(random.randint(60, 90)) + \"-\" + \\\n str(random.randint(20, 600)) + \\\n random.choice(string.letters).upper()\n etunimi = random.choice(etunimia) + \"-\" + random.choice(etunimia)\n sukunimi = random.choice(sukunimia)\n sahkoposti = etunimi.replace('-', '').lower() + \".\" + \\\n sukunimi.replace('-', '').lower() + \"@sahkopostia.fi\"\n kayttaja = {\n \"_id\": str(uuid.uuid4()),\n \"dokumenttityyppi\": \"kayttaja\",\n \"vetuma_etunimi\": etunimi,\n \"vetuma_sukunimi\": sukunimi,\n \"vetuma_hetu\": hashlib.sha1(hetu).hexdigest(),\n \"testi_hetu\": hetu,\n \"kayttaja_sahkoposti\": sahkoposti,\n \"organisaatiot\": organisaatiot,\n \"istuntotunnus\": None,\n \"kirjautumisaika\": None,\n \"sivunlatausaika\": None\n }\n kayttajat.append(kayttaja)\n tietokanta.save_doc(kayttaja)\n\n return kayttajat\n\n\ndef lisaa_kerayksia(tietokanta, numero, kayttajat):\n # Demokäyttäjä project review 3:a varten\n organisaatiot = {}\n organisaatiot[\"8892284757\"] = {\n \"nimi\": u\"Suomen laivayhdistys ry\",\n \"roolit\": [\"all\"]\n }\n hetu = u\"230172-253Z\"\n etunimi = u\"Pertti\"\n sukunimi = u\"Virtanen\"\n sahkoposti = \"NONE\"\n kayttaja = {\n \"_id\": str(uuid.uuid4()),\n \"dokumenttityyppi\": \"kayttaja\",\n \"vetuma_etunimi\": etunimi,\n \"vetuma_sukunimi\": sukunimi,\n \"vetuma_hetu\": hashlib.sha1(hetu).hexdigest(),\n \"testi_hetu\": hetu,\n \"kayttaja_sahkoposti\": sahkoposti,\n \"organisaatiot\": organisaatiot,\n \"istuntotunnus\": None,\n \"kirjautumisaika\": None,\n \"sivunlatausaika\": None\n }\n tietokanta.save_doc(kayttaja)\n\n perttin_kerays = {\n \"_id\": str(uuid.uuid4()),\n \"kerayksen_numero\": numero,\n \"dokumenttityyppi\": \"kerays\",\n \"luoja\": kayttaja[\"_id\"],\n \"vastuuhenkilot\": [kayttaja[\"_id\"]],\n \"nimi\": u\"Laivayhdistyksen yhdistystalo\",\n \"kerayksen_tyyppi\": \"joukkorahoitus\",\n \"kuvaus\": u\"Suomen Laivayhdistykselle kerätään rahaa uuden yhdistystalon rakentamiseen. 
Lahjoittakaa reippaasti!\",\n \"epaonnistuminen\": \"Emme saa epäonnistua!\",\n \"luomisaika\": datetime.datetime.now().isoformat(' '),\n \"alkamispvm\": \"2014-03-01\",\n \"paattymispvm\": \"2014-08-31\",\n \"julkaistu\": True,\n \"keraystavat\": [\"sms\", \"bitcoin\", \"paypal\"],\n \"organisaatio\": {\n \"nimi\": u\"Suomen laivayhdistys ry\",\n \"ytunnus\": \"8892284757\"\n },\n \"yhteistyotahot\": None,\n \"rahallinen_tavoite\": 9000000,\n \"kustannusten_osuus\": 27,\n \"tilinumerot\": [\"FI89 12345 6789012\", \"FI89 12345 6789013\"],\n \"linkki\": \"http://www.vikingline.fi\"\n }\n tietokanta.save_doc(perttin_kerays)\n\n # Keräyksiä\n kayttaja = tietokanta.get_doc(random.choice(kayttajat)[\"_id\"])\n if not kayttaja[\"organisaatiot\"]:\n kayttaja[\"organisaatiot\"] = {}\n kayttaja[\"organisaatiot\"][\"09876544\"] = {\n \"nimi\": \"Koirapuistot For Life Ry\",\n \"roolit\": [\"all\"]\n }\n tietokanta.save_doc(kayttaja)\n kerays1 = {\n \"_id\": str(uuid.uuid4()),\n \"kerayksen_numero\": numero+1,\n \"dokumenttityyppi\": \"kerays\",\n \"luoja\": kayttaja[\"_id\"],\n \"vastuuhenkilot\": [kayttaja[\"_id\"], random.choice(kayttajat)[\"_id\"]],\n \"nimi\": u\"Koirapuistojen kunnostusprojekti\",\n \"kerayksen_tyyppi\": \"yleishyodyllinen\",\n \"kuvaus\": u\"Keräyksestä saaduilla varoilla hankitaan välineitä\\\n ja tarvikkeita koirapuistojen kunnostustalkoisiin.\",\n \"epaonnistuminen\": \"\",\n \"alkamispvm\": \"2014-01-01\",\n \"paattymispvm\": None,\n \"julkaistu\": True,\n \"keraystavat\": [\"lipas\"],\n \"organisaatio\": {\n \"nimi\": \"Koirapuistot For Life Ry\",\n \"ytunnus\": \"09876544\"\n },\n \"yhteistyotahot\": [u\"TESTIkoirat\"],\n \"rahallinen_tavoite\": 10000,\n \"kustannusten_osuus\": 10,\n \"tilinumerot\": [\"FI12 34567 89012345\", \"FI98 12345 98765443\"],\n \"linkki\": \"http://www.facebook.com/KoirapuistotForLife\"\n }\n tietokanta.save_doc(kerays1)\n\n kayttaja = tietokanta.get_doc(random.choice(kayttajat)[\"_id\"])\n if not kayttaja[\"organisaatiot\"]:\n kayttaja[\"organisaatiot\"] = {}\n kayttaja[\"organisaatiot\"][\"1239875\"] = {\n \"nimi\": \"Radio TESTI\",\n \"roolit\": [\"all\"]\n }\n tietokanta.save_doc(kayttaja)\n kerays2 = {\n \"_id\": str(uuid.uuid4()),\n \"kerayksen_numero\": numero+2,\n \"dokumenttityyppi\": \"kerays\",\n \"luoja\": kayttaja[\"_id\"],\n \"vastuuhenkilot\": [kayttaja[\"_id\"]],\n \"nimi\": u\"Radio TESTI kuuntelijamaksu\",\n \"kerayksen_tyyppi\": \"joukkorahoitus\",\n \"kuvaus\": u\"Radio TESTI kerää kuuntelijoilta lahjoituksia, joilla rahoitetaan yrityksen toiminta.\",\n \"epaonnistuminen\": \"\",\n \"luomisaika\": datetime.datetime.now().isoformat(' '),\n \"alkamispvm\": \"2014-03-01\",\n \"paattymispvm\": \"2014-08-31\",\n \"julkaistu\": True,\n \"keraystavat\": [\"sms\", \"bitcoin\", \"paypal\"],\n \"organisaatio\": {\n \"nimi\": \"Radio TESTI\",\n \"ytunnus\": \"1239875\"\n },\n \"yhteistyotahot\": None,\n \"rahallinen_tavoite\": 9000000,\n \"kustannusten_osuus\": 27,\n \"tilinumerot\": [\"FI89 12345 6789012\", \"FI89 12345 6789013\"],\n \"linkki\": \"http://www.radiotesti.fi\"\n }\n tietokanta.save_doc(kerays2)\n\n kayttaja = tietokanta.get_doc(random.choice(kayttajat)[\"_id\"])\n if not kayttaja[\"organisaatiot\"]:\n kayttaja[\"organisaatiot\"] = {}\n kayttaja[\"organisaatiot\"][\"9879873\"] = {\n \"nimi\": \"Tietosanakirja\",\n \"roolit\": [\"all\"]\n }\n tietokanta.save_doc(kayttaja)\n kerays3 = {\n \"_id\": str(uuid.uuid4()),\n \"kerayksen_numero\": numero+3,\n \"dokumenttityyppi\": \"kerays\",\n \"luoja\": kayttaja[\"_id\"],\n 
\"vastuuhenkilot\": [kayttaja[\"_id\"]],\n \"nimi\": u\"Tietosanakirjan ylläpitokustannukset\",\n \"kerayksen_tyyppi\": \"yleishyodyllinen\",\n \"kuvaus\": u\"Kerä��mme rahaa internetissä toimivan tietosanakirjan ylläpitämiseen. Rahat menevät servereiden ja muun laitteiston ylläpitoon.\",\n \"epaonnistuminen\": \"\",\n \"luomisaika\": datetime.datetime.now().isoformat(' '),\n \"alkamispvm\": \"2011-01-01\",\n \"paattymispvm\": None,\n \"julkaistu\": True,\n \"keraystavat\": [\"paypal\", \"lipas\"],\n \"organisaatio\": {\n \"ytunnus\": \"9879873\",\n \"nimi\": \"Tietosanakirja\"\n },\n \"yhteistyotahot\": None,\n \"rahallinen_tavoite\": 1000000000,\n \"kustannusten_osuus\": 20,\n \"tilinumerot\": [\"FI89 12345 6789012\"],\n \"linkki\": \"http://www.tietosanakirja.example\"\n }\n tietokanta.save_doc(kerays3)\n\n\ndef main():\n tietokanta = tyhjenna_vanha_ja_alusta()\n kayttaja = luo_supertestikayttaja(tietokanta)\n ytunnusdict = tee_ytunnusdict(TIEDOSTO)\n\n numero = 1000\n with open(TIEDOSTO, 'rb') as csvfile:\n lukija = csv.reader(csvfile, delimiter=\";\")\n for rivi in lukija:\n try:\n kasittele_rivi(rivi, tietokanta, ytunnusdict, numero, kayttaja)\n numero += 1\n except ValueError:\n pass\n\n dummy_kayttajat = dummy_kayttajia(tietokanta)\n\n lisaa_kerayksia(tietokanta, numero, dummy_kayttajat)\n\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "WhipAroundRegistry/scripts/alusta_testitietokanta_csvsta.py", "file_name": "alusta_testitietokanta_csvsta.py", "file_ext": "py", "file_size_in_byte": 11806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 23, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 88, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 94, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 111, "usage_type": "call"}, {"api_name": "couch.BlockingCouch", "line_number": 121, "usage_type": "call"}, {"api_name": "alusta_tietokanta.alusta_tietokanta", "line_number": 125, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 137, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 138, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 143, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 144, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 150, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 151, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 152, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 153, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 154, "usage_type": "call"}, {"api_name": "string.letters", "line_number": 154, 
"usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 155, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 156, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 160, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 164, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 190, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 194, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 205, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 232, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 241, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 245, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 267, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 276, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 285, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 285, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 302, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 311, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 320, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 320, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 345, "usage_type": "call"}]} +{"seq_id": "472058748", "text": "\"\"\"Trains a ResNet on the CIFAR10 dataset.\n\nResNet v1\nDeep Residual Learning for Image Recognition\nhttps://arxiv.org/pdf/1512.03385.pdf\n\nResNet v2\nIdentity Mappings in Deep Residual Networks\nhttps://arxiv.org/pdf/1603.05027.pdf\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nimport numpy as np\nimport os\n\n# Training params.\nbatch_size = 32\nepochs = 180\ndata_augmentation = True\n\n# | | | Orig Paper| | Orig Paper|\n# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | sec/epoch\n# | | %Accuracy | %Accuracy | %Accuracy | %Accuracy | GTX 1080Ti\n# ResNet20 | 3 | 91.95 | 91.25 | 92.57 | - | 58\n# ResNet32 | 5 | 92.00 | 92.49 | 92.22 | - | 96\n# ResNet44 | 7 | 91.07 | 92.83 | 91.02 | - | 128\n# ResNet56 | 9 | 90.25 | 93.03 | 91.37 | - | 163\n# ResNet110 | 18 | 90.23 | 93.39 | 91.22 | 93.63 | 330\nn = 3\n\n# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)\nversion = 1\n\n# Subtracting pixel mean improves accuracy\nuse_pix_mean = True\n\n# Network architecture params.\nnum_classes = 10\nnum_filters = 16\nnum_blocks = 3\nnum_sub_blocks = 2 * n\n\n\n# Learning rate scheduler - called every epoch as part of callbacks\ndef lr_schedule(epoch):\n lr = 1e-3\n if n == 18:\n lr = 1e-4\n if epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print(\"Learning rate: \", lr)\n return lr\n\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Input 
image dimensions.\n# We assume data format \"channels_last\".\nimg_rows = x_train.shape[1]\nimg_cols = x_train.shape[2]\nchannels = x_train.shape[3]\n\nif K.image_data_format() == 'channels_first':\n    img_rows = x_train.shape[2]\n    img_cols = x_train.shape[3]\n    channels = x_train.shape[1]\n    x_train = x_train.reshape(x_train.shape[0], channels, img_rows, img_cols)\n    x_test = x_test.reshape(x_test.shape[0], channels, img_rows, img_cols)\n    input_shape = (channels, img_rows, img_cols)\nelse:\n    img_rows = x_train.shape[1]\n    img_cols = x_train.shape[2]\n    channels = x_train.shape[3]\n    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)\n    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)\n    input_shape = (img_rows, img_cols, channels)\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\nif use_pix_mean:\n    x_train_mean = np.mean(x_train, axis=0)\n    x_train -= x_train_mean\n    x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# Start model definition.\ninputs = Input(shape=input_shape)\nx = Conv2D(num_filters,\n           kernel_size=3,\n           padding='same',\n           kernel_initializer='he_normal',\n           kernel_regularizer=l2(1e-4))(inputs)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\n\n# Instantiate convolutional base (stack of blocks).\nfor i in range(num_blocks):\n    for j in range(num_sub_blocks):\n        strides = 1\n        is_first_layer_but_not_first_block = j == 0 and i > 0\n        if is_first_layer_but_not_first_block:\n            strides = 2\n        y = Conv2D(num_filters,\n                   kernel_size=3,\n                   padding='same',\n                   strides=strides,\n                   kernel_initializer='he_normal',\n                   kernel_regularizer=l2(1e-4))(x)\n        y = BatchNormalization()(y)\n        y = Activation('relu')(y)\n        y = Conv2D(num_filters,\n                   kernel_size=3,\n                   padding='same',\n                   kernel_initializer='he_normal',\n                   kernel_regularizer=l2(1e-4))(y)\n        y = BatchNormalization()(y)\n        if version == 2:\n            y = Activation('relu')(y)\n        if is_first_layer_but_not_first_block:\n            x = Conv2D(num_filters,\n                       kernel_size=1,\n                       padding='same',\n                       strides=2,\n                       kernel_initializer='he_normal',\n                       kernel_regularizer=l2(1e-4))(x)\n        x = keras.layers.add([x, y])\n        if version != 2:\n            x = Activation('relu')(x)\n\n    num_filters = 2 * num_filters\n\n# Add classifier on top.\nx = AveragePooling2D()(x)\ny = Flatten()(x)\noutputs = Dense(num_classes,\n                activation='softmax',\n                kernel_initializer='he_normal')(y)\n\n# Instantiate and compile model.\nmodel = Model(inputs=inputs, outputs=outputs)\nmodel.compile(loss='categorical_crossentropy',\n              optimizer=Adam(lr=lr_schedule(0)),\n              metrics=['accuracy'])\nmodel.summary()\n\nif version == 2:\n    print(\"ResNet v2\")\nelse:\n    print(\"ResNet v1\")\n\n# Prepare model saving directory.\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'cifar10_resnet_model.{epoch:02d}.h5'\nif not os.path.isdir(save_dir):\n    os.makedirs(save_dir)\nfilepath = os.path.join(save_dir, model_name)\n\n# Prepare callbacks for model saving and for learning rate decaying.\ncheckpoint = ModelCheckpoint(filepath=filepath,\n                             monitor='val_acc',\n                             verbose=1,\n                             save_best_only=True)\n\nlr_scheduler = LearningRateScheduler(lr_schedule)\n\ncallbacks = [checkpoint, lr_scheduler]\n\n# Run training, with or without data 
augmentation.\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks)\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0, # randomly rotate images in the range (deg 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally\n height_shift_range=0.1, # randomly shift images vertically\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n # Compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\n steps_per_epoch=x_train.shape[0] // batch_size,\n validation_data=(x_test, y_test),\n epochs=epochs, verbose=1, workers=4,\n callbacks=callbacks)\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n", "sub_path": "chapter3/cifar10-resnet.3.2.1.py", "file_name": "cifar10-resnet.3.2.1.py", "file_ext": "py", "file_size_in_byte": 7922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "keras.datasets.cifar10.load_data", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10", "line_number": 70, "usage_type": "name"}, {"api_name": "keras.backend.image_data_format", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 108, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 109, "usage_type": "attribute"}, {"api_name": "keras.layers.Input", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 128, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 133, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 134, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 135, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 140, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 141, "usage_type": "call"}, {"api_name": 
"keras.layers.Activation", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 150, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 151, "usage_type": "attribute"}, {"api_name": "keras.layers.Activation", "line_number": 153, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 158, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 159, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 160, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 165, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 184, "usage_type": "call"}, {"api_name": "keras.callbacks.LearningRateScheduler", "line_number": 189, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 205, "usage_type": "call"}]} +{"seq_id": "214930681", "text": "from flask import Flask\nfrom flask import request\nfrom flask import render_template\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/write\", methods=[\"GET\", \"POST\"])\ndef board_write():\n if request.method == \"POST\":\n name = request.form.get(\"name\")\n title = request.form.get(\"title\")\n contents = request.form.get(\"contents\")\n else:\n return render_template(\"write.html\")\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True, port=9000)", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 472, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "37623113", "text": "import inline as inline\nimport numpy\nimport 
matplotlib\nimport matplotlib.pyplot as pyplot\nimport scipy.special\n\nclass NeuralNetwork:\n    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n        self.inodes = inputnodes\n        self.hnodes = hiddennodes\n        self.onodes = outputnodes\n\n        # learningrate\n        self.lr = learningrate\n\n        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))\n        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))\n\n        # sigmoid activation function\n        self.activation_function = lambda x: scipy.special.expit(x)\n\n    def train(self, inputs_list, targets_list):\n        # train the neural network\n        inputs = numpy.array(inputs_list, ndmin=2).T\n        targets = numpy.array(targets_list, ndmin=2).T\n\n        # calculate signals into hidden layer\n        hidden_inputs = numpy.dot(self.wih, inputs)\n        # calculate the signals emerging from hidden layer\n        hidden_outputs = self.activation_function(hidden_inputs)\n\n        # calculate signals into final output layer\n        final_inputs = numpy.dot(self.who, hidden_outputs)\n        # calculate signal emerging from final output layer\n        final_outputs = self.activation_function(final_inputs)\n\n        # error is the (target - actual)\n        output_errors = targets - final_outputs\n        # hidden layer error is the output_errors, split by weights, recombined at hidden nodes\n        hidden_errors = numpy.dot(self.who.T, output_errors)\n\n        # update the weights for the links between the hidden and output layers\n        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))\n\n        # update the weights for the links between the input and hidden layers\n        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))\n\n\n\n        pass\n\n    def query(self, input_list):\n\n        # convert input_list into a 2d array\n\n        inputs = numpy.array(input_list, ndmin=2).T\n\n        # calculate signals into hidden layer\n        hidden_inputs = numpy.dot(self.wih, inputs)\n        # calculate signals emerging from hidden layer\n        hidden_outputs = self.activation_function(hidden_inputs)\n\n        # calculate signals into final output layer\n        final_inputs = numpy.dot(self.who, hidden_outputs)\n        # calculate signals emerging from final output layer\n        final_outputs = self.activation_function(final_inputs)\n\n        return final_outputs\n\ninput_nodes = 784\nhidden_nodes = 100\noutput_nodes = 10\nlearning_rate = 0.3\n\n# instance of neural network\nn = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n\n# array = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n# print(array.query([1.0, 0.5, -1.5]))\n\ntraining_data_file = open(\"MNIST_DATSET\\mnist_train_100.csv\", 'r')\ntraining_data_list = training_data_file.readlines()\ntraining_data_file.close()\n\n\n\nall_values = training_data_list[2].split(',')\nimage_array = numpy.asfarray(all_values[1:]).reshape((28,28))\nmatplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')\npyplot.show()\n#\n# scaled_input = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n#\n# # output nodes is 10\n# onodes = 10\n# targets = numpy.zeros(onodes) + 0.01\n# targets[int(all_values[0])] = 0.99\n# print(targets)\n\n# train neural network\n\n# go through all records in the training data set\nfor record in training_data_list:\n    # split records by comma\n    all_values = record.split(',')\n    # scale and shift the inputs\n    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n    # create the target output values (all 0.01, 
except the desired label which is 0.99)\n    targets = numpy.zeros(output_nodes) + 0.01\n    # all_values[0] is the target label for this record\n    targets[int(all_values[0])] = 0.99\n    n.train(inputs, targets)\n    pass\n\n\n\n", "sub_path": "first.py", "file_name": "first.py", "file_ext": "py", "file_size_in_byte": 4018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.random.normal", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scipy.special.special.expit", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.special.special", "line_number": 20, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.asfarray", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.asfarray", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "245658143", "text": "import numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport fnmatch\nimport os\n\nsimulationInfo = np.genfromtxt('simulationInfo.txt')\nnumberOfParticles = int(simulationInfo[0])\nstartTime = int(simulationInfo[1])\nnumberOfParticleFiles = len(fnmatch.filter(os.listdir('Particles/Particle0/'), '*.txt'))\ngridParameters = np.genfromtxt('gridParameters.txt')\n\nlengthOfSimulationBoxInX = gridParameters[6]\nlengthOfSimulationBoxInY = gridParameters[7]\nlengthOfOneBoxInX = gridParameters[0] * gridParameters[3]\nlengthOfOneBoxInY = gridParameters[1] * gridParameters[4]\nnumberOfBoxesInX = lengthOfSimulationBoxInX / lengthOfOneBoxInX\nnumberOfBoxesInY = lengthOfSimulationBoxInY / lengthOfOneBoxInY\n\nX = np.zeros((numberOfParticles,1))\nY = np.zeros((numberOfParticles,1))\nx = []\ny = []\nxnew = []\nynew = []\n\nfor i in range(startTime, startTime + numberOfParticleFiles):\n\t# open figure\n\tfig = plt.figure()\n\tfor p in range(numberOfParticles):\n\t\t# read data from text and save it into array data\n\t\tdata = np.genfromtxt('Particles/Particle'+ str(p) +'/Particle' + str(p) + '_' + str(i) + '.txt')\n\t\t# 
define variables\n\t\tx.append(data[0][1])\n\t\ty.append(data[0][2])\n\tX = np.c_[X,x]\n\tY = np.c_[Y,y]\n\tx=[]\n\ty=[]\n\tif i == startTime: #or len(X[0]) > 40:\n\t\tX = np.delete(X,0,1)\n\t\tY = np.delete(Y,0,1)\n\tfor p in range(numberOfParticles):\n\t\t# plot x and y value of particle as red dot\n\t\tplt.plot(X[p], Y[p], color = 'r')\n\t# set labels\n\tplt.xlabel(\"X\")\n\tplt.ylabel(\"Y\")\n\t# set axis\n\tplt.xlim([0, lengthOfSimulationBoxInX])\n\tplt.ylim([0, lengthOfSimulationBoxInY])\n\tplt.xticks(np.arange(0, lengthOfSimulationBoxInX + 1, lengthOfOneBoxInX))\n\tplt.yticks(np.arange(0, lengthOfSimulationBoxInY + 1, lengthOfOneBoxInY))\n\tplt.grid(linestyle = \"-\", color='red')\n\t# define filename for saving\n\tfilename = 'img' + str(i - startTime)\n\tfig.savefig(\"png/\" + \"{}.png\".format(filename), bbox_inches='tight', dpi=300)\n\t# close fig\n\tplt.close(fig)\n", "sub_path": "Analysis/plot_borisPusher.py", "file_name": "plot_borisPusher.py", "file_ext": "py", "file_size_in_byte": 1952, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.genfromtxt", "line_number": 8, "usage_type": "call"}, {"api_name": "fnmatch.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.delete", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 60, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "291694571", "text": "# File: GaudiMP/Parallel.py\n# Author: Pere Mato (pere.mato@cern.ch)\n\n\"\"\" GaudiMP.Parallel module.\n This module provides 'parallel' processing support for GaudiPyhton.\n It is adding some sugar on top of public domain packages such as\n the 'multiprocessing' or the 'pp' packages. The interface can be made\n independent of the underlying implementation package.\n Two main class are defined: Task and WorkManager\n\"\"\"\nfrom __future__ import print_function\n__all__ = [ 'Task','WorkManager' ]\nexcluded_varnames = ['HOSTNAME', 'SSH_CLIENT', 'SSH_CONNECTION', 'DISPLAY']\n\nimport sys, os, time, copy\nimport multiprocessing\n\nfrom ostap.utils.progress_bar import ProgressBar\nfrom ostap.logger.logger import getLogger\nfrom ostap.parallel.task import Task, Statistics , StatMerger \nlogger = getLogger('ostap.parallel.mp_gaudi')\n\ndef _prefunction( f, task , jobid , item) :\n return f( ( task , jobid , item ) )\ndef _ppfunction ( args ) :\n #--- Unpack arguments\n task, jobid , item = args\n with Statistics() as stat : \n task.initialize_remote ( jobid )\n result = task.process ( jobid , item )\n stat.stop()\n return result , stat\n\nclass WorkManager(object) :\n \"\"\" Class to in charge of managing the tasks and distributing them to\n the workers. They can be local (using other cores) or remote\n using other nodes in the local cluster \"\"\"\n\n def __init__( self, ncpus='autodetect', ppservers=None , silent = False , **kwargs ) :\n \n if ncpus == 'autodetect' : self.ncpus = multiprocessing.cpu_count()\n else : self.ncpus = ncpus\n \n self.pool = multiprocessing.Pool(self.ncpus)\n self.stats = StatMerger()\n \n self.silent = True if silent else False \n\n def __del__(self):\n if hasattr(self,'server') : self.server.destroy()\n\n def process(self, task, items, timeout=90000):\n if not isinstance(task,Task) :\n raise TypeError(\"task argument needs to be an 'Task' instance\")\n # --- Call the Local initialialization\n task.initialize_local ()\n # --- Schedule all the jobs ....\n \n start = time.time()\n from itertools import repeat , count \n jobs = self.pool.map_async ( _ppfunction, zip( repeat ( task ) , count () , items ))\n \n with ProgressBar ( max_value = len ( items ) , description = \"# Job execution:\" , silent = self.silent ) as bar : \n for result, stat in jobs.get(timeout) :\n task.merge_results ( result )\n self.stats += stat \n bar += 1\n \n end = time.time()\n if not self.silent : \n self.print_statistics()\n logger.info ( 'Time elapsed since server creation %f' % ( end - start ) ) \n # --- Call the Local Finalize\n task.finalize()\n return task.results()\n \n def print_statistics(self):\n self.stats.print_stats ()\n\n# == EOF ====================================================================================\n", "sub_path": "ostap/parallel/mp_gaudi.py", "file_name": "mp_gaudi.py", "file_ext": "py", "file_size_in_byte": 3123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "ostap.logger.logger.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "ostap.parallel.task.Statistics", "line_number": 28, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 41, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 44, "usage_type": "call"}, {"api_name": "ostap.parallel.task.StatMerger", "line_number": 45, 
"usage_type": "call"}, {"api_name": "ostap.parallel.task.Task", "line_number": 53, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 61, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 61, "usage_type": "call"}, {"api_name": "ostap.utils.progress_bar.ProgressBar", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "396457645", "text": "from PIL import Image\nfrom PIL import ImageDraw\nimport queue\nimport math\n\nwidth = 100\nbackground = (255, 255, 255, 255)\nfill_color = (0, 0, 0, 255)\nim = Image.new(\"RGBA\", (width, width), background)\npen = ImageDraw.Draw(im)\nweight = [[1/16, 1/8, 1/16], [1/8, 1/4, 1/8], [1/16, 1/8, 1/16]]\na = 0\nb = 0\nc = 0\ns = 0\n\ndef draw_point(x, y):\n\tw = 0\n\tfor i in range(3):\n\t\tfor j in range(3):\n\t\t\t_x = x + i * 0.5\n\t\t\t_y = y + j * 0.5\n\t\t\tdist = abs((a * _x + b * _y + c) / s);\n\t\t\tif dist <= 0.5:\n\t\t\t\tw += weight[i][j]\n\tpen.point((x, y), (round(w * fill_color[0] + (1 - w) * background[0]), round(w * fill_color[1] + (1 - w) * background[1]), round(w * fill_color[2] + (1 - w) * background[2]), 255))\n\ndef draw_line(s, t):\n\tsx, sy = s[0], s[1]\n\ttx, ty = t[0], t[1]\n\tif (ty == sy):\n\t\tif (sx > tx):\n\t\t\tsx, tx = tx, sx\n\t\tfor i in range(sx, tx + 1):\n\t\t\tpen.point((i, sy), fill_color)\n\t\treturn \n\tif (tx == sx):\n\t\tif (sy > ty):\n\t\t\tsy, ty = ty, sy\n\t\tfor i in range(sy, ty + 1):\n\t\t\tpen.point((sx, i), fill_color)\n\t\treturn \n\tslope = (ty - sy) / (tx - sx)\n\tif (abs(slope) < 1):\n\t\tif (sx > tx):\n\t\t\tsx, tx, sy, ty = tx, sx, ty, sy\n\t\tdx = tx - sx\n\t\tdy = ty - sy\n\t\tk = dy * 2\n\t\te = 0\n\t\tx, y = sx, sy\n\t\twhile x < tx:\n\t\t\tdraw_point(x, y)\n\t\t\tdraw_point(x, y - 1)\n\t\t\tdraw_point(x, y + 1)\n\t\t\tx += 1\n\t\t\te += k\n\t\t\tif e > dx:\n\t\t\t\ty += 1\n\t\t\t\te -= dx * 2\n\t\t\tif e < -dx:\n\t\t\t\ty -= 1\n\t\t\t\te += dx * 2\n\telse:\n\t\tif (sy > ty):\n\t\t\tsx, tx, sy, ty = tx, sx, ty, sy\n\t\tdx = tx - sx\n\t\tdy = ty - sy\n\t\tk = dx * 2\n\t\te = 0\n\t\tx, y = sx, sy\n\t\twhile y < ty:\n\t\t\tdraw_point(x, y)\n\t\t\tdraw_point(x - 1, y)\n\t\t\tdraw_point(x + 1, y)\n\t\t\ty += 1\n\t\t\te += k\n\t\t\tif e > dy:\n\t\t\t\tx += 1\n\t\t\t\te -= dy * 2\n\t\t\tif e < -dy:\n\t\t\t\tx -= 1\n\t\t\t\te += dy * 2\n\nif __name__ == \"__main__\":\n\tx0, y0, x1, y1 = input(\"please enter coordinates of line segments' start and end point:\").split()\n\tx0 = int(x0)\n\ty0 = int(y0)\n\tx1 = int(x1)\n\ty1 = int(y1)\n\ta = y0 - y1\n\tb = x1 - x0\n\tc = -(a * x0 + b * y0)\n\ts = math.sqrt(a * a + b * b);\n\tdraw_line((x0, y0), (x1, y1))\n\t\n\tim.show()", "sub_path": "Draw Line Anti-Aliasing/pure.py", "file_name": "pure.py", "file_ext": "py", "file_size_in_byte": 1980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PIL.Image.new", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 10, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "55885691", "text": "import numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy import ndimage\nimport scipy.constants as sc\nimport astropy.constants as 
const\nimport astropy.units as u\n\ndefault_cmap = \"inferno\"\n\nsigma_to_FWHM = 2.0 * np.sqrt(2.0 * np.log(2))\nFWHM_to_sigma = 1.0 / sigma_to_FWHM\narcsec = np.pi / 648000\n\n\ndef spectral_convolution(model, Delta_v, n_window=101):\n # Creating a Hanning function with n_window points\n w = np.hanning(n_window)\n # For each pixel, resampling the spectrum between -FWHM to FWHM\n # then integrating over convolution window\n v_new = model.velocity[iv] + np.linspace(-1, 1, n_window) * Delta_v\n iv_min = int(iv - Delta_v / self.dv - 1)\n iv_max = int(iv + Delta_v / self.dv + 2)\n\n im = np.zeros([self.nx, self.ny])\n for j in range(self.ny):\n for i in range(self.nx):\n f = interpolate.interp1d(self.velocity[iv_min:iv_max], cube[iv_min:iv_max, i, j]) \n im[i, j] = np.average(f(v_new))\n return im\n\n\n\ndef bin_image(im, n, func=np.sum):\n # bin an image in blocks of n x n pixels\n # return a image of size im.shape/n\n\n nx = im.shape[0]\n nx_new = nx // n\n x0 = (nx - nx_new * n) // 2\n\n ny = im.shape[1]\n ny_new = ny // n\n y0 = (ny - ny_new * n) // 2\n\n return np.reshape(\n np.array(\n [\n func(im[x0 + k1 * n : (k1 + 1) * n, y0 + k2 * n : (k2 + 1) * n])\n for k1 in range(nx_new)\n for k2 in range(ny_new)\n ]\n ),\n (nx_new, ny_new),\n )\n\n\ndef Wm2_to_Jy(nuFnu, nu):\n '''\n Convert from W.m-2 to Jy\n nu [Hz]\n '''\n return 1e26 * nuFnu / nu\n\n\ndef Jy_to_Wm2(Fnu, nu):\n '''\n Convert from Jy to W.m-2\n nu [Hz]\n '''\n return 1e-26 * Fnu * nu\n\n\ndef Jybeam_to_Tb(Fnu, nu, bmaj, bmin):\n '''\n Convert Flux density in Jy/beam to brightness temperature [K]\n Flux [Jy]\n nu [Hz]\n bmaj, bmin in [arcsec]\n\n T [K]\n '''\n beam_area = bmin * bmaj * arcsec ** 2 * np.pi / (4.0 * np.log(2.0))\n exp_m1 = 1e26 * beam_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu\n hnu_kT = np.log1p(np.maximum(exp_m1, 1e-10))\n\n Tb = sc.h * nu / (hnu_kT * sc.k)\n\n return Tb\n\n\ndef Jy_to_Tb(Fnu, nu, pixelscale):\n '''\n Convert Flux density in Jy/pixel to brightness temperature [K]\n Flux [Jy]\n nu [Hz]\n bmaj, bmin in [arcsec]\n\n T [K]\n '''\n pixel_area = (pixelscale * arcsec) ** 2\n exp_m1 = 1e16 * pixel_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu\n hnu_kT = np.log1p(exp_m1 + 1e-10)\n\n Tb = sc.h * nu / (hnu_kT * sc.k)\n\n return Tb\n\n\ndef Wm2_to_Tb(nuFnu, nu, pixelscale):\n \"\"\"Convert flux converted from Wm2/pixel to K using full Planck law.\n Convert Flux density in Jy/beam to brightness temperature [K]\n Flux [W.m-2/pixel]\n nu [Hz]\n bmaj, bmin, pixelscale in [arcsec]\n \"\"\"\n pixel_area = (pixelscale * arcsec) ** 2\n exp_m1 = pixel_area * 2.0 * sc.h * nu ** 4 / (sc.c ** 2 * nuFnu)\n hnu_kT = np.log1p(exp_m1)\n\n Tb = sc.h * nu / (sc.k * hnu_kT)\n\n return Tb\n\n\n# -- Functions to deal the synthesized beam.\ndef _beam_area(self):\n \"\"\"Beam area in arcsec^2\"\"\"\n return np.pi * self.bmaj * self.bmin / (4.0 * np.log(2.0))\n\ndef _beam_area_str(self):\n \"\"\"Beam area in steradian^2\"\"\"\n return self._beam_area() * arcsec ** 2\n\ndef _pixel_area(self):\n return self.pixelscale ** 2\n\ndef _beam_area_pix(self):\n \"\"\"Beam area in pix^2.\"\"\"\n return self._beam_area() / self._pixel_area()\n\n\ndef telescope_beam(wl,D):\n \"\"\" wl and D in m, returns FWHM in arcsec\"\"\"\n return 0.989 * wl/D / 4.84814e-6\n\n\ndef make_cut(im, x0,y0,x1,y1,num=None,plot=False):\n \"\"\"\n Make a cut in image 'im' along a line between (x0,y0) and (x1,y1)\n x0, y0,x1,y1 are pixel coordinates\n \"\"\"\n\n if plot:\n vmax = np.max(im)\n vmin = vmax * 1e-6\n norm = colors.LogNorm(vmin=vmin, vmax=vmax, 
clip=True)\n plt.imshow(im,origin=\"lower\", norm=norm)\n plt.plot([x0,x1],[y0,y1])\n\n\n if num is not None:\n # Extract the values along the line, using cubic interpolation\n x, y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)\n zi = ndimage.map_coordinates(im, np.vstack((y,x)))\n else:\n # Extract the values along the line at the pixel spacing\n length = int(np.hypot(x1-x0, y1-y0))\n x, y = np.linspace(x0, x1, length), np.linspace(y0, y1, length)\n zi = im[y.astype(np.int), x.astype(np.int)]\n\n return zi\n\n\nclass DustExtinction:\n\n import os\n\n __dirname__ = os.path.dirname(__file__)\n\n wl = []\n kext = []\n\n _extinction_dir = __dirname__ + \"/extinction_laws\"\n _filename_start = \"kext_albedo_WD_MW_\"\n _filename_end = \"_D03.all\"\n V = 5.47e-1 # V band wavelength in micron\n\n def __init__(self, Rv=3.1, **kwargs):\n self.filename = (\n self._extinction_dir\n + \"/\"\n + self._filename_start\n + str(Rv)\n + self._filename_end\n )\n self._read(**kwargs)\n\n def _read(self):\n\n with open(self.filename, 'r') as file:\n f = []\n for line in file:\n if (not line.startswith(\"#\")) and (\n len(line) > 1\n ): # Skipping comments and empty lines\n line = line.split()\n self.wl.append(float(line[0]))\n kpa = float(line[4])\n albedo = float(line[1])\n self.kext.append(kpa / (1.0 - albedo))\n\n # Normalize extinction in V band\n kext_interp = interp1d(self.wl, self.kext)\n kextV = kext_interp(self.V)\n self.kext /= kextV\n\n def redenning(self, wl, Av):\n \"\"\"\n Computes extinction factor to apply for a given Av\n Flux_red = Flux * redenning\n\n wl in micron\n\n \"\"\"\n kext_interp = interp1d(self.wl, self.kext)\n kext = kext_interp(wl)\n tau_V = 0.4 * np.log(10.0) * Av\n\n return np.exp(-tau_V * kext)\n\n\ndef Hill_radius():\n pass\n #d * (Mplanet/3*Mstar)**(1./3)\n\n\ndef splash2mcfost(anglex, angley, anglez):\n #Convert the splash angles to mcfost angles\n\n # Base unit vector\n x0 = [1,0,0]\n y0 = [0,1,0]\n z0 = [0,0,1]\n\n # Splash rotated vectors\n x = _rotate_splash_axes(x0,-anglex,-angley,-anglez)\n y = _rotate_splash_axes(y0,-anglex,-angley,-anglez)\n z = _rotate_splash_axes(z0,-anglex,-angley,-anglez)\n\n # MCFOST angles\n mcfost_i = np.arccos(np.dot(z,z0)) * 180./np.pi\n\n if abs(mcfost_i) > 1e-30:\n print(\"test1\")\n # angle du vecteur z dans le plan (-y0,x0)\n mcfost_az = (np.arctan2(np.dot(z,x0), -np.dot(z,y0)) ) * 180./np.pi\n # angle du vecteur z0 dans le plan x_image, y_image (orientation astro + 90deg)\n mcfost_PA = -( np.arctan2(np.dot(x,z0), np.dot(y,z0)) ) * 180./np.pi\n else:\n print(\"test2\")\n mcfost_az = 0.\n # angle du vecteur y dans le plan x0, y0\n mcfost_PA = (np.arctan2(np.dot(y,x0),np.dot(y,y0)) ) * 180./np.pi\n\n\n print(\"anglex =\",anglex, \"angley=\", angley, \"anglez=\", anglez,\"\\n\")\n print(\"Direction to oberver=\",z)\n print(\"x-image=\",x)\n print(\"y_image = \", y,\"\\n\")\n print(\"MCFOST parameters :\")\n print(\"inclination =\", mcfost_i)\n print(\"azimuth =\", mcfost_az)\n print(\"PA =\", mcfost_PA)\n\n return [mcfost_i, mcfost_az, mcfost_PA]\n\ndef _rotate_splash(xyz, anglex, angley, anglez):\n # Defines rotations as in splash\n # This function is to rotate the data\n\n x = xyz[0]\n y = xyz[1]\n z = xyz[2]\n\n # rotate about z\n if np.abs(anglez) > 1e-30:\n r = np.sqrt(x**2+y**2)\n phi = np.arctan2(y,x)\n phi -= anglez/180*np.pi\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n\n # rotate about y\n if np.abs(angley) > 1e-30:\n r = np.sqrt(z**2+x**2)\n phi = np.arctan2(z,x)\n phi -= angley/180*np.pi\n x = r*np.cos(phi)\n z = 
r*np.sin(phi)\n\n # rotate about x\n if np.abs(anglex) > 1e-30:\n r = np.sqrt(y**2+z**2)\n phi = np.arctan2(z,y)\n phi -= anglex/180*np.pi\n y = r*np.cos(phi)\n z = r*np.sin(phi)\n\n return np.array([x,y,z])\n\n\ndef _rotate_splash_axes(xyz, anglex, angley, anglez):\n # Defines rotations as in splash, but in reserve order\n # as we rotate the axes instead of the data\n\n x = xyz[0]\n y = xyz[1]\n z = xyz[2]\n\n # rotate about x\n if np.abs(anglex) > 1e-30:\n r = np.sqrt(y**2+z**2)\n phi = np.arctan2(z,y)\n phi -= anglex/180*np.pi\n y = r*np.cos(phi)\n z = r*np.sin(phi)\n\n # rotate about y\n if np.abs(angley) > 1e-30:\n r = np.sqrt(z**2+x**2)\n phi = np.arctan2(z,x)\n phi -= angley/180*np.pi\n x = r*np.cos(phi)\n z = r*np.sin(phi)\n\n # rotate about z\n if np.abs(anglez) > 1e-30:\n r = np.sqrt(x**2+y**2)\n phi = np.arctan2(y,x)\n phi -= anglez/180*np.pi\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n\n return np.array([x,y,z])\n\ndef rotate_vec(u,v,angle):\n '''\n rotate a vector (u) around an axis defined by another vector (v)\n by an angle (theta) using the Rodrigues rotation formula\n '''\n k = v/np.sqrt(np.inner(v,v))\n w = np.cross(k,u)\n k_dot_u = np.inner(k,u)\n for i,uval in enumerate(u):\n u[i] = u[i]*np.cos(angle) + w[i]*np.sin(angle) + k[i]*k_dot_u*(1.-np.cos(angle))\n return u\n\ndef rotate_coords(x,y,z,inc,PA):\n '''\n rotate x,y,z coordinates into the observational plane\n '''\n k = [-np.sin(PA), np.cos(PA), 0.]\n xvec = [x,y,z]\n xrot = rotate_vec(xvec,k,inc)\n return xrot[0],xrot[1],xrot[2]\n\ndef rotate_to_obs_plane(x,y,inc,PA):\n '''\n same as rotate_coords but takes 2D x,y as arrays\n '''\n for i,xx in enumerate(x): # this can probably be done more efficiently\n x[i],y[i],dum = rotate_coords(x[i],y[i],0.,inc,PA)\n return x,y\n\ndef planet_position(model, i_planet, i_star, ):\n '''\n Returns planet position [arcsec] and PA [deg] in the map\n '''\n xy_planet = model.star_positions[:,0,0,i_planet]\n xy_star = model.star_positions[:,0,0,i_star]\n dxy = xy_planet - xy_star\n\n dist = np.hypot(dxy[0],dxy[1])\n PA = np.rad2deg(np.arctan2(dxy[1],-dxy[0])) + 360 - 90\n\n return [dist, PA]\n", "sub_path": "pymcfost/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 10299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "numpy.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.hanning", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 82, "usage_type": "call"}, {"api_name": "scipy.constants.h", "line_number": 83, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 83, "usage_type": "name"}, {"api_name": "scipy.constants.c", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.log1p", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 84, 
"usage_type": "call"}, {"api_name": "scipy.constants.h", "line_number": 86, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 86, "usage_type": "name"}, {"api_name": "scipy.constants.k", "line_number": 86, "usage_type": "attribute"}, {"api_name": "scipy.constants.h", "line_number": 101, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 101, "usage_type": "name"}, {"api_name": "scipy.constants.c", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.log1p", "line_number": 102, "usage_type": "call"}, {"api_name": "scipy.constants.h", "line_number": 104, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 104, "usage_type": "name"}, {"api_name": "scipy.constants.k", "line_number": 104, "usage_type": "attribute"}, {"api_name": "scipy.constants.h", "line_number": 117, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 117, "usage_type": "name"}, {"api_name": "scipy.constants.c", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.log1p", "line_number": 118, "usage_type": "call"}, {"api_name": "scipy.constants.h", "line_number": 120, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 120, "usage_type": "name"}, {"api_name": "scipy.constants.k", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.ndimage.map_coordinates", "line_number": 164, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 213, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 256, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 258, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 287, "usage_type": "call"}, 
{"api_name": "numpy.arctan2", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 289, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 297, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 305, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 324, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 340, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.inner", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 352, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 352, "usage_type": "argument"}, {"api_name": "numpy.inner", "line_number": 353, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 353, "usage_type": "argument"}, {"api_name": "astropy.units", "line_number": 354, "usage_type": "argument"}, {"api_name": "astropy.units", "line_number": 355, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 355, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 356, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 383, "usage_type": "call"}, 
{"api_name": "numpy.rad2deg", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 384, "usage_type": "call"}]} +{"seq_id": "352500380", "text": "from threading import Thread, Lock\n\nimport cv2\n\nfrom peekingduck.pipeline.nodes.input.utils.preprocess import set_res, mirror\n\n\nclass VideoThread:\n '''\n Videos will be threaded to prevent I/O blocking from affecting FPS.\n '''\n\n def __init__(self, res, input_source, mirror_image):\n self.stream = cv2.VideoCapture(input_source)\n self.mirror = mirror_image\n if not self.stream.isOpened():\n raise ValueError(\"Camera or video input not detected: %s\" % input_source)\n\n width, height = res['width'], res['height']\n set_res(self.stream, width, height)\n self._lock = Lock()\n thread = Thread(target=self._reading_thread, args=(), daemon=True)\n thread.start()\n\n def _reading_thread(self):\n '''\n A thread that continuously polls the camera for frames.\n '''\n while True:\n _, self.frame = self.stream.read()\n\n def read_frame(self):\n '''\n Reads the frame.\n '''\n self._lock.acquire()\n if self.frame is not None:\n frame = self.frame.copy()\n self._lock.release()\n if self.mirror:\n frame = mirror(frame)\n return True, frame\n\n self._lock.release()\n return False, None\n\n\nclass VideoNoThread:\n '''\n No threading to deal with recorded videos and images.\n '''\n\n def __init__(self, res, input_source, mirror_image):\n self.stream = cv2.VideoCapture(input_source)\n self.mirror = mirror_image\n if not self.stream.isOpened():\n raise ValueError(\"Video or image path incorrect: %s\" % input_source)\n\n width, height = res['width'], res['height']\n set_res(self.stream, width, height)\n\n def read_frame(self):\n '''\n Reads the frame.\n '''\n return self.stream.read()\n", "sub_path": "peekingduck/pipeline/nodes/input/utils/read.py", "file_name": "read.py", "file_ext": "py", "file_size_in_byte": 1851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "peekingduck.pipeline.nodes.input.utils.preprocess.set_res", "line_number": 20, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 21, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 22, "usage_type": "call"}, {"api_name": "peekingduck.pipeline.nodes.input.utils.preprocess.mirror", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 54, "usage_type": "call"}, {"api_name": "peekingduck.pipeline.nodes.input.utils.preprocess.set_res", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "603303822", "text": "from flask import Flask, request, jsonify, abort, Response, render_template\nfrom flask_assets import Environment, Bundle\nfrom webassets_browserify import Browserify\n\nfrom latex import build_pdf, LatexBuildError\nfrom latex.jinja2 import make_env\n\nfrom songs import get_songs\n\nfrom string import digits,letters\n\napp = Flask(__name__, static_url_path='/static')\n\nassets = Environment(app)\njs = Bundle('js/main.jsx',\n depends=('*/*.js*'),\n filters=Browserify,\n output='app.js')\nassets.register('js_all', js)\n\n@app.route('/')\ndef index(): return render_template('index.html')\n\nsong_dict = get_songs()\nsong_list = song_dict.values()\n@app.route('/songs/')\n@app.route('/songs/')\ndef songs(songid=None):\n def filter_keys(item):\n return {\n key: item[key]\n for key in [\n 'songid',\n 'songtitle',\n 'firstline',\n 'songmeta',\n 
'songtext',\n 'songnotes'\n ]\n }\n\n if songid == None: return jsonify(songs=map(filter_keys, song_list))\n if songid in song_dict: return jsonify(filter_keys(song_dict[songid]))\n else: return abort(404)\n\ndef whitelist(string, alphabet):\n return ''.join([x for x in string if x in alphabet])\n\ntexenv = make_env(loader=app.jinja_loader)\n@app.route('/songs.pdf')\ndef pdf():\n texonly = 'texonly' in request.args\n orientation = 'landscape' if 'landscape' in request.args else 'portrait'\n cols = whitelist(request.args.get('cols', ''), digits) or '2'\n font = whitelist(request.args.get('font', ''), digits+letters)\n fontoptions = whitelist(request.args.get('fontoptions', ''), digits+letters)\n songids = request.args.get('songids')\n\n if songids:\n try:\n songids = map(int, songids.split(','))\n except ValueError:\n return 'Invalid songid'\n else:\n return 'No songs'\n\n template = texenv.get_template('songs.tex')\n tex = template.render(\n songs=[song_dict[x] for x in songids if x in song_dict],\n cols=cols,\n orientation=orientation,\n font=font,\n fontoptions=fontoptions)\n\n if texonly:\n return Response(tex, mimetype='text/plain')\n else:\n try:\n pdffile = build_pdf(tex)\n except LatexBuildError as e:\n return Response(tex, mimetype='text/plain')\n\n return Response(bytes(pdffile), mimetype='application/pdf')\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "audio.py", "file_name": "audio.py", "file_ext": "py", "file_size_in_byte": 2562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_assets.Environment", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_assets.Bundle", "line_number": 15, "usage_type": "call"}, {"api_name": "webassets_browserify.Browserify", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}, {"api_name": "songs.get_songs", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 44, "usage_type": "call"}, {"api_name": "latex.jinja2.make_env", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "string.digits", "line_number": 54, "usage_type": "argument"}, {"api_name": "flask.request.args.get", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "string.digits", "line_number": 55, "usage_type": "name"}, {"api_name": "string.letters", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 56, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "string.digits", "line_number": 56, "usage_type": "name"}, {"api_name": "string.letters", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 76, "usage_type": "call"}, {"api_name": "latex.build_pdf", "line_number": 79, "usage_type": "call"}, {"api_name": "latex.LatexBuildError", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "41861621", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\" RPS-LS\n\n\tClassic Rock Paper Scissors mini game and LS extension\n\n\t1.0.0\n\t\tInitial release\n\n\tUpcoming features\n\t\tlatin to ascii comparisons (Lézard == Lezard) should be True\n\t\tAdd multiple way to write a choice (ex. French: Pierre, Roche)\n\n\"\"\"\n\n# --------------------------------------\n# Script Import Libraries\n# --------------------------------------\nimport clr\nimport os\nimport json\nimport codecs\nimport re\n\nclr.AddReference(\"IronPython.SQLite.dll\")\nclr.AddReference(\"IronPython.Modules.dll\")\n\n\n# --------------------------------------\n# Script Information\n# --------------------------------------\nScriptName = \"RPS-LS\"\nWebsite = \"https://github.com/CVex2150J\"\nDescription = \"Rock Paper Scissors LS\"\nCreator = \"CVex2150J\"\nVersion = \"1.0.0\"\n\n\n# --------------------------------------\n# Script Variables\n# --------------------------------------\nSettingsFile = os.path.join(os.path.dirname(__file__), \"settings.json\")\ncooldown_command = \"!rps\"\nlocal = {}\n\nwinningTable = [\n\t# 0: rock, 1: paper, 2: scissors, 3: lizard, 4: Spock\n\t[2, 1, 5], # cuts\n\t[1, 0, 6], # covers\n\t[0, 2, 7], # crushes\n\t[0, 3, 8], # crushes\n\t[3, 4, 9], # poisons\n\t[4, 2, 10], # smashes\n\t[2, 3, 11], # decapitates\n\t[3, 1, 12], # eats\n\t[1, 4, 13], # disproves\n\t[4, 0, 14] # vaporizes\n]\n\n\n# --------------------------------------\n# Script Classes\n# --------------------------------------\nclass Settings(object):\n\t\"\"\" Load in saved settings file if available else set default values. \"\"\"\n\n\tclassic_command = \"!rps\"\n\tlizardspock_command = \"!rpsls\"\n\tlocalisation_file = \"local_en.txt\"\n\treward = 100\n\tuser_cooldown = 60\n\n\tdef __init__(self, settingsfile=None):\n\t\ttry:\n\t\t\twith codecs.open(settingsfile, encoding=\"utf-8-sig\", mode=\"r\") as f:\n\t\t\t\tself.__dict__ = json.load(f, encoding=\"utf-8\")\n\t\texcept:\n\t\t\treturn\n\n\tdef Reload(self, jsondata):\n\t\t\"\"\" Reload settings from interface by given json data. 
\"\"\"\n\t\tself.__dict__ = json.loads(jsondata, encoding=\"utf-8\")\n\n\n# --------------------------------------\n# Script Functions\n# --------------------------------------\n# Utilities\ndef Log(message):\n\tParent.Log(ScriptName, str(message))\n\n\ndef Message(message):\n\tParent.SendStreamMessage(str(message))\n\n\n# def Whisper(target, message):\n# \tParent.SendStreamWhisper(str(target), str(message))\n\n\n# Functions\ndef LoadLocalisation():\n\n\tglobal local\n\n\ttry:\n\t\t# Parse localisation file\n\t\tfile_name = os.path.join(os.path.dirname(__file__), ScriptSettings.localisation_file)\n\t\t_file = codecs.open(file_name, encoding=\"utf-8-sig\", mode=\"r\")\n\n\t\t# get all lines, strip \\n and remove any comments commencing with #\n\t\tlines = [re.sub('#.*', '', line.rstrip('\\r\\n')) for line in _file]\n\t\t# discard all empty and comment line\n\t\tlocal = list(filter(lambda x: x, lines))\n\n\texcept Exception as e:\n\t\tLog(\"ERROR : Unable to parse localisation file.\" + str(e))\n\n\ndef add_user_cooldown(data):\n\tif ScriptSettings.user_cooldown > 0:\n\t\tParent.AddUserCooldown(ScriptName, cooldown_command, data.User, ScriptSettings.user_cooldown)\n\n\ndef giveReward(data):\n\tif ScriptSettings.reward > 0:\n\t\tParent.AddPoints(data.User, data.UserName, ScriptSettings.reward)\n\n\ndef show_result(data, u, c, win):\n\n\tif u != c:\n\t\tif u == win[0]:\n\t\t\t# win\n\t\t\tgiveReward(data)\n\t\t\tresult = local[15]\n\t\telse:\n\t\t\t# loose\n\t\t\tresult = local[16]\n\n\t\tresult = result.replace('{phrase}', local[win[0]] + ' ' + local[win[2]] + ' ' + local[win[1]])\n\telse:\n\t\t# tie\n\t\tresult = local[17]\n\n\tresult = result.replace('{user}', data.UserName)\n\t# result = result.replace('{bot}', ... Bot name ? )\n\tresult = result.replace('{user_pick}', local[u])\n\tresult = result.replace('{bot_pick}', local[c])\n\n\tMessage(result)\n\n\n# limit 3 : classic rock, paper, scissors\n# limit 5 : rock, paper, scissors, lizard, Spock\ndef play(data, limit=3):\n\n\tif ScriptSettings.user_cooldown > 0:\n\t\tduration = Parent.GetUserCooldownDuration(ScriptName, cooldown_command, data.User)\n\t\tif duration > 0:\n\t\t\t# Message(data.UserName + ' can\\'t use this command for another ' + str(duration) + ' seconds.')\n\t\t\treturn\n\n\t# parse parameter 1 and try to find its index 1-3 in classic or 1-5 in LS mode\n\tuser_choice_str = data.GetParam(1).lower()\n\n\tuser_choice = -1\n\tfor c in local:\n\t\tuser_choice += 1\n\t\tif user_choice > limit:\n\t\t\tuser_choice = -1\n\t\t\tbreak\n\t\telif c.lower() == user_choice_str:\n\t\t\tbreak\n\n\t# user_choice -1 : the user gives and invalid option\n\tif user_choice != -1:\n\n\t\tadd_user_cooldown(data)\n\n\t\t# random computer choice\n\t\tcomputer_choice = Parent.GetRandom(0, limit) # Limit is excluded\n\n\t\tif user_choice == computer_choice:\n\t\t\t# Equality\n\t\t\tshow_result(data, user_choice, computer_choice, None)\n\t\telse:\n\t\t\t# Find the choice combination\n\t\t\tfor win in winningTable:\n\t\t\t\tif user_choice in win and computer_choice in win:\n\t\t\t\t\tshow_result(data, user_choice, computer_choice, win)\n\t\t\t\t\tbreak\n\n\n# --------------------------------------\n# Chatbot Initialize Function\n# --------------------------------------\ndef Init():\n\n\tglobal ScriptSettings\n\n\t# Load settings from settings file\n\tScriptSettings = Settings(SettingsFile)\n\n\tLoadLocalisation()\n\n\n# --------------------------------------\n# Chatbot Save Settings Function\n# --------------------------------------\ndef 
ReloadSettings(jsondata):\n\n\t# Reload newly saved settings and verify\n\tScriptSettings.Reload(jsondata)\n\tLoadLocalisation()\n\n\n# --------------------------------------\n# Chatbot Execute Function\n# --------------------------------------\ndef Execute(data):\n\t# Twitch chat message only for now\n\tif not data.IsFromTwitch() or not data.IsChatMessage() or data.IsWhisper():\n\t\treturn\n\tcommand = data.GetParam(0).lower()\n\tif len(ScriptSettings.lizardspock_command) > 0 and command == ScriptSettings.lizardspock_command.lower():\n\t\tplay(data, 5)\n\telif len(ScriptSettings.classic_command) > 0 and command == ScriptSettings.classic_command.lower():\n\t\tplay(data, 3)\n\treturn\n\n\n# --------------------------------------\n# Chatbot Script Unload Function\n# --------------------------------------\ndef Unload():\n\treturn\n\n\n# --------------------------------------\n# Chatbot Tick Function\n# --------------------------------------\ndef Tick():\n\treturn", "sub_path": "RPS-LS_StreamlabsSystem.py", "file_name": "RPS-LS_StreamlabsSystem.py", "file_ext": "py", "file_size_in_byte": 6109, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "clr.AddReference", "line_number": 26, "usage_type": "call"}, {"api_name": "clr.AddReference", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 43, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 76, "usage_type": "call"}, {"api_name": "json.load", "line_number": 77, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 109, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 110, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "563922917", "text": "\"\"\"add unique together constraint for shop item category and code name\n\nRevision ID: b412a7dfa61f\nRevises: 8ba89ef9391d\nCreate Date: 2018-07-07 14:48:10.809261\n\n\"\"\"\nfrom alembic import op\n\n\n# revision identifiers, used by Alembic.\nrevision = \"b412a7dfa61f\"\ndown_revision = \"8ba89ef9391d\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # Requires that the table already fulfills the constraint\n with op.batch_alter_table(\"shop_item\", schema=None) as batch_op:\n batch_op.create_unique_constraint(\n \"uq_category_id_code_name\", [\"category_id\", \"code_name\"])\n\n\ndef downgrade():\n with op.batch_alter_table(\"shop_item\", schema=None) as batch_op:\n batch_op.drop_constraint(\"uq_category_id_code_name\")\n", "sub_path": "meme_machine/alembic/versions/b412a7dfa61f_add_unique_together_constraint_for_shop_.py", "file_name": "b412a7dfa61f_add_unique_together_constraint_for_shop_.py", "file_ext": "py", "file_size_in_byte": 737, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "alembic.op.batch_alter_table", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, 
"usage_type": "name"}]} +{"seq_id": "273748052", "text": "# importing all the required libraries\nimport os\nimport sys\nimport torch\nimport pandas as pd\nfrom os.path import abspath\nfrom argparse import ArgumentParser\n\n# importing local modules\nscript_path = os.path.abspath('')\nsys.path.insert(0, abspath(script_path))\n# print(abspath(script_path))\n\nfrom utils.utilities import load_check_point, \\\n tokenizer_nltk, load_dict_from_disk\n\n# importing model\nfrom model.model import DCNN_TREC\n\n# importing the model parameters\nfrom utils.model_parameters import TREC_DATASET_PARAMETERS\n\nparser = ArgumentParser()\nparser.add_argument(\n \"--embedding_dim\", help=\"Mention the dimension of embedding.\",\n type=int,\n default=50\n)\nparser.add_argument(\n \"--sentence_length\", help=\"Fix the sentence length for each sentence.\",\n type=int,\n default=10\n)\nparser.add_argument(\n \"--saved_model_path\", help=\"Mention the path where model is saved.\",\n type=str,\n default=None\n)\nparser.add_argument(\n \"--saved_vocab_path\", help=\"Mention the path where vocab is saved.\",\n type=str,\n default=None\n)\nparser.add_argument(\n \"--device\", help=\"Mention the device to be used cuda or cpu,\",\n type=str,\n default=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n)\nparser.add_argument(\n \"--glove_file_path\", help=\"Mention the path where glove embeddings are saved.\",\n type=str,\n default=\"/home/neo/glove.6B.50d.txt\"\n)\nparser.add_argument(\n \"--file_to_predict_on\", help=\"Mention the path of the csv file to predict on.\",\n type=str,\n default=None\n)\nparser.add_argument(\n \"--file_to_save_predictions\", help=\"Mention the path of the csv file to save predictions.\",\n type=str,\n default=None\n)\narguments = parser.parse_args()\nEMBEDDING_DIM = arguments.embedding_dim\nTREC_DATASET_PARAMETERS[\"embedding_dim\"] = EMBEDDING_DIM\n\nSENT_LENGTH = arguments.sentence_length\nTREC_DATASET_PARAMETERS[\"cell_one_parameter_dict\"][\"sent_length\"] = SENT_LENGTH\n\nMODEL_PATH = arguments.saved_model_path\nVOCAB_PATH = arguments.saved_vocab_path\nSAVE_PATH = arguments.file_to_save_predictions\nDEVICE = arguments.device\n\nFILE_TO_PREDICT_ON = arguments.file_to_predict_on\n\nGLOVE_FILE_PATH = arguments.glove_file_path\n\n\ndef return_indexed(vocab_obj, tokenized):\n indexed = []\n for i in tokenized:\n if len(i) < SENT_LENGTH:\n i = i + [\"\"]*(SENT_LENGTH - len(i))\n\n if len(i) > SENT_LENGTH:\n i = i[:SENT_LENGTH]\n temp = []\n for j in i:\n temp.append(vocab_obj.stoi[j])\n indexed.append(temp)\n return indexed\n\n\n\ndef predict_using_model(model, vocab):\n df = pd.read_csv(FILE_TO_PREDICT_ON)\n df[\"sentence_tokenized\"] = df[\"sentence\"].apply(lambda x: tokenizer_nltk(x))\n df[\"indexed\"] = return_indexed(vocab, df[\"sentence_tokenized\"])\n\n input_tensor = torch.LongTensor(list(df[\"indexed\"])).to(DEVICE)\n model_outputs = model(input_tensor).squeeze(1)\n preds, ind = torch.max(torch.nn.functional.softmax(model_outputs, dim=-1), 1)\n preds = preds.cpu().detach().numpy()\n ind = ind.cpu().detach().numpy()\n\n df[\"predictions\"] = ind\n df[\"probabilities\"] = preds\n df = df[[\"sentence\", \"predictions\", \"probabilities\"]]\n df.to_csv(SAVE_PATH, index=False, encoding=\"utf-8\")\n return\n\n\nif __name__ == \"__main__\":\n\n vocab = load_dict_from_disk(VOCAB_PATH)\n TREC_DATASET_PARAMETERS[\"vocab_length\"] = len(vocab.stoi)\n\n model = DCNN_TREC(parameter_dict=TREC_DATASET_PARAMETERS)\n\n model.to(DEVICE)\n\n model = load_check_point(model, MODEL_PATH)\n 
predict_using_model(model, vocab)\n\n print(\"\\n\\n\")\n print(\"FINISH\")\n print(\"############################################################################\")\n\n", "sub_path": "scripts/predict_using_trec.py", "file_name": "predict_using_trec.py", "file_ext": "py", "file_size_in_byte": 3681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 47, "usage_type": "attribute"}, {"api_name": "utils.model_parameters.TREC_DATASET_PARAMETERS", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.model_parameters.TREC_DATASET_PARAMETERS", "line_number": 69, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.utilities.tokenizer_nltk", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 102, "usage_type": "call"}, {"api_name": "model.model", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "attribute"}, {"api_name": "utils.utilities.load_dict_from_disk", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.model_parameters.TREC_DATASET_PARAMETERS", "line_number": 118, "usage_type": "name"}, {"api_name": "model.model", "line_number": 120, "usage_type": "name"}, {"api_name": "model.model.DCNN_TREC", "line_number": 120, "usage_type": "call"}, {"api_name": "utils.model_parameters.TREC_DATASET_PARAMETERS", "line_number": 120, "usage_type": "name"}, {"api_name": "model.model.to", "line_number": 122, "usage_type": "call"}, {"api_name": "model.model", "line_number": 122, "usage_type": "name"}, {"api_name": "model.model", "line_number": 124, "usage_type": "name"}, {"api_name": "utils.utilities.load_check_point", "line_number": 124, "usage_type": "call"}, {"api_name": "model.model", "line_number": 125, "usage_type": "argument"}]} +{"seq_id": "553457420", "text": "import requests\nfrom os import path\nimport os\nimport http.cookiejar\nagent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'\n\nheader = {\n 'HOST' : 'www.zhihu.com',\n 'referer' : 'https://www.zhihu.com/',\n 'user-agent' : agent\n}\n\nfrom selenium import webdriver\nimport time\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('lang=zh_CN.UTF-8')\noptions.add_argument('user-agent=\"Mozilla/5.0 (iPod; U; CPU iPhone OS 2_1 like Mac OS X; ja-jp) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5F137 Safari/525.20\"')\nbrowser = webdriver.Chrome(chrome_options=options)\n# browser = webdriver.Chrome()\n\nbrowser.get(\"https://www.zhihu.com/signin\")\nbrowser.find_element_by_css_selector(\".SignFlow-accountInput.Input-wrapper input\").send_keys(\n 
\"13760710096\")\ntime.sleep(1)\nbrowser.find_element_by_css_selector(\".SignFlow-password input\").send_keys(\n \"XQY1197966810G\")\ntime.sleep(2)\nbrowser.find_element_by_css_selector(\n \".Button.SignFlow-submitButton\").click()\ntime.sleep(3)\nbrowser.get(\"https://www.zhihu.com/\")\n\ntime.sleep(6)\nzhihu_cookies = browser.get_cookies()\nprint(\"aaa\", zhihu_cookies)\ncookie_dict = {}\nimport pickle\nfor cookie in zhihu_cookies:\n base_path = path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'cookies')\n print(base_path)\n f = open(base_path + \"/zhihu/\" + cookie['name'] + '.zhihu', 'wb')\n pickle.dump(cookie, f)\n f.close()\n cookie_dict[cookie['name']] = cookie['value']\nbrowser.close()\n", "sub_path": "ArticleSpider/ArticleSpider/utils/zhihu_login_requests.py", "file_name": "zhihu_login_requests.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 19, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 40, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "595002045", "text": "#!/usr/bin/env python\n\nimport time, sys, logging\nfrom daemon2x import Daemon\nfrom lib_oled96 import ssd1306\nfrom PIL import ImageFont, ImageDraw, Image\nfrom smbus import SMBus\n\n# Logging\nlogging.basicConfig(filename='/home/pi/oled/lib_oled96/clock.log',\n filemode='a',\n\t\t\t\t\t\t\tformat='[%(asctime)s] %(message)s',\n\t\t\t\t\t\t\tdatefmt='%Y/%d/%m %H:%M:%S',\n level=logging.INFO)\n\n# Setup display\ni2cbus = SMBus(1)\t\t# 1 = Raspberry Pi but NOT early REV1 board\noled = ssd1306(i2cbus)\t# create oled object, nominating the correct I2C bus, default address\ndraw = oled.canvas\t\t# \"draw\" onto this canvas, then call display() to send the canvas contents to the hardware.\n\n# Hello World\n#oled.canvas.text((40,40), 'Hello World!', fill=1)\n\n#Setup fonts\n#font = ImageFont.load_default()\nfont1 = ImageFont.truetype('/home/pi/oled/DSEG/fonts/DSEG7-Classic/DSEG7Classic-Light.ttf', 38)\n#font2 = ImageFont.truetype('/home/pi/oled/DSEG/fonts/DSEG14-Classic/DSEG14Classic-Light.ttf', 12)\n\nclass MyDaemon(Daemon):\n\tdef run(self):\n\t\tlogging.info('--------------')\n\t\tlogging.info('Daemon Started')\n\t\t\n#\t\toled.cls()\n\t\t\n\t\twhile True:\n\t\t\tdraw.rectangle((0, 0, 128, 64), outline=0, fill=0)\n#\t\t\toled.canvas.rectangle((0, 0, oled.width-1, oled.height-1), outline=1, fill=0)\t# Border\n\n\t\t\tdraw.text((0 , 3), time.strftime(\"%H:%M\"), font=font1, fill=1)\n\t\t\tif time.strftime(\"%H\")[:1] == '0':\t\t#remove leading 0 for hour\n\t\t\t\tdraw.text((0, 3), '0', font=font1, fill=0)\n#\t\t\tdraw.text((58, 0), 
time.strftime(\"%p\")[:1], font=font3, fill=1)\n#\t\t\tdraw.text((15 , 48), time.strftime(\"%d-%m-%Y\"), font=font2, fill=1)\n\t\t\toled.display()\n\t\t\t\n\t\t\ttime.sleep(-time.time() % 60)\n\t\t\t\n\t\tlogging.info('Daemon Ended')\n\n\nif __name__ == \"__main__\":\n daemonx = MyDaemon('/tmp/daemon-OLEDclock.pid')\n if len(sys.argv) == 2:\n if 'start' == sys.argv[1]:\n daemonx.start()\n elif 'stop' == sys.argv[1]:\n logging.info('Daemon Stopped')\n daemonx.stop()\n elif 'restart' == sys.argv[1]:\n logging.info('Daemon restarting')\n daemonx.restart()\n else:\n print(\"Unknown command\")\n sys.exit(2)\n sys.exit(0)\n else:\n print(\"usage: %s start|stop|restart\" % sys.argv[0])\n sys.exit(2)\n", "sub_path": "python/clock-dseg2-d.py", "file_name": "clock-dseg2-d.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "smbus.SMBus", "line_number": 17, "usage_type": "call"}, {"api_name": "lib_oled96.ssd1306", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 26, "usage_type": "name"}, {"api_name": "daemon2x.Daemon", "line_number": 29, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 40, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "377646224", "text": "from hashlib import md5\n\nfrom sslyze.server_connectivity_tester import ServerConnectivityTester, \\\n ServerConnectivityError, ConnectionToServerTimedOut\nfrom sslyze.ssl_settings import TlsWrappedProtocolEnum\nfrom sslyze.plugins.openssl_cipher_suites_plugin import Sslv20ScanCommand, \\\n Sslv30ScanCommand, Tlsv10ScanCommand, Tlsv11ScanCommand, \\\n Tlsv12ScanCommand, Tlsv13ScanCommand\nfrom sslyze.synchronous_scanner import SynchronousScanner\n\nfrom . 
import results\nfrom .errors import ConnectionError\n\n# Policy prohibits the use of SSL 2.0/3.0 and TLS 1.0\nciphersuites = {\n    \"policy\": [Sslv20ScanCommand(), Sslv30ScanCommand(),\n               Tlsv10ScanCommand(), Tlsv11ScanCommand()],\n    \"full\": [Sslv20ScanCommand(), Sslv30ScanCommand(),\n             Tlsv10ScanCommand(), Tlsv11ScanCommand(),\n             Tlsv12ScanCommand(), Tlsv13ScanCommand()]\n    }\n\n# sslyze config\nSynchronousScanner.DEFAULT_NETWORK_RETRIES = 1\nSynchronousScanner.DEFAULT_NETWORK_TIMEOUT = 3\n\nERROR_MSG_CONNECTION_TIMEOUT = 'TCP connection to {}:{} timed-out'.format\nERROR_MSG_UNKNOWN_CONNECTION = \\\n    'TCP connection to {}:{} encountered unknown error'.format\n\n\ndef scan(name, ip, port, view, suite):\n    \"\"\" Five inputs: web site name, ip, port\n        split-dns view, and cipher suite \"\"\"\n\n    try:\n        server_tester = ServerConnectivityTester(\n            hostname=name,\n            ip_address=ip,\n            port=port,\n            tls_wrapped_protocol=TlsWrappedProtocolEnum.HTTPS\n        )\n        # This line checks to see if the host is online\n        server_info = server_tester.perform()\n        ip = server_info.ip_address\n    # Could not establish an SSL connection to the server\n    except ConnectionToServerTimedOut:\n        raise ConnectionError('Connection Timeout',\n                              ERROR_MSG_CONNECTION_TIMEOUT(name, port))\n    except ServerConnectivityError:\n        raise ConnectionError('Unknown Connection Error',\n                              ERROR_MSG_UNKNOWN_CONNECTION(name, port))\n\n    # Create a new results dictionary\n    scan_output = results.new()\n\n    # I hash the combination of hostname and ip for tracking\n    key = md5((f'{name}' + ip).encode(\"utf-8\")).hexdigest()\n    results.set_result(scan_output, \"MD5\", key)\n    results.set_result(scan_output, \"Hostname\", f'{name}')\n    results.set_result(scan_output, \"IP\", ip)\n    results.set_result(scan_output, \"View\", view)\n\n    for suite in ciphersuites.get(suite):\n        synchronous_scanner = SynchronousScanner()\n        scan_result = synchronous_scanner.run_scan_command(server_info, suite)\n\n        for cipher in scan_result.accepted_cipher_list:\n            results.set_ciphers(scan_output,\n                                {\n                                    \"Version\": cipher.ssl_version.name,\n                                    \"Cipher\": cipher.name\n                                }\n                                )\n\n    if len(scan_output[\"Results\"]) == 0:\n        results.set_result(scan_output, \"Results\", \"No Policy Violations\")\n\n    return scan_output\n", "sub_path": "SSLChecker/sharedcode/scanner.py", "file_name": "scanner.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Sslv20ScanCommand", "line_number": 16, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Sslv30ScanCommand", "line_number": 16, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Tlsv10ScanCommand", "line_number": 17, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Tlsv11ScanCommand", "line_number": 17, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Sslv20ScanCommand", "line_number": 18, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Sslv30ScanCommand", "line_number": 18, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Tlsv10ScanCommand", "line_number": 19, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Tlsv11ScanCommand", "line_number": 19, "usage_type": "call"}, {"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Tlsv12ScanCommand", "line_number": 20, "usage_type": "call"},
{"api_name": "sslyze.plugins.openssl_cipher_suites_plugin.Tlsv13ScanCommand", "line_number": 20, "usage_type": "call"}, {"api_name": "sslyze.synchronous_scanner.SynchronousScanner.DEFAULT_NETWORK_RETRIES", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sslyze.synchronous_scanner.SynchronousScanner", "line_number": 24, "usage_type": "name"}, {"api_name": "sslyze.synchronous_scanner.SynchronousScanner.DEFAULT_NETWORK_TIMEOUT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sslyze.synchronous_scanner.SynchronousScanner", "line_number": 25, "usage_type": "name"}, {"api_name": "sslyze.server_connectivity_tester.ServerConnectivityTester", "line_number": 37, "usage_type": "call"}, {"api_name": "sslyze.ssl_settings.TlsWrappedProtocolEnum.HTTPS", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sslyze.ssl_settings.TlsWrappedProtocolEnum", "line_number": 41, "usage_type": "name"}, {"api_name": "sslyze.server_connectivity_tester.ConnectionToServerTimedOut", "line_number": 47, "usage_type": "name"}, {"api_name": "errors.ConnectionError", "line_number": 48, "usage_type": "call"}, {"api_name": "sslyze.server_connectivity_tester.ServerConnectivityError", "line_number": 50, "usage_type": "name"}, {"api_name": "errors.ConnectionError", "line_number": 51, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 58, "usage_type": "call"}, {"api_name": "sslyze.synchronous_scanner.SynchronousScanner", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "259890367", "text": "from tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import font as tkfont\r\nimport tkinter.messagebox\r\nimport os\r\nimport sqlite3\r\n\r\n\r\n#####################################################################################################################################################################################\r\n\r\nclass sis(tk.Tk):\r\n\r\n def __init__(self, *args, **kwargs):\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n all_frame = tk.Frame(self)\r\n all_frame.pack(side=\"top\", fill=\"both\", expand = True)\r\n all_frame.rowconfigure(0, weight=1)\r\n all_frame.columnconfigure(0, weight=1)\r\n self.frames = {}\r\n for F in (Students, Home, Courses):\r\n frame = F(all_frame, self)\r\n self.frames[F] = frame\r\n frame.grid(row=0, column=0, sticky=\"nsew\")\r\n self.show(Home)\r\n def show(self, page_number):\r\n frame = self.frames[page_number]\r\n frame.tkraise()\r\n\r\n#####################################################################################################################################################################################\r\n\r\nclass Home(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n leftcolor = tk.Label(self, height = 1260, width =550, bg = \"maroon\")\r\n leftcolor.place(x=0, y=0)\r\n label = tk.Label(self, text=\"STUDENT INFORMATION SYSTEM\", bg= \"gold\", fg= \"white\", relief=RIDGE,font=(\"Arial bold\", 45))\r\n label.place(x=130,y=20)\r\n\r\n home = tk.Button(self, text=\"HOME\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Home))\r\n home.place(x=210,y=465)\r\n home.config(cursor= \"hand2\")\r\n \r\n course = tk.Button(self, text=\"COURSES\",font=(\"Arial\",18,\"bold\"),height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Courses))\r\n course.place(x=880,y=465)\r\n course.config(cursor= \"hand2\")\r\n \r\n students = tk.Button(self, 
text=\"STUDENTS\",font=(\"Arial\",18, \"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Students))\r\n students.place(x=540,y=465)\r\n students.config(cursor= \"hand2\")\r\n\r\n self.students=Button(self, font=(\"Cooper Black\",20), padx=5, width=25,height=8, bd=0, text=\" \"\"STUDENT INFORMATION\",anchor=W, bg=\"gold\",fg=\"white\", command=lambda: controller.show(Students))\r\n self.students.config(cursor= \"hand2\")\r\n self.students.place(x=120,y=140)\r\n \r\n \r\n self.course=Button(self, font=(\"Cooper Black\",20), padx =5, width=25, height=8, bd=0, text=\" \"\"LISTS OF COURSES\",anchor=W, bg=\"gold\",fg=\"white\", command=lambda: controller.show(Courses))\r\n self.course.config(cursor= \"hand2\")\r\n self.course.place(x=670,y=140)\r\n\r\n\r\n\r\n \r\n \r\n ##################################################################################################################################################################################### \r\n\r\nclass Courses(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n leftcolor = tk.Label(self, height = 1260, width =550, bg = \"maroon\")\r\n leftcolor.place(x=0, y=0)\r\n self.controller = controller\r\n self.controller.title(\"Student Information System\")\r\n \r\n label = tk.Label(self, text=\"COURSE/S\",bg= \"gold\", fg= \"white\", relief=RIDGE, font=(\"Arial\", 40, \"bold\"))\r\n label.place(x=500,y=20)\r\n \r\n Course_Code = StringVar()\r\n Course_Name = StringVar()\r\n SearchBar_Var = StringVar()\r\n \r\n def tablec():\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"CREATE TABLE IF NOT EXISTS courses (Course_Code TEXT PRIMARY KEY, Course_Name TEXT)\") \r\n conn.commit() \r\n conn.close()\r\n \r\n def add_course():\r\n if Course_Code.get() == \"\" or Course_Name.get() == \"\" : \r\n tkinter.messagebox.showinfo(\"Course/s\", \"Fill in the box\")\r\n else:\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n c = conn.cursor() \r\n c.execute(\"INSERT INTO courses(Course_Code,Course_Name) VALUES (?,?)\",(Course_Code.get(),Course_Name.get())) \r\n conn.commit() \r\n conn.close()\r\n Course_Code.set('')\r\n Course_Name.set('') \r\n tkinter.messagebox.showinfo(\"Course/s\", \"Course Added Successfully!\")\r\n display_course()\r\n \r\n def display_course():\r\n self.course_list.delete(*self.course_list.get_children())\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM courses\")\r\n rows = cur.fetchall()\r\n for row in rows:\r\n self.course_list.insert(\"\", tk.END, text=row[0], values=row[0:])\r\n conn.close()\r\n \r\n def update_course():\r\n for selected in self.course_list.selection():\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"UPDATE courses SET Course_Code=?, Course_Name=? 
WHERE Course_Code=?\", (Course_Code.get(),Course_Name.get(), self.course_list.set(selected, '#1'))) \r\n conn.commit()\r\n tkinter.messagebox.showinfo(\"Course/s\", \"Course Updated Successfully!\")\r\n display_course()\r\n clear()\r\n conn.close()\r\n \r\n def edit_course():\r\n x = self.course_list.focus()\r\n if x == \"\":\r\n tkinter.messagebox.showerror(\"Course/s\", \"Select a course!\")\r\n return\r\n values = self.course_list.item(x, \"values\")\r\n Course_Code.set(values[0])\r\n Course_Name.set(values[1])\r\n \r\n def delete_course(): \r\n try:\r\n messageDelete = tkinter.messagebox.askyesno(\"Student Info\", \"Are you sure you want to delete this record?\")\r\n if messageDelete > 0: \r\n con = sqlite3.connect(\"sis_v2.db\")\r\n cur = con.cursor()\r\n x = self.course_list.selection()[0]\r\n id_no = self.course_list.item(x)[\"values\"][0]\r\n cur.execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"DELETE FROM courses WHERE Course_Code = ?\",(id_no,)) \r\n con.commit()\r\n self.course_list.delete(x)\r\n tkinter.messagebox.showinfo(\"Course/s\", \"Course deleted!\")\r\n display_course()\r\n con.close() \r\n except:\r\n tkinter.messagebox.showerror(\"Course/s\", \"This course has students!\")\r\n \r\n def search_course():\r\n Course_Code = SearchBar_Var.get() \r\n con = sqlite3.connect(\"sis_v2.db\")\r\n cur = con.cursor()\r\n cur.execute(\"SELECT * FROM courses WHERE Course_Code = ?\",(Course_Code,))\r\n con.commit()\r\n self.course_list.delete(*self.course_list.get_children())\r\n rows = cur.fetchall()\r\n for row in rows:\r\n self.course_list.insert(\"\", tk.END, text=row[0], values=row[0:])\r\n con.close()\r\n \r\n def clear():\r\n Course_Code.set('')\r\n Course_Name.set('') \r\n \r\n def OnDoubleclick(event):\r\n item = self.course_list.selection()[0]\r\n values = self.course_list.item(item, \"values\")\r\n Course_Code.set(values[0])\r\n Course_Name.set(values[1])\r\n\r\n home = tk.Button(self, text=\"HOME\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Home))\r\n home.place(x=210,y=465)\r\n home.config(cursor= \"hand2\")\r\n \r\n course = tk.Button(self, text=\"COURSES\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Courses))\r\n course.place(x=880,y=465)\r\n course.config(cursor= \"hand2\")\r\n \r\n student = tk.Button(self, text=\"STUDENTS\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Students))\r\n student.place(x=540,y=465)\r\n student.config(cursor= \"hand2\")\r\n \r\n self.lblccode = Label(self, font=(\"Arial\", 17, \"bold\"), text=\"Course Code:\", bg= \"gold\", fg= \"white\", relief=RIDGE, padx=5, pady=5)\r\n self.lblccode.place(x=30,y=144)\r\n self.txtccode = Entry(self, font=(\"Arial\", 17), textvariable=Course_Code, width=20)\r\n self.txtccode.place(x=200,y=150)\r\n\r\n self.lblcname = Label(self, font=(\"Arial\", 17,\"bold\"), text=\"Course Name:\",bg= \"gold\", fg= \"white\", relief=RIDGE, padx=5, pady=5)\r\n self.lblcname.place(x=30,y=205)\r\n self.txtcname = Entry(self, font=(\"Arial\", 17), textvariable=Course_Name, width=35)\r\n self.txtcname.place(x=70,y=250)\r\n \r\n self.SearchBar = Entry(self, font=(\"Arial\", 15), textvariable=SearchBar_Var, bd=3, width=20)\r\n self.SearchBar.place(x=850,y=102)\r\n \r\n scrollbar = Scrollbar(self, orient=VERTICAL)\r\n scrollbar.place(x=1215,y=140,height=290)\r\n\r\n self.course_list 
= ttk.Treeview(self, columns=(\"Course Code\",\"Course Name\"), height = 13, yscrollcommand=scrollbar.set)\r\n\r\n self.course_list.heading(\"Course Code\", text=\"Course Code\", anchor=W)\r\n self.course_list.heading(\"Course Name\", text=\"Course Name\",anchor=W)\r\n self.course_list['show'] = 'headings'\r\n\r\n self.course_list.column(\"Course Code\", width=200, anchor=W, stretch=False)\r\n self.course_list.column(\"Course Name\", width=430, stretch=False)\r\n \r\n self.course_list.bind(\" \", OnDoubleclick)\r\n\r\n\r\n self.course_list.place(x=575,y=140)\r\n scrollbar.config(command=self.course_list.yview)\r\n\r\n self.lblccode = Label(self,height = 8, width = 65,relief=RIDGE, bg=\"orange\", fg=\"white\", padx=5, pady=5)\r\n self.lblccode.place(x=90,y=305)\r\n\r\n \r\n \r\n ## Buttons\r\n\r\n self.adds = Button(self, text=\"ADD\", font=(\"Arial\",17,\"bold\"),bd=0, width = 10, bg=\"gold\", fg=\"white\",command=add_course)\r\n self.adds.place(x=100,y=320)\r\n self.adds.config(cursor= \"hand2\")\r\n\r\n self.update = Button(self, text=\"UPDATE\", font=(\"Arial\",17,\"bold\"),bd=0, width = 10, bg=\"gold\", fg=\"white\", command=update_course) \r\n self.update.place(x=100,y=380)\r\n self.update.config(cursor= \"hand2\")\r\n\r\n self.clear = Button(self, text=\"CLEAR\", font=(\"Arial\",17,\"bold\"),bd=0, width = 10, bg=\"gold\", fg=\"white\", command=clear)\r\n self.clear.place(x=394,y=320)\r\n self.clear.config(cursor= \"hand2\")\r\n\r\n\r\n self.delete = Button(self, text=\"DELETE\", font=(\"Arial\",17,\"bold\"),bd=0, width = 10, bg=\"gold\", fg=\"white\", command=delete_course)\r\n self.delete.place(x=394,y=380)\r\n self.delete.config(cursor= \"hand2\")\r\n\r\n self.search = Button(self, text=\"SEARCH\", font=(\"Arial\",14,\"bold\"),bd=0, width = 10, bg=\"gold\", fg=\"white\", command=search_course)\r\n self.search.place(x=1080,y=100)\r\n self.search.config(cursor= \"hand2\")\r\n\r\n self.display = Button(self, text=\"DISPLAY\", font=(\"Arial\",14,\"bold\"),bd=0, width = 10, bg=\"gold\", fg=\"white\", command=display_course)\r\n self.display.place(x=575,y=103)\r\n self.display.config(cursor= \"hand2\")\r\n \r\n tablec()\r\n display_course()\r\n\r\n#####################################################################################################################################################################################\r\n\r\nclass Students(tk.Frame):\r\n\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self,parent)\r\n leftcolor = tk.Label(self, height = 1260, width =550, bg = \"maroon\")\r\n leftcolor.place(x=0, y=0)\r\n self.controller = controller\r\n self.controller.title(\"Student Information System\")\r\n \r\n label = tk.Label(self, text=\"STUDENT INFORMATION\",bg= \"gold\", fg= \"white\", relief=RIDGE, font=(\"Arial\", 40, \"bold\"))\r\n label.place(x=320,y=20)\r\n \r\n home = tk.Button(self, text=\"HOME\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Home))\r\n home.place(x=210,y=465)\r\n home.config(cursor= \"hand2\")\r\n \r\n course = tk.Button(self, text=\"COURSES\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Courses))\r\n course.place(x=880,y=465)\r\n course.config(cursor= \"hand2\")\r\n \r\n student = tk.Button(self, text=\"STUDENTS\",font=(\"Arial\",18,\"bold\"), height = 1, width = 12,relief=RIDGE, bg=\"gold\", fg=\"white\", command=lambda: controller.show(Students))\r\n 
student.place(x=540,y=465)\r\n student.config(cursor= \"hand2\")\r\n \r\n Student_ID = StringVar()\r\n Student_Name = StringVar() \r\n Student_YearLevel = StringVar()\r\n Student_Gender = StringVar()\r\n Course_Code = StringVar()\r\n SearchBar_Var = StringVar()\r\n \r\n\r\n def tables():\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"CREATE TABLE IF NOT EXISTS students (Student_ID TEXT PRIMARY KEY, Student_Name TEXT, Course_Code TEXT, \\\r\n Student_YearLevel TEXT, Student_Gender TEXT, \\\r\n FOREIGN KEY(Course_Code) REFERENCES courses(Course_Code) ON UPDATE CASCADE)\") \r\n conn.commit() \r\n conn.close() \r\n \r\n def add_stud():\r\n if Student_ID.get() == \"\" or Student_Name.get() == \"\" or Course_Code.get() == \"\" or Student_YearLevel.get() == \"\" or Student_Gender.get() == \"\": \r\n tkinter.messagebox.showinfo(\"Student Information\", \"Fill in the box\")\r\n else: \r\n ID = Student_ID.get()\r\n ID_list = []\r\n for i in ID:\r\n ID_list.append(i)\r\n a = ID.split(\"-\")\r\n if len(a[0]) == 4: \r\n if \"-\" in ID_list:\r\n if len(a[1]) == 1:\r\n tkinter.messagebox.showerror(\"Student Information\", \"ID Format:YYYY-NNNN\")\r\n elif len(a[1]) ==2:\r\n tkinter.messagebox.showerror(\"Student Information\", \"ID Format:YYYY-NNNN\")\r\n elif len(a[1]) ==3:\r\n tkinter.messagebox.showerror(\"Student Information\", \"ID Format:YYYY-NNNN\")\r\n else:\r\n x = ID.split(\"-\") \r\n year = x[0]\r\n number = x[1]\r\n if year.isdigit()==False or number.isdigit()==False:\r\n try:\r\n tkinter.messagebox.showerror(\"Student Information\", \"Invalid ID\")\r\n except:\r\n pass\r\n elif year==\" \" or number==\" \":\r\n try:\r\n tkinter.messagebox.showerror(\"Student Information\", \"Invalid ID\")\r\n except:\r\n pass\r\n else:\r\n try:\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n c = conn.cursor() \r\n c.execute(\"PRAGMA foreign_keys = ON\") \r\n c.execute(\"INSERT INTO students(Student_ID,Student_Name,Course_Code,Student_YearLevel,Student_Gender) VALUES (?,?,?,?,?)\",\\\r\n (Student_ID.get(),Student_Name.get(),Course_Code.get(),Student_YearLevel.get(), Student_Gender.get())) \r\n \r\n tkinter.messagebox.showinfo(\"Student Information\", \"Student Added Successfully!\")\r\n conn.commit() \r\n clear()\r\n display_stud()\r\n conn.close()\r\n except:\r\n ids=[]\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n c = conn.cursor()\r\n c.execute(\"SELECT * FROM students\")\r\n rows = c.fetchall()\r\n for row in rows:\r\n ids.append(row[0])\r\n if ID in ids:\r\n tkinter.messagebox.showerror(\"Student Information\", \"ID already exists\")\r\n else: \r\n tkinter.messagebox.showerror(\"Student Information\", \"Course Unavailable\")\r\n \r\n else:\r\n tkinter.messagebox.showerror(\"Student Information\", \"Invalid ID\")\r\n else:\r\n tkinter.messagebox.showerror(\"Student Information\", \"Invalid ID\")\r\n \r\n def update_stud():\r\n if Student_ID.get() == \"\" or Student_Name.get() == \"\" or Course_Code.get() == \"\" or Student_YearLevel.get() == \"\" or Student_Gender.get() == \"\": \r\n tkinter.messagebox.showinfo(\"Student Information\", \"Select a student\")\r\n else:\r\n for selected in self.studentlist.selection():\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"UPDATE students SET Student_ID=?, Student_Name=?, Course_Code=?, Student_YearLevel=?,Student_Gender=?\\\r\n WHERE Student_ID=?\", 
(Student_ID.get(),Student_Name.get(),Course_Code.get(),Student_YearLevel.get(), Student_Gender.get(),\\\r\n self.studentlist.set(selected, '#1')))\r\n conn.commit()\r\n tkinter.messagebox.showinfo(\"Student Information\", \"Student record updated!\")\r\n display_stud()\r\n clear()\r\n conn.close()\r\n \r\n def delete_stud(): \r\n try:\r\n messageDelete = tkinter.messagebox.askyesno(\"Student Information\", \"Are you sure you want to delete this record?\")\r\n if messageDelete > 0: \r\n con = sqlite3.connect(\"sis_v2.db\")\r\n cur = con.cursor()\r\n x = self.studentlist.selection()[0]\r\n id_no = self.studentlist.item(x)[\"values\"][0]\r\n cur.execute(\"DELETE FROM students WHERE Student_ID = ?\",(id_no,)) \r\n con.commit()\r\n self.studentlist.delete(x)\r\n tkinter.messagebox.showinfo(\"Student Information\", \"Student record deleted successfully!\")\r\n display_stud()\r\n clear()\r\n con.close() \r\n except Exception as e:\r\n print(e)\r\n \r\n def search_stud():\r\n Student_ID = SearchBar_Var.get()\r\n try: \r\n con = sqlite3.connect(\"sis_v2.db\")\r\n cur = con.cursor()\r\n cur .execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"SELECT * FROM students\")\r\n con.commit()\r\n self.studentlist.delete(*self.studentlist.get_children())\r\n rows = cur.fetchall()\r\n for row in rows:\r\n if row[0].startswith(Student_ID):\r\n self.studentlist.insert(\"\", tk.END, text=row[0], values=row[0:])\r\n con.close()\r\n except:\r\n tkinter.messagebox.showerror(\"Student Information\", \"Invalid ID\") \r\n \r\n def display_stud():\r\n self.studentlist.delete(*self.studentlist.get_children())\r\n conn = sqlite3.connect(\"sis_v2.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"PRAGMA foreign_keys = ON\")\r\n cur.execute(\"SELECT * FROM students\")\r\n rows = cur.fetchall()\r\n for row in rows:\r\n self.studentlist.insert(\"\", tk.END, text=row[0], values=row[0:])\r\n conn.close()\r\n \r\n def edit_stud():\r\n x = self.studentlist.focus()\r\n if x == \"\":\r\n tkinter.messagebox.showerror(\"Student Information\", \"Select a record\")\r\n return\r\n values = self.studentlist.item(x, \"values\")\r\n Student_ID.set(values[0])\r\n Student_Name.set(values[1])\r\n Course_Code.set(values[2])\r\n Student_YearLevel.set(values[3])\r\n Student_Gender.set(values[4])\r\n \r\n def clear():\r\n Student_ID.set('')\r\n Student_Name.set('') \r\n Student_YearLevel.set('')\r\n Student_Gender.set('')\r\n Course_Code.set('')\r\n \r\n def OnDoubleClick(event):\r\n item = self.studentlist.selection()[0]\r\n values = self.studentlist.item(item, \"values\")\r\n Student_ID.set(values[0])\r\n Student_Name.set(values[1])\r\n Course_Code.set(values[2])\r\n Student_YearLevel.set(values[3])\r\n Student_Gender.set(values[4])\r\n\r\n self.lblccode = Label(self,height = 3, width =78,relief=RIDGE, bg=\"orange\", fg=\"white\", padx=5, pady=5)\r\n self.lblccode.place(x=22,y=400)\r\n\r\n self.lblid = Label(self, font=(\"Arial\", 14, \"bold\"), text=\"ID Number:\", bg= \"gold\", fg= \"white\", padx=20, pady=5)\r\n self.lblid.place(x=40,y=144)\r\n self.txtid = Entry(self, font=(\"Arial\", 14), textvariable=Student_ID, width=27)\r\n self.txtid.place(x=210,y=150)\r\n\r\n self.lblname = Label(self, font=(\"Arial\", 14, \"bold\"), text=\"Name:\", bg= \"gold\", fg= \"white\", padx=38, pady=5)\r\n self.lblname.place(x=40,y=195)\r\n self.txtname = Entry(self, font=(\"Arial\", 14), textvariable=Student_Name, width=27)\r\n self.txtname.place(x=210,y=200)\r\n \r\n self.lblc = Label(self, font=(\"Arial\", 14, \"bold\"), text=\"Course:\", bg= \"gold\", 
fg= \"white\", padx=35, pady=5)\r\n        self.lblc.place(x=40,y=240)\r\n        self.txtc = Entry(self,font=(\"Arial\", 14), textvariable=Course_Code, width=27)\r\n        self.txtc.place(x=210,y=246)\r\n\r\n        self.lblyear = Label(self,font=(\"Arial\", 14, \"bold\"), text=\"Year Level:\", bg= \"gold\", fg= \"white\", padx=19, pady=5)\r\n        self.lblyear.place(x=40,y=295)\r\n        self.txtyear = ttk.Combobox(self, value=[\"1st Year\", \"2nd Year\", \"3rd Year\", \"4th Year\"], state=\"readonly\", font=(\"Arial\", 14), textvariable=Student_YearLevel, width=26)\r\n        self.txtyear.place(x=210,y=305)\r\n        \r\n        self.lblgender = Label(self, font=(\"Arial\", 14, \"bold\"), text=\"Gender:\", bg= \"gold\", fg= \"white\", padx=32, pady=5)\r\n        self.lblgender.place(x=40,y=350)\r\n        self.txtgender = ttk.Combobox(self, value=[\"Male\", \"Female\"], font=(\"Arial\", 14), state=\"readonly\", textvariable=Student_Gender, width=26)\r\n        self.txtgender.place(x=210,y=356)\r\n\r\n        self.SearchBar = Entry(self, font=(\"Arial\", 11), textvariable=SearchBar_Var, bd=3, width=34)\r\n        self.SearchBar.place(x=870,y=105)\r\n\r\n        ## Treeview\r\n\r\n        scrollbar = Scrollbar(self, orient=VERTICAL)\r\n        scrollbar.place(x=1230,y=140,height=305)\r\n        \r\n\r\n        self.studentlist = ttk.Treeview(self, columns=(\"ID Number\", \"Name\", \"Course\", \"Year Level\", \"Gender\"), height = 14, yscrollcommand=scrollbar.set)\r\n\r\n        self.studentlist.heading(\"ID Number\", text=\"ID Number\", anchor=W)\r\n        self.studentlist.heading(\"Name\", text=\"Name\",anchor=W)\r\n        self.studentlist.heading(\"Course\", text=\"Course\",anchor=W)\r\n        self.studentlist.heading(\"Year Level\", text=\"Year Level\",anchor=W)\r\n        self.studentlist.heading(\"Gender\", text=\"Gender\",anchor=W)\r\n        self.studentlist['show'] = 'headings'\r\n\r\n        self.studentlist.column(\"ID Number\", width=100, anchor=W, stretch=False)\r\n        self.studentlist.column(\"Name\", width=200, stretch=False)\r\n        self.studentlist.column(\"Course\", width=130, anchor=W, stretch=False)\r\n        self.studentlist.column(\"Year Level\", width=100, anchor=W, stretch=False)\r\n        self.studentlist.column(\"Gender\", width=100, anchor=W, stretch=False)\r\n        \r\n        self.studentlist.bind(\"<Double-1>\",OnDoubleClick)\r\n        \r\n        \r\n\r\n        self.studentlist.place(x=590,y=140)\r\n        scrollbar.config(command=self.studentlist.yview)\r\n        \r\n        ## Buttons\r\n        \r\n        self.add = Button(self, text=\"ADD\", font=(\"Arial\", 16, \"bold\"), bg= \"gold\", fg= \"white\", padx= 20, command=add_stud)\r\n        self.add.place(x=35,y=408)\r\n        self.add.config(cursor= \"hand2\")\r\n\r\n        self.update = Button(self, text=\"UPDATE\", font=(\"Arial\", 16, \"bold\"), bg= \"gold\", fg= \"white\",padx= 10, command=update_stud)\r\n        self.update.place(x=175,y=408)\r\n        self.update.config(cursor= \"hand2\")\r\n\r\n        self.clear = Button(self, text=\"CLEAR\", font=(\"Arial\", 16, \"bold\"), bg= \"gold\", fg= \"white\", padx= 10,command=clear)\r\n        self.clear.place(x=330,y=408)\r\n        self.clear.config(cursor= \"hand2\")\r\n\r\n        self.delete = Button(self, text=\"DELETE\", font=(\"Arial\",16, \"bold\"), bg= \"gold\", fg= \"white\",padx= 6, command=delete_stud)\r\n        self.delete.place(x=460,y=408)\r\n        self.delete.config(cursor= \"hand2\")\r\n\r\n        self.search = Button(self, text=\"SEARCH\", font=(\"Arial\", 14, \"bold\"),bd=0, bg= \"gold\", fg=\"white\", command=search_stud)\r\n        self.search.place(x=1160,y=100)\r\n        self.search.config(cursor= \"hand2\")\r\n\r\n        self.display = Button(self, text=\"DISPLAY\", font=(\"Arial\", 14, \"bold\"), bd=0, bg= \"gold\", fg=\"white\",command = display_stud)\r\n        
self.display.place(x=588,y=97)\r\n self.display.config(cursor= \"hand2\")\r\n\r\n \r\n tables()\r\n display_stud()\r\n\r\n#####################################################################################################################################################################################\r\n\r\nroot = sis()\r\nroot.geometry(\"1260x550\")\r\n\r\nroot.mainloop()\r\n", "sub_path": "SIS2.py", "file_name": "SIS2.py", "file_ext": "py", "file_size_in_byte": 27452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "tkinter.Tk", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tkinter.Tk.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 34, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 37, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 94, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 101, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 106, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 121, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 129, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.askyesno", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 139, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 147, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 151, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 155, 
"usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 175, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 179, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 183, "usage_type": "call"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 203, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 203, "usage_type": "name"}, {"api_name": "tkinter.Frame", "line_number": 255, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 258, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 258, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 259, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 264, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 267, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 271, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 275, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 288, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 299, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 299, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 309, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 309, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 311, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 311, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 313, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 313, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 320, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 320, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 325, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 325, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 330, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 336, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 336, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 343, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 350, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 350, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 352, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 352, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 355, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 355, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 357, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 357, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 361, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 361, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 364, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 371, "usage_type": "call"}, {"api_name": "tkinter.messagebox", 
"line_number": 371, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.askyesno", "line_number": 378, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 378, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 380, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 387, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 387, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 397, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 406, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 409, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 409, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 413, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 419, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 425, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 425, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 470, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 470, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 475, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 475, "usage_type": "name"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 487, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 487, "usage_type": "name"}]} +{"seq_id": "535649408", "text": "import dash\nimport dash_bootstrap_components as dbc\nimport dash_table\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objects as go\nfrom dash.dependencies import Input, Output\nimport pandas as pd\n\n## variables\ncoffee_flavours_1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/sunburst-coffee-flavors-complete.csv')\ncoffee_flavours_2= pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/coffee-flavors.csv')\ncoffee_exports = pd.read_csv('data_processing/coffee_exports.csv')\n\n\ndef get_coffee_flavours(model):\n dataset = None\n if model == 1:\n dataset = coffee_flavours_1\n elif model == 2:\n dataset = coffee_flavours_2\n else:\n print(\"whattt\")\n \n fig = go.Figure()\n fig.add_trace(go.Sunburst(\n ids=dataset.ids,\n labels=dataset.labels,\n parents=dataset.parents,\n domain=dict(column=model)\n ))\n\n fig.update_layout(\n margin = dict(t=20, l=2, r=2, b=2)\n )\n fig.layout\n return fig;\n\n\ndef get_coffee_exports(selected_year, selected_variable):\n filtered_df = coffee_exports[coffee_exports.Anio == selected_year]\n trace1 = go.Bar(x=filtered_df['PaisDestino'], y=filtered_df[selected_variable], name=\"toneladas\", )\n return {\n 'data': [trace1],\n 'layout': go.Layout(colorway=[\"#EF963B\"], hovermode=\"closest\",\n xaxis={'title': \"Países\", 'titlefont': {'color': 'black', 'size': 14},\n 'tickfont': {'size': 9, 'color': 'black'}},\n yaxis={'title': selected_variable, 'titlefont': {'color': 'black', 'size': 14, },\n 'tickfont': {'color': 'black'}})}\n\nnavbar = dbc.NavbarSimple(\n children=[\n dbc.DropdownMenu(\n nav=True,\n in_navbar=True,\n label=\"Menú de aplicaciones\",\n children=[\n dbc.DropdownMenuItem(\"App 1\"),\n dbc.DropdownMenuItem(\"App 2\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\"Entry 3\"),\n ],\n ),\n ],\n brand=\"Arjé 
Coffee - Entendiendo el café\",\n brand_href=\"https://arjecoffee.co\",\n sticky=\"top\",\n)\n\nbody = dbc.Container(\n [\n dbc.Row([\n dbc.Col([\n html.H2(\"Ser un catador\"),\n html.P(\n \"\"\"Asímismo, cuando pruebes cosas, piensa de verdad en lo que estás\n percibiendo. Intenta comprender qué fue lo que causó aquella diferencia\n en el sabor. [Un catador con experiencia] suele usar un lenguaje más\n complejo y descriptivo y está más acostumbrado a separar las partes\n más allá de las sensaciones de sabor básicas. Esto te ayudará a tener\n mayor experiencia en percibir los alimentos y las bebidas,\n ser más consciente del sabor y desarrollar la forma\n en la que te comunicas acerca del sabor\"\"\"\n ),\n html.A([\n dbc.Button([ \"Aprender más\"], color=\"primary\",)\n ], href=\"https://www.perfectdailygrind.com/2018/10/notas-de-sabor-como-ayudar-a-los-consumidores-a-entenderlas/\"),\n ],md=4,\n ),\n dbc.Col([\n html.H2(\"Las notas del café\"),\n html.P(\"\"\"El café tiene notas maravillosas. Descúbrelas!\"\"\"),\n dcc.Dropdown(\n id='coffee-flavours-dropdown',\n options=[\n {'label': 'Por categoría', 'value': '1'},\n {'label': 'Por sabor', 'value': '2'},\n ],\n value='1'\n ),\n dcc.Graph(\n id='coffee-flavours',\n ),\n ]\n ),]\n ),\n dbc.Row([\n dbc.Col([\n html.H2(\"Exportaciones anuales de café\"),\n dcc.Dropdown(\n id='coffee-exports-dropdown',\n options=[\n {'label': 'USD en miles', 'value': 'ValorMilesFOBDol'},\n {'label': 'Pesos Colombianos en miles', 'value': 'ValorMilesPesos'},\n {'label': 'Toneladas', 'value': 'VolumenToneladas'},\n ],\n value='ValorMilesFOBDol',\n ),\n ])\n \n ]),\n dbc.Row([\n dbc.Col([\n dcc.Graph(id='exportaciones-por-anho'),\n html.Div([\n dcc.Slider(\n id='exportaciones-year-slider',\n min=coffee_exports['Anio'].min(),\n max=coffee_exports['Anio'].max(),\n value=coffee_exports['Anio'].min(),\n marks={str(year): str(year) for year in coffee_exports['Anio'].unique()},\n step=None,\n ),\n ],style={'paddingBottom': 40, 'paddingTop': 40}),\n \n ], md=12),\n \n ]),\n dbc.Row([\n html.H2([\"Tabla dinámica para otros insights\"])\n ]),\n dbc.Row([\n dbc.Col([\n dash_table.DataTable(\n id='datatable-interactivity',\n columns=[\n {\"name\": i, \"id\": i, \"deletable\": True, \"selectable\": True} for i in coffee_exports.columns\n ],\n data=coffee_exports.to_dict('records'),\n editable=True,\n filter_action=\"native\",\n sort_action=\"native\",\n sort_mode=\"multi\",\n column_selectable=\"single\",\n row_selectable=\"multi\",\n row_deletable=True,\n selected_columns=[],\n selected_rows=[],\n page_action=\"native\",\n page_current= 0,\n page_size= 10,\n ),\n html.Div(id='datatable-interactivity-container')\n ])\n ]),\n\n ],\n className=\"mt-4\",\n)\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n\napp.layout = html.Div([navbar, body])\n\n\n\n@app.callback(\n dash.dependencies.Output('coffee-flavours', 'figure'),\n [dash.dependencies.Input('coffee-flavours-dropdown', 'value')])\ndef update_output(value):\n return get_coffee_flavours(int(value))\n\n\n@app.callback(\n Output('exportaciones-por-anho', 'figure'),\n [Input('exportaciones-year-slider', 'value'),\n Input('coffee-exports-dropdown', 'value')])\ndef update_figure(selected_year, selected_variable):\n return get_coffee_exports(selected_year, selected_variable)\n\n\n@app.callback(\n Output('datatable-interactivity', 'style_data_conditional'),\n [Input('datatable-interactivity', 'selected_columns')]\n)\ndef update_styles(selected_columns):\n return [{\n 'if': { 'column_id': i },\n 
'background_color': '#D2F3FF'\n } for i in selected_columns]\n\n@app.callback(\n Output('datatable-interactivity-container', \"children\"),\n [Input('datatable-interactivity', \"derived_virtual_data\"),\n Input('datatable-interactivity', \"derived_virtual_selected_rows\")])\ndef update_graphs(rows, derived_virtual_selected_rows):\n # When the table is first rendered, `derived_virtual_data` and\n # `derived_virtual_selected_rows` will be `None`. This is due to an\n # idiosyncracy in Dash (unsupplied properties are always None and Dash\n # calls the dependent callbacks when the component is first rendered).\n # So, if `rows` is `None`, then the component was just rendered\n # and its value will be the same as the component's dataframe.\n # Instead of setting `None` in here, you could also set\n # `derived_virtual_data=df.to_rows('dict')` when you initialize\n # the component.\n if derived_virtual_selected_rows is None:\n derived_virtual_selected_rows = []\n\n dff = coffee_exports if rows is None else pd.DataFrame(rows)\n\n colors = ['#7FDBFF' if i in derived_virtual_selected_rows else '#0074D9'\n for i in range(len(dff))]\n\n return [\n dcc.Graph(\n id=column,\n figure={\n \"data\": [\n {\n \"x\": dff[\"PaisDestino\"],\n \"y\": dff[column],\n \"type\": \"bar\",\n \"marker\": {\"color\": colors},\n }\n ],\n \"layout\": {\n \"xaxis\": {\"automargin\": True},\n \"yaxis\": {\n \"automargin\": True,\n \"title\": {\"text\": column}\n },\n \"height\": 250,\n \"margin\": {\"t\": 10, \"l\": 10, \"r\": 10},\n },\n },\n )\n # check if column exists - user may have deleted it\n # If `column.deletable=False`, then you don't\n # need to do this check.\n for column in [\"ValorMilesFOBDol\", \"ValorMilesPesos\", \"VolumenToneladas\"] if column in dff\n ]\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\nserver = app.server\napp.config.suppress_callback_exceptions = True", "sub_path": "app_deploy.py", "file_name": "app_deploy.py", "file_ext": "py", "file_size_in_byte": 8872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 25, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 25, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Sunburst", "line_number": 26, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 26, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 42, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 42, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Layout", "line_number": 45, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 45, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.NavbarSimple", "line_number": 51, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.DropdownMenu", "line_number": 53, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.DropdownMenuItem", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.DropdownMenuItem", "line_number": 59, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.DropdownMenuItem", "line_number": 60, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.DropdownMenuItem", 
"line_number": 61, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Container", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 72, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 73, "usage_type": "call"}, {"api_name": "dash_html_components.H2", "line_number": 74, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 75, "usage_type": "call"}, {"api_name": "dash_html_components.A", "line_number": 85, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 86, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 90, "usage_type": "call"}, {"api_name": "dash_html_components.H2", "line_number": 91, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 92, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 93, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 101, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 108, "usage_type": "call"}, {"api_name": "dash_html_components.H2", "line_number": 109, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 110, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 122, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 123, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 124, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 125, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 126, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 139, "usage_type": "call"}, {"api_name": "dash_html_components.H2", "line_number": 140, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 142, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_table.DataTable", "line_number": 144, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 163, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 171, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 171, "usage_type": "attribute"}, {"api_name": "dash_html_components.Div", "line_number": 173, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 178, "usage_type": "call"}, {"api_name": "dash.dependencies", "line_number": 178, "usage_type": "attribute"}, {"api_name": "dash.dependencies.Input", "line_number": 179, "usage_type": "call"}, {"api_name": "dash.dependencies", "line_number": 179, "usage_type": "attribute"}, {"api_name": "dash.dependencies.Output", "line_number": 185, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 186, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 187, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 193, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 194, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 219, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 225, "usage_type": "call"}, {"api_name": 
"dash.dependencies.Output", "line_number": 203, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 204, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 205, "usage_type": "call"}]} +{"seq_id": "197318180", "text": "import logging\nimport re\nfrom secrets import token_urlsafe\nimport time\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.forms.models import model_to_dict\nfrom django.http import (\n HttpResponse,\n HttpResponseBadRequest,\n HttpResponseForbidden,\n HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport jwt\n\nfrom aidants_connect_web.decorators import activity_required\nfrom aidants_connect_web.models import (\n Connection,\n Journal,\n Usager,\n)\n\nfrom aidants_connect_web.utilities import generate_sha256_hash\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger()\n\n\ndef check_request_parameters(\n parameters: dict, expected_static_parameters: dict, view_name: str\n) -> tuple:\n \"\"\"\n When a request arrives, this function checks that all requested parameters are\n present (if not, returns (1, \"missing parameter\") and if the static parameters are\n correct (if not, returns (1, \"forbidden parameter value\")). If all is good, returns\n (0, \"all is good\")\n :param parameters: dict of all parameters expected in the request\n (None if the parameter was not present)\n :param expected_static_parameters: subset of parameters that are not dynamic\n :param view_name: str with the name of the view for logging purposes\n :return: tuple (error, message) where error is a bool and message an str\n \"\"\"\n for parameter, value in parameters.items():\n if not value:\n error_message = f\"400 Bad request: There is no {parameter} @ {view_name}\"\n log.info(error_message)\n return 1, \"missing parameter\"\n elif (\n parameter not in expected_static_parameters\n and parameter in [\"state\", \"nonce\"]\n and not value.isalnum()\n ):\n error_message = (\n f\"403 forbidden request: malformed {parameter} @ {view_name}\"\n )\n log.info(error_message)\n return 1, \"malformed parameter value\"\n elif (\n parameter in expected_static_parameters\n and value != expected_static_parameters[parameter]\n ):\n error_message = (\n f\"403 forbidden request: unexpected {parameter} @ {view_name}\"\n )\n log.info(error_message)\n return 1, \"forbidden parameter value\"\n return 0, \"all good\"\n\n\n@login_required\n@activity_required\ndef authorize(request):\n\n if request.method == \"GET\":\n parameters = {\n \"state\": request.GET.get(\"state\"),\n \"nonce\": request.GET.get(\"nonce\"),\n \"response_type\": request.GET.get(\"response_type\"),\n \"client_id\": request.GET.get(\"client_id\"),\n \"redirect_uri\": request.GET.get(\"redirect_uri\"),\n \"scope\": request.GET.get(\"scope\"),\n \"acr_values\": request.GET.get(\"acr_values\"),\n }\n EXPECTED_STATIC_PARAMETERS = {\n \"response_type\": \"code\",\n \"client_id\": settings.FC_AS_FI_ID,\n \"redirect_uri\": settings.FC_AS_FI_CALLBACK_URL,\n \"scope\": \"openid profile email address phone birth\",\n \"acr_values\": \"eidas1\",\n }\n\n error, message = check_request_parameters(\n parameters, EXPECTED_STATIC_PARAMETERS, \"authorize\"\n )\n if 
error:\n            return (\n                HttpResponseBadRequest()\n                if message == \"missing parameter\"\n                else HttpResponseForbidden()\n            )\n\n        connection = Connection.objects.create(\n            state=parameters[\"state\"],\n            nonce=parameters[\"nonce\"],\n        )\n        aidant = request.user\n\n        return render(\n            request,\n            \"aidants_connect_web/id_provider/authorize.html\",\n            {\n                \"connection_id\": connection.id,\n                \"usagers\": aidant.get_usagers_with_active_autorisation(),\n                \"aidant\": aidant,\n            },\n        )\n\n    else:\n        parameters = {\n            \"connection_id\": request.POST.get(\"connection_id\"),\n            \"chosen_usager\": request.POST.get(\"chosen_usager\"),\n        }\n\n        try:\n            connection = Connection.objects.get(pk=parameters[\"connection_id\"])\n            if connection.is_expired:\n                log.info(\"connection has expired at authorize\")\n                return render(request, \"408.html\", status=408)\n        except ObjectDoesNotExist:\n            log.info(\"No connection corresponds to the connection_id:\")\n            log.info(parameters[\"connection_id\"])\n            logout(request)\n            return HttpResponseForbidden()\n\n        aidant = request.user\n        chosen_usager = Usager.objects.get(pk=parameters[\"chosen_usager\"])\n        if chosen_usager not in aidant.get_usagers_with_active_autorisation():\n            log.info(\n                \"This usager does not have a valid autorisation \"\n                \"with the aidant's organisation\"\n            )\n            log.info(aidant.id)\n            logout(request)\n            return HttpResponseForbidden()\n\n        connection.usager = chosen_usager\n        connection.save()\n\n        select_demarches_url = (\n            f\"{reverse('fi_select_demarche')}?connection_id={connection.id}\"\n        )\n        return redirect(select_demarches_url)\n\n\n@login_required\n@activity_required()\ndef fi_select_demarche(request):\n    if request.method == \"GET\":\n        parameters = {\n            \"connection_id\": request.GET.get(\"connection_id\"),\n        }\n\n        try:\n            connection = Connection.objects.get(pk=parameters[\"connection_id\"])\n            if connection.is_expired:\n                log.info(\"Connection has expired at select_demarche\")\n                return render(request, \"408.html\", status=408)\n        except ObjectDoesNotExist:\n            log.info(\"No connection matches the connection_id:\")\n            log.info(parameters[\"connection_id\"])\n            logout(request)\n            return HttpResponseForbidden()\n\n        aidant = request.user\n\n        usager_demarches = aidant.get_active_demarches_for_usager(connection.usager)\n\n        demarches = {\n            nom_demarche: settings.DEMARCHES[nom_demarche]\n            for nom_demarche in usager_demarches\n        }\n\n        return render(\n            request,\n            \"aidants_connect_web/id_provider/fi_select_demarche.html\",\n            {\n                \"connection_id\": connection.id,\n                \"aidant\": request.user.get_full_name(),\n                \"usager\": connection.usager,\n                \"demarches\": demarches,\n            },\n        )\n\n    else:\n        parameters = {\n            \"connection_id\": request.POST.get(\"connection_id\"),\n            \"chosen_demarche\": request.POST.get(\"chosen_demarche\"),\n        }\n\n        try:\n            connection = Connection.objects.get(pk=parameters[\"connection_id\"])\n            if connection.is_expired:\n                log.info(\"connection has expired at select_demarche\")\n                return render(request, \"408.html\", status=408)\n        except ObjectDoesNotExist:\n            log.info(\"No connection corresponds to the connection_id:\")\n            log.info(parameters[\"connection_id\"])\n            logout(request)\n            return HttpResponseForbidden()\n\n        aidant = request.user\n        autorisation = aidant.get_valid_autorisation(\n            parameters[\"chosen_demarche\"], connection.usager\n        )\n        if not autorisation:\n            log.info(\"The autorisation asked does not exist\")\n            return HttpResponseForbidden()\n\n        code = token_urlsafe(64)\n        connection.code = make_password(code, settings.FC_AS_FI_HASH_SALT)\n        connection.demarche 
= parameters[\"chosen_demarche\"]\n connection.autorisation = autorisation\n connection.complete = True\n connection.aidant = aidant\n connection.save()\n\n return redirect(\n f\"{settings.FC_AS_FI_CALLBACK_URL}?code={code}&state={connection.state}\"\n )\n\n\n# Due to `no_referer` error\n# https://docs.djangoproject.com/en/dev/ref/csrf/#django.views.decorators.csrf.csrf_exempt\n@csrf_exempt\ndef token(request):\n if request.method == \"GET\":\n return HttpResponse(\"You did a GET on a POST only route\")\n\n client_secret = request.POST.get(\"client_secret\")\n try:\n hash_client_secret = generate_sha256_hash(client_secret.encode())\n except AttributeError:\n return HttpResponseBadRequest()\n\n parameters = {\n \"code\": request.POST.get(\"code\"),\n \"grant_type\": request.POST.get(\"grant_type\"),\n \"redirect_uri\": request.POST.get(\"redirect_uri\"),\n \"client_id\": request.POST.get(\"client_id\"),\n \"hash_client_secret\": hash_client_secret,\n }\n EXPECTED_STATIC_PARAMETERS = {\n \"grant_type\": \"authorization_code\",\n \"redirect_uri\": settings.FC_AS_FI_CALLBACK_URL,\n \"client_id\": settings.FC_AS_FI_ID,\n \"hash_client_secret\": settings.HASH_FC_AS_FI_SECRET,\n }\n\n error, message = check_request_parameters(\n parameters, EXPECTED_STATIC_PARAMETERS, \"token\"\n )\n if error:\n return (\n HttpResponseBadRequest()\n if message == \"missing parameter\"\n else HttpResponseForbidden()\n )\n\n code_hash = make_password(parameters[\"code\"], settings.FC_AS_FI_HASH_SALT)\n try:\n connection = Connection.objects.get(code=code_hash)\n if connection.is_expired:\n log.info(\"connection has expired at token\")\n return render(request, \"408.html\", status=408)\n except ObjectDoesNotExist:\n log.info(\"403: /token No connection corresponds to the code\")\n log.info(parameters[\"code\"])\n return HttpResponseForbidden()\n\n id_token = {\n # The audience, the Client ID of your Auth0 Application\n \"aud\": settings.FC_AS_FI_ID,\n # The expiration time. 
in the format \"seconds since epoch\"\n # TODO Check if 10 minutes is not too much\n \"exp\": int(time.time()) + settings.FC_CONNECTION_AGE,\n # The issued at time\n \"iat\": int(time.time()),\n # The issuer, the URL of your Auth0 tenant\n \"iss\": settings.HOST,\n # The unique identifier of the user\n \"sub\": connection.usager.sub,\n \"nonce\": connection.nonce,\n }\n encoded_id_token = jwt.encode(id_token, client_secret, algorithm=\"HS256\")\n\n access_token = token_urlsafe(64)\n connection.access_token = make_password(access_token, settings.FC_AS_FI_HASH_SALT)\n connection.save()\n\n response = {\n \"access_token\": access_token,\n \"expires_in\": 3600,\n \"id_token\": encoded_id_token.decode(\"utf-8\"),\n \"refresh_token\": \"5ieq7Bg173y99tT6MA\",\n \"token_type\": \"Bearer\",\n }\n\n definite_response = JsonResponse(response)\n return definite_response\n\n\ndef user_info(request):\n auth_header = request.META.get(\"HTTP_AUTHORIZATION\")\n if not auth_header:\n log.info(\"403: Missing auth header\")\n return HttpResponseForbidden()\n\n pattern = re.compile(r\"^Bearer\\s([A-Z-a-z-0-9-_/-]+)$\")\n if not pattern.match(auth_header):\n log.info(\"Auth header has wrong format\")\n return HttpResponseForbidden()\n\n auth_token = auth_header[7:]\n auth_token_hash = make_password(auth_token, settings.FC_AS_FI_HASH_SALT)\n try:\n connection = Connection.objects.get(access_token=auth_token_hash)\n if connection.is_expired:\n log.info(\"connection has expired at user_info\")\n return render(request, \"408.html\", status=408)\n except ObjectDoesNotExist:\n log.info(\"403: /user_info No connection corresponds to the access_token\")\n log.info(auth_token)\n return HttpResponseForbidden()\n\n usager = model_to_dict(connection.usager)\n del usager[\"id\"]\n birthdate = usager[\"birthdate\"]\n birthplace = usager[\"birthplace\"]\n birthcountry = usager[\"birthcountry\"]\n usager[\"birthplace\"] = str(birthplace)\n usager[\"birthcountry\"] = str(birthcountry)\n usager[\"birthdate\"] = str(birthdate)\n\n Journal.log_autorisation_use(\n aidant=connection.aidant,\n usager=connection.usager,\n demarche=connection.demarche,\n access_token=connection.access_token,\n autorisation=connection.autorisation,\n )\n\n return JsonResponse(usager, safe=False)\n\n\ndef end_session_endpoint(request):\n if request.method != \"GET\":\n log.info(\"Request should be a GET @ end_session_endpoint\")\n return HttpResponseBadRequest()\n\n redirect_uri = settings.FC_AS_FI_LOGOUT_REDIRECT_URI\n if request.GET.get(\"post_logout_redirect_uri\") != redirect_uri:\n message = (\n f\"post_logout_redirect_uri is \"\n f\"{request.GET.get('post_logout_redirect_uri')} instead of \"\n f\"{redirect_uri} @ end_session_endpoint\"\n )\n log.info(message)\n return HttpResponseBadRequest()\n\n return HttpResponseRedirect(redirect_uri)\n", "sub_path": "aidants_connect_web/views/id_provider.py", "file_name": "id_provider.py", "file_ext": "py", "file_size_in_byte": 13240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "logging.basicConfig", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_ID", "line_number": 95, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 95, "usage_type": "name"}, {"api_name": "django.conf.settings.FC_AS_FI_CALLBACK_URL", 
"line_number": 96, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 96, "usage_type": "name"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 106, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 108, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects.create", "line_number": 111, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Connection", "line_number": 111, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects.get", "line_number": 134, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Connection", "line_number": 134, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 138, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 141, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 142, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Usager.objects.get", "line_number": 145, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Usager.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Usager", "line_number": 145, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 152, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 153, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 154, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 160, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 79, "usage_type": "name"}, {"api_name": "aidants_connect_web.decorators.activity_required", "line_number": 80, "usage_type": "name"}, {"api_name": "aidants_connect_web.models.Connection.objects.get", "line_number": 174, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Connection", "line_number": 174, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 177, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 178, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 181, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 182, "usage_type": "call"}, {"api_name": "django.conf.settings.DEMARCHES", "line_number": 189, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 189, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 193, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects.get", "line_number": 211, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Connection", 
"line_number": 211, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 214, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 215, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 218, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 219, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 227, "usage_type": "call"}, {"api_name": "secrets.token_urlsafe", "line_number": 229, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 230, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_HASH_SALT", "line_number": 230, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 230, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 237, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_CALLBACK_URL", "line_number": 238, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 238, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 165, "usage_type": "name"}, {"api_name": "aidants_connect_web.decorators.activity_required", "line_number": 166, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 247, "usage_type": "call"}, {"api_name": "aidants_connect_web.utilities.generate_sha256_hash", "line_number": 251, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 253, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_CALLBACK_URL", "line_number": 264, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 264, "usage_type": "name"}, {"api_name": "django.conf.settings.FC_AS_FI_ID", "line_number": 265, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 265, "usage_type": "name"}, {"api_name": "django.conf.settings.HASH_FC_AS_FI_SECRET", "line_number": 266, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 266, "usage_type": "name"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 274, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 276, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 279, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_HASH_SALT", "line_number": 279, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 279, "usage_type": "name"}, {"api_name": "aidants_connect_web.models.Connection.objects.get", "line_number": 281, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects", "line_number": 281, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Connection", "line_number": 281, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 284, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 285, "usage_type": "name"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 288, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_ID", "line_number": 292, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 292, "usage_type": "name"}, {"api_name": "time.time", "line_number": 295, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_CONNECTION_AGE", 
"line_number": 295, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 295, "usage_type": "name"}, {"api_name": "time.time", "line_number": 297, "usage_type": "call"}, {"api_name": "django.conf.settings.HOST", "line_number": 299, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 299, "usage_type": "name"}, {"api_name": "jwt.encode", "line_number": 304, "usage_type": "call"}, {"api_name": "secrets.token_urlsafe", "line_number": 306, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 307, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_HASH_SALT", "line_number": 307, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 307, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 318, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 244, "usage_type": "name"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 326, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 328, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 331, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 334, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_HASH_SALT", "line_number": 334, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 334, "usage_type": "name"}, {"api_name": "aidants_connect_web.models.Connection.objects.get", "line_number": 336, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Connection.objects", "line_number": 336, "usage_type": "attribute"}, {"api_name": "aidants_connect_web.models.Connection", "line_number": 336, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 339, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 340, "usage_type": "name"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 343, "usage_type": "call"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 345, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Journal.log_autorisation_use", "line_number": 354, "usage_type": "call"}, {"api_name": "aidants_connect_web.models.Journal", "line_number": 354, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 362, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 368, "usage_type": "call"}, {"api_name": "django.conf.settings.FC_AS_FI_LOGOUT_REDIRECT_URI", "line_number": 370, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 370, "usage_type": "name"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 378, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 380, "usage_type": "call"}]} +{"seq_id": "380042036", "text": "import os\nimport subprocess\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\n\nfrom FilmsApp.models import Film, Mark\n\nBASE_DIR = settings.BASE_DIR\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n self.create_all_marks()\n self.setup()\n\n def create_all_marks(self):\n with open('all_marks', 'w') as f:\n f.write(str(User.objects.all().order_by(\"-id\")[0].id) + ' ')\n 
f.write(str(Film.objects.all().order_by(\"-id\")[0].id) + ' ')\n f.write(str(Mark.objects.all().count()) + '\\n')\n for mark in Mark.objects.all():\n f.write(\n str(mark.user_id) + ' ' + str(mark.film_id) + ' ' + str(mark.mark) + ' ' + str(mark.mark_type_id) + '\\n'\n )\n\n def setup(self):\n os.chdir(BASE_DIR)\n subprocess.call('./setup')\n", "sub_path": "WTWApp/management/commands/make_learning.py", "file_name": "make_learning.py", "file_ext": "py", "file_size_in_byte": 928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.conf.settings.BASE_DIR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 20, "usage_type": "name"}, {"api_name": "FilmsApp.models.Film.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "FilmsApp.models.Film.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "FilmsApp.models.Film", "line_number": 21, "usage_type": "name"}, {"api_name": "FilmsApp.models.Mark.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "FilmsApp.models.Mark.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "FilmsApp.models.Mark", "line_number": 22, "usage_type": "name"}, {"api_name": "FilmsApp.models.Mark.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "FilmsApp.models.Mark.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "FilmsApp.models.Mark", "line_number": 23, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "334046056", "text": "import asyncio\nimport aiohttp\n\nsemaphore = asyncio.Semaphore(10)\ndef get_url():\n url_lis = []\n for i in range(0, 100):\n url = 'https://spa5.scrape.center/api/book/?limit=18&offset={}'.format(i * 18)\n url_lis.append(url)\n return url_lis\n\n\nurl_lis = get_url()\n\n\nasync def request(url):\n async with semaphore:\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as r:\n await asyncio.sleep(1)\n return await r.json()\n\n\nasync def main():\n await asyncio.gather(*[request(url) for url in url_lis])\n\n\nif __name__ == '__main__':\n import time\n\n start = time.time()\n asyncio.get_event_loop().run_until_complete(main())\n print(time.time() - start)\n", "sub_path": "util/spa4.py", "file_name": "spa4.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "asyncio.Semaphore", "line_number": 4, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 18, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 25, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}]} +{"seq_id": 
"245882454", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('investor', '0030_merge'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessangel',\n name='cap_origin',\n field=models.CharField(default=b'PERSONNAL', max_length=20, verbose_name=b'Vos revenus proviennent principalement de', choices=[(b'PERSONNAL', b'Salaire'), (b'RETIREMENT', b'Pension-Retraite-Rente'), (b'ESTATE', b'Revenus Fonciers'), (b'OTHER', b'Autre')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='diversification',\n field=models.CharField(blank=True, max_length=10, null=True, verbose_name=b'Diversification de votre patrimoine', choices=[(b'MISC', b'Diversification de votre patrimoine'), (b'INVESTMENT', b'Investissement \\xc3\\xa0 moyen / long terme'), (b'OPTIM', b'Optimisation fiscale'), (b'OTHER', b'Autres')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='estate_property',\n field=models.NullBooleanField(default=False, verbose_name=b\"Propri\\xc3\\xa9taire d'un autre bien immobilier\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='exit_question',\n field=models.NullBooleanField(default=False, verbose_name=b'Avez-vous conscience que vous aurez des difficult\\xc3\\xa9s \\xc3\\xa0 revendre vos titres ?', choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='finance_situation',\n field=models.NullBooleanField(default=False, verbose_name=b\"Situation professionelle dans le secteur financier de plus d'un an\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='investor_status',\n field=models.CharField(default=b'PHY', max_length=20, verbose_name=b\"Statut d'investisseur\", choices=[(b'PHY', b'Personne physique'), (b'MOR', b'Personne morale')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='is_pro',\n field=models.NullBooleanField(default=False, verbose_name=b\"\\xc3\\x8ates-vous un investisseur professionnel au sens de l'article 314-6 du r\\xc3\\xa8glement g\\xc3\\xa9n\\xc3\\xa9ral de l'AMF?\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='isf',\n field=models.NullBooleanField(default=False, verbose_name=b\"Assujeti \\xc3\\xa0 l'ISF\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='launder_money',\n field=models.NullBooleanField(default=False, verbose_name=b\"Les fonds que vous souhaitez investir proviennent-ils de sommes d\\xc3\\xa9pos\\xc3\\xa9es aupr\\xc3\\xa8s d'un \\xc3\\xa9tablissement de cr\\xc3\\xa9dit agr\\xc3\\xa9\\xc3\\xa9 en France?\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='lose_question',\n field=models.NullBooleanField(default=False, verbose_name=b'Avez-vous conscience que vous pouvez perdre \\xc3\\xa9ventuellement la totalit\\xc3\\xa9 de votre investissement ?', choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n 
model_name='businessangel',\n name='products_operator',\n field=models.NullBooleanField(default=False, verbose_name=b\"Effectuer des op\\xc3\\xa9rations sur des instruments financiers d'un montant sup\\xc3\\xa9rieur \\xc3\\xa0 600\\xe2\\x82\\xac par op\\xc3\\xa9ration, avec un minimum de 10 op\\xc3\\xa9rations par trimestre en moyenne sur les 12 derniers mois.\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='products_owner',\n field=models.NullBooleanField(verbose_name=b\"\\xc3\\x8atre d\\xc3\\xa9tenteur d'instruments financiers d'une valeur sup\\xc3\\xa9rieure \\xc3\\xa0 500k\", choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='raising_amount',\n field=models.CharField(default=b'ALL', choices=[(b'ALL', b'Indiff\\xc3\\xa9rent'), (b'100-250', b'100k\\xe2\\x82\\xac \\xc3\\xa0 250k\\xe2\\x82\\xac'), (b'250-500', b'250k\\xe2\\x82\\xac \\xc3\\xa0 500k\\xe2\\x82\\xac'), (b'500+', b'plus de 500k\\xe2\\x82\\xac')], max_length=10, blank=True, null=True, verbose_name=b'Raising amount'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='residence_ownersip',\n field=models.NullBooleanField(default=False, verbose_name=b'Propri\\xc3\\xa9taire de la r\\xc3\\xa9sidence principale', choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='businessangel',\n name='sum_part',\n field=models.NullBooleanField(verbose_name=b'Cette somme repr\\xc3\\xa9sente-t-elle moins de 10%% de votre patrimoine total ?', choices=[(True, 'Oui'), (False, 'Non')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='venturecapital',\n name='phone_number',\n field=phonenumber_field.modelfields.PhoneNumberField(help_text='Format : +33612547389', max_length=128, verbose_name=b'Num\\xc3\\xa9ro de t\\xc3\\xa9l\\xc3\\xa9phone'),\n preserve_default=True,\n ),\n ]\n", "sub_path": "api/investor/migrations/0031_auto_20141120_0432.py", "file_name": "0031_auto_20141120_0432.py", "file_ext": "py", "file_size_in_byte": 6330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 33, "usage_type": 
"call"}, {"api_name": "django.db.migrations", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 96, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.NullBooleanField", "line_number": 102, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 102, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 105, "usage_type": "name"}, {"api_name": "phonenumber_field.modelfields.modelfields.PhoneNumberField", "line_number": 108, "usage_type": "call"}, {"api_name": "phonenumber_field.modelfields.modelfields", "line_number": 108, "usage_type": "attribute"}, {"api_name": "phonenumber_field.modelfields", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "290954816", "text": "import os\r\nimport sys\r\nimport hashlib\r\nimport time\r\nimport sqlite3\r\nimport zlib\r\nimport datetime\r\nfrom sqlite3 import Error\r\n\r\nBLOCKSIZE = 524288\r\n\r\nstart_time = time.time()\r\n\r\n#walk_dir = sys.argv[1]\r\nwalk_dir = \"C:\"\r\nconn = sqlite3.connect('C:\\DataSets\\listing2.db')\r\nc = conn.cursor()\r\n# Create table\r\ntry:\r\n c.execute('''CREATE TABLE listing (file text, size number, hash text, last_modified real, listing_date real)''')\r\nexcept Error as e:\r\n print(e)\r\n\r\nprint('walk_dir = ' + walk_dir)\r\n\r\n# If your current working directory may change during script execution, it's recommended to\r\n# immediately convert program arguments to an absolute path. Then the variable root below will\r\n# be an absolute path as well. Example:\r\n# walk_dir = os.path.abspath(walk_dir)\r\nprint('walk_dir (absolute) = ' + os.path.abspath(walk_dir))\r\n\r\nfor root, subdirs, files in os.walk(walk_dir):\r\n print('--\\nroot = ' + root)\r\n #list_file_path = os.path.join(root, 'my-directory-list.txt')\r\n #print('list_file_path = ' + list_file_path)\r\n for subdir in subdirs: \r\n print('--\\nsubdir = ' + subdir )\r\n for file in files:\r\n print('--\\nfile = ' + file )\r\n file_path = os.path.join(root, file)\r\n #hasher=hashlib.blake2s()\r\n try: \r\n f_asum = 1\r\n with open(file_path, 'rb') as f:\r\n f_content = f.read(BLOCKSIZE)\r\n while len(f_content) > 0:\r\n #hasher.update(f_content)\r\n f_asum = zlib.adler32(f_content, f_asum)\r\n f_content = f.read(BLOCKSIZE)\r\n #f_hash=hasher.hexdigest()\r\n print('HASH: ' + str(f_asum)) \r\n f_size=os.path.getsize(file_path)\r\n last_modified = os.path.getmtime(file_path)\r\n print('Size: ' + str(f_size))\r\n print('Last_modified: ' + str(last_modified))\r\n chestia = file_path, f_size, f_asum, last_modified, str(time.time())\r\n c.execute(\"INSERT INTO LISTING VALUES (?,?,?,?,?)\",chestia)\r\n conn.commit()\r\n except Exception as e:\r\n print('Error: ' + file_path)\r\nconn.close()\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\n\r\n ", "sub_path": "loopy2.py", "file_name": "loopy2.py", "file_ext": "py", "file_size_in_byte": 2214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, 
{"api_name": "os.walk", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "zlib.adler32", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "158780335", "text": "import json\nfrom django.views.generic import FormView\nfrom hl_bigdata.forms import PostForm\nfrom django.shortcuts import render\nfrom hl_bigdata import models as m\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.template import Context, loader\nfrom datetime import datetime, timedelta\nfrom googlefinance import getQuotes\n\ndef list(request):\n two_days_ago = datetime.utcnow() - timedelta(days=2)\n recent_posts = m.Post.objects.filter(created_at__gt=two_days_ago).all()\n context = Context({\n 'post_list': recent_posts\n })\n # Render accepts three arguments: the request object, the\n # path of the template file and the context\n return render(request, 'list.html', context)\n\ndef post_form_upload(request):\n if request.method == 'GET':\n form = PostForm()\n else:\n # A POST request: Handle Form Upload\n form = PostForm(request.POST) # Bind data from request.POST into a PostForm\n\n # If data is valid, proceeds to create a new post and redirect the user\n if form.is_valid():\n content = form.cleaned_data['content']\n created_at = form.cleaned_data['created_at']\n post = m.Post.objects.create(content=content,\n created_at=created_at)\n return HttpResponseRedirect(reverse('post',kwargs={'post_id': post.id}))\n\n return render(request, 'form_upload.html', {\n 'form': form,\n })\n\ndef post_upload(request):\n if request.method == 'GET':\n return render(request, 'upload.html', {})\n elif request.method == 'POST':\n post = m.Post.objects.create(content=request.POST['content'],\n created_at=datetime.utcnow())\n # No need to call post.save() at this point -- it's already saved.\n return HttpResponseRedirect(reverse('post_detail', kwargs={'post_id': post.id}))\n\n\ndef post_detail(request, post_id):\n try:\n post = m.Post.objects.get(pk=post_id)\n except m.Post.DoesNotExist:\n # If no Post has id post_id, we raise an HTTP 404 error.\n raise Http404\n return render(request, 'detail.html', {'post': post})\n\ndef epic_form_upload(request):\n if request.method == 'GET':\n form = PostForm()\n else:\n # A POST request: Handle Form Upload\n form = PostForm(request.POST) # Bind data from request.POST into a PostForm\n\n # If data is valid, proceeds to create a new post and redirect the user\n if form.is_valid():\n content = form.cleaned_data['EPIC_code']\n created_at = form.cleaned_data['created_at']\n epic = m.EPIC.objects.create(EPIC_code=content,\n created_at=created_at)\n return HttpResponseRedirect(reverse('epic',kwargs={'epic_id': epic.id}))\n\n return render(request, 'epic.html', {\n 'form': form,\n })\n\ndef epic_upload(request):\n if request.method == 'GET':\n return render(request, 'epic.html', {})\n elif request.method == 'POST':\n epic = 
m.EPIC.objects.create(EPIC_code=request.POST['EPIC_code'],created_at=datetime.utcnow())\n redirect_url = reverse('epic_detail', kwargs={'epic_id': epic.EPIC_code.encode('utf8')})\n return HttpResponseRedirect(redirect_url)\n\n\ndef epic_detail(request, epic_id):\n dump = json.dumps(getQuotes(epic_id), indent=2)\n context = {'dump': dump}\n return render(request, 'epic_detail.html', context)\n", "sub_path": "hl_bigdata/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 13, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post", "line_number": 14, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "hl_bigdata.forms.PostForm", "line_number": 24, "usage_type": "call"}, {"api_name": "hl_bigdata.forms.PostForm", "line_number": 27, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post.objects.create", "line_number": 33, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post", "line_number": 33, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post.objects.create", "line_number": 45, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post", "line_number": 45, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 45, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 48, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 48, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "hl_bigdata.models.Post", "line_number": 53, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 53, "usage_type": "name"}, {"api_name": "hl_bigdata.models.Post", "line_number": 54, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "hl_bigdata.forms.PostForm", "line_number": 61, "usage_type": "call"}, {"api_name": "hl_bigdata.forms.PostForm", "line_number": 64, "usage_type": "call"}, {"api_name": "hl_bigdata.models.EPIC.objects.create", "line_number": 70, "usage_type": "call"}, {"api_name": "hl_bigdata.models.EPIC", "line_number": 70, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 70, 
"usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "hl_bigdata.models.EPIC.objects.create", "line_number": 82, "usage_type": "call"}, {"api_name": "hl_bigdata.models.EPIC", "line_number": 82, "usage_type": "attribute"}, {"api_name": "hl_bigdata.models", "line_number": 82, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 84, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 88, "usage_type": "call"}, {"api_name": "googlefinance.getQuotes", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "7621461", "text": "#!/usr/bin/env python3\n\nimport argparse\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\nimport pandas as pd\nimport numpy as np\nimport math\n\nimport os\nimport sys\nimport json\nimport glob\n\ndef main():\n # import sys\n # from IPython.core import ultratb\n # sys.excepthook = ultratb.FormattedTB(mode='Verbose',\n # color_scheme='Linux', call_pdb=1)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('workDir', type=str)\n args = parser.parse_args()\n\n trainF = os.path.join(args.workDir, 'train.csv')\n testF = os.path.join(args.workDir, 'test.csv')\n\n trainDf = pd.read_csv(trainF, sep=',')\n testDf = pd.read_csv(testF, sep=',')\n\n plotLoss(trainDf, testDf, args.workDir)\n plotErr(trainDf, testDf, args.workDir)\n\n initDf = os.path.join(args.workDir, 'D.init')\n if os.path.exists(initDf):\n initD = np.loadtxt(initDf)\n latestD = np.loadtxt(os.path.join(args.workDir, 'D.latest'))\n plotD(initD, latestD, args.workDir)\n\n loss_fname = os.path.join(args.workDir, 'loss.png')\n err_fname = os.path.join(args.workDir, 'err.png')\n loss_err_fname = os.path.join(args.workDir, 'loss-error.png')\n os.system('convert +append \"{}\" \"{}\" \"{}\"'.format(loss_fname, err_fname, loss_err_fname))\n print('Created {}'.format(loss_err_fname))\n\ndef plotLoss(trainDf, testDf, workDir):\n # fig, ax = plt.subplots(1, 1, figsize=(5,2))\n fig, ax = plt.subplots(1, 1)\n # fig.tight_layout()\n\n trainEpoch = trainDf['epoch'].values\n trainLoss = trainDf['loss'].values\n\n N = np.argmax(trainEpoch==1.0)\n trainEpoch = trainEpoch[N:]\n trainLoss = [sum(trainLoss[i-N:i])/N for i in range(N, len(trainLoss))]\n plt.plot(trainEpoch, trainLoss, label='Train')\n if not testDf.empty:\n plt.plot(testDf['epoch'].values, testDf['loss'].values, label='Test')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"MSE\")\n plt.xlim(xmin=0)\n plt.grid(b=True, which='major', color='k', linestyle='-')\n plt.grid(b=True, which='minor', color='k', linestyle='--', alpha=0.2)\n plt.legend()\n # ax.set_yscale('log')\n ax.set_ylim(0, None)\n for ext in ['pdf', 'png']:\n f = os.path.join(workDir, \"loss.\"+ext)\n fig.savefig(f)\n print(\"Created {}\".format(f))\n\ndef plotErr(trainDf, testDf, workDir):\n # fig, ax = plt.subplots(1, 1, figsize=(5,2))\n fig, ax = plt.subplots(1, 
1)\n # fig.tight_layout()\n\n trainEpoch = trainDf['epoch'].values\n trainLoss = trainDf['err'].values\n\n N = np.argmax(trainEpoch==1.0)\n trainEpoch = trainEpoch[N:]\n trainLoss = [sum(trainLoss[i-N:i])/N for i in range(N, len(trainLoss))]\n plt.plot(trainEpoch, trainLoss, label='Train')\n if not testDf.empty:\n plt.plot(testDf['epoch'].values, testDf['err'].values, label='Test')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Error\")\n plt.xlim(xmin=0)\n plt.grid(b=True, which='major', color='k', linestyle='-')\n plt.grid(b=True, which='minor', color='k', linestyle='--', alpha=0.2)\n plt.legend()\n # ax.set_yscale('log')\n ax.set_ylim(0, None)\n for ext in ['pdf', 'png']:\n f = os.path.join(workDir, \"err.\"+ext)\n fig.savefig(f)\n print(\"Created {}\".format(f))\n\ndef plotD(initD, latestD, workDir):\n def p(D, fname):\n plt.clf()\n lim = max(np.abs(np.min(D)), np.abs(np.max(D)))\n clim = (-lim, lim)\n plt.imshow(D, cmap='bwr', interpolation='nearest', clim=clim)\n plt.colorbar()\n plt.savefig(os.path.join(workDir, fname))\n\n p(initD, 'initD.png')\n p(latestD, 'latestD.png')\n\n latestDs = latestD**6\n latestDs = latestDs/np.sum(latestDs, axis=1)[:,None]\n I = np.argsort(latestDs.dot(np.arange(latestDs.shape[1])))\n latestDs = latestD[I]\n initDs = initD[I]\n\n p(initDs, 'initD_sorted.png')\n p(latestDs, 'latestD_sorted.png')\n\n # Dcombined = np.concatenate((initDs, np.zeros((initD.shape[0], 10)), latestDs), axis=1)\n # p(Dcombined, 'Dcombined.png')\n\nif __name__ == '__main__':\n main()\n", "sub_path": "sudoku/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 4017, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, 
{"api_name": "os.system", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 106, 
"usage_type": "call"}, {"api_name": "numpy.min", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "621885187", "text": "from dataclasses import dataclass\n\nfrom .color import Color\nfrom .point import Point\n\n\n@dataclass\nclass Triangle:\n '''\n Хранит информацию о трегуольнике.\n '''\n\n color: Color\n a: Point\n b: Point\n c: Point\n\n @staticmethod\n def read(buffer) -> 'Triangle':\n color = Color.read(buffer)\n buffer.skip_whitespaces()\n a = Point.read(buffer)\n buffer.skip_whitespaces()\n b = Point.read(buffer)\n buffer.skip_whitespaces()\n c = Point.read(buffer)\n return Triangle(color, a, b, c)\n\n def perimiter(self):\n return (self.a.distance(self.b)\n + self.b.distance(self.c)\n + self.c.distance(self.a))\n\n def __str__(self) -> str:\n return (f'Triangle: color={self.color}, '\n f'points={self.a} - {self.b} - {self.c}')\n", "sub_path": "AVS3/triangle.py", "file_name": "triangle.py", "file_ext": "py", "file_size_in_byte": 869, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "color.Color", "line_number": 13, "usage_type": "name"}, {"api_name": "point.Point", "line_number": 14, "usage_type": "name"}, {"api_name": "point.Point", "line_number": 15, "usage_type": "name"}, {"api_name": "point.Point", "line_number": 16, "usage_type": "name"}, {"api_name": "color.Color.read", "line_number": 20, "usage_type": "call"}, {"api_name": "color.Color", "line_number": 20, "usage_type": "name"}, {"api_name": "point.Point.read", "line_number": 22, "usage_type": "call"}, {"api_name": "point.Point", "line_number": 22, "usage_type": "name"}, {"api_name": "point.Point.read", "line_number": 24, "usage_type": "call"}, {"api_name": "point.Point", "line_number": 24, "usage_type": "name"}, {"api_name": "point.Point.read", "line_number": 26, "usage_type": "call"}, {"api_name": "point.Point", "line_number": 26, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "146214300", "text": "# encoding:utf-8\n# memorizer main file\n\nfrom kivy.app import App\n# from kivy.uix.widget import Widget\nfrom kivy.uix.boxlayout import BoxLayout\n# from kivy.uix.label import Label\nfrom kivy.clock import Clock\n# from kivy.properties import ObjectProperty\n\n\nimport time\nimport random\n\n\n# count = 10\ncount = 3\n\nclass Memorizer(BoxLayout):\n\n def time_left(self, *kwargs): # ????\n print('Args: {}'.format(kwargs))\n global count\n print('count: {}'.format(count))\n time = str(count) + ' secs left'\n if count:\n count -= 1\n print('count decreased!')\n else:\n print(\"i'm 
here!\")\n self.ids.time_left.text = 'Time is over!'\n self.ids.time_left.text = time\n return None\n\n def number(self):\n return random.randint(100000, 999999)\n\nclass MemorizerApp(App):\n\n def build(self):\n self.title = 'Memorizer'\n memorizer = Memorizer()\n Clock.schedule_interval(memorizer.time_left, 1.0)\n return memorizer\n\n\nif __name__ == '__main__':\n MemorizerApp().run()\n", "sub_path": "memorizer.py", "file_name": "memorizer.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 19, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 38, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_interval", "line_number": 43, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "582091359", "text": "import torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\nimport sys\n\nfrom collections import defaultdict\nimport urllib, csv\n\n\ndef data_to_dict(data):\n data_dict = defaultdict(list)\n items = set()\n for (user, item) in data:\n data_dict[user].append(item)\n items.add(item)\n return data_dict, set(data_dict.keys()), items\n\n\ndef load_data_from_csv(csv_file, users_to_i={}, items_to_i={}, min_purchase_value=15):\n \"\"\"\n Loads data from a CSV file located at `csv_file`\n where each line is of the form:\n user_id_1, item_id_1\n ...\n user_id_n, item_id_n\n Initial mappings from user and item identifiers\n to integers can be passed using `users_to_i`\n and `items_to_i` respectively.\n This function will return a data array consisting\n of (user, item) tuples, a mapping from user ids to integers\n and a mapping from item ids to integers.\n \"\"\"\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n for i, (user, item) in enumerate(csvreader):\n if i + 1 % 1000000 == 0:\n print(user, item)\n print(\"{} iter\".format(i))\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i,\n min_purchase_value=min_purchase_value)\n\n\ndef load_data_from_movielens(url, threshold, users_to_i = {}, items_to_i = {}):\n \"\"\"\n Loads movielens data from a URL, e.g.\n http://files.grouplens.org/datasets/movielens/ml-100k/\n Initial mappings from user and item identifiers\n to integers can be passed using `users_to_i`\n and `items_to_i` respectively.\n This function will return a data array consisting\n of (user, item) tuples, a mapping from user ids to integers\n and a mapping from item ids to integers.\n \"\"\"\n raw_data = []\n for index, line in enumerate(open(url, 'r')):\n if index > 100000:\n break\n if index == 0:\n continue\n user, item, rating, timestamp = line.split(',')\n if index + 1 % 10000:\n print(index + 1, user, item)\n if float(rating) > threshold:\n raw_data.append((user, item))\n return load_data_from_array(raw_data)\n\n\ndef load_data_from_array(array, users_to_i = {}, items_to_i = {}, min_purchase_value=3):\n \"\"\"\n Loads data from an array of tuples of the form:\n (user_id, item_id)\n Initial mappings from user and item identifiers\n to integers can be passed using `users_to_i`\n and `items_to_i` respectively.\n This function will return a data array consisting\n of (user, item) tuples, a mapping from user ids to integers\n and a mapping from item ids to integers.\n 
\"\"\"\n data = []\n count_u = {}\n\n # If already define\n if len(users_to_i.values()) > 0:\n u = max(users_to_i.values()) + 1\n else:\n u = 0\n if len(items_to_i.values()) > 0:\n i = max(items_to_i.values()) + 1\n else:\n i = 0\n\n # Check users and item with more than n occurances\n for j, (user, item) in enumerate(array):\n if not count_u.has_key(user):\n count_u[user] = 0\n count_u[user] += 1\n\n # Store real data indexes\n for k, (user, item) in enumerate(array):\n\n if count_u[user] < min_purchase_value:\n continue\n\n if k + 1 % 1000000 == 0:\n print(\"{} order\".format(k))\n\n if not users_to_i.has_key(user):\n users_to_i[user] = u\n u += 1\n\n if not items_to_i.has_key(item):\n items_to_i[item] = i\n i += 1\n data.append((users_to_i[user], items_to_i[item]))\n\n return data, users_to_i, items_to_i\n\n\nclass FactorizationMachine(object):\n def __init__(self, train_dict, _train_users, _train_items,\n order=3, ranks=[10, 5], data_size=10, learning_rate=1e-3):\n # Model parameters\n self.order = order\n self.data_size = data_size\n self.ranks = ranks\n\n # Training parameter\n self._train_dict = train_dict\n self._train_users = _train_users\n self._n_items = _train_items\n self.lr = learning_rate\n\n # Class param : output\n self.y = Variable(torch.randn(1), requires_grad=True, volatile=False)\n\n # Each factor order has its own rank and own matrix size\n self.V_dict = {i: Variable(torch.randn(data_size, order_rank),\n requires_grad=True, volatile=False)\n for i, order_rank in enumerate(ranks)}\n self.W = Variable(torch.randn(data_size, 1), requires_grad=True, volatile=False)\n\n def forward(self, sample):\n # For each order, we get the appropriate element\n # For order 1\n sum_ = self.W.dot(sample)\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = (self.V_dict[idx][f, :].dot(sample)) ** o\n # sum_i( vif ** order xi ** order)\n elem += ((self.V_dict[idx][f, :] ** o).dot(sample ** o))\n sum_ += (1. / 2**(o - 1)) * elem\n return sum_\n\n def forward_couple(self, psample, nsample):\n self.y = self.forward(psample) - self.forward(nsample)\n\n def forward_backward(self, psample, nsample):\n print(self.W.shape, psample.shape)\n p_y = self.W.dot(psample)\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = (self.V_dict[idx][f, :].dot(psample)) ** o\n # sum_i( vif ** order xi ** order)\n elem += ((self.V_dict[idx][f, :] ** o).dot(psample ** o))\n p_y += (1. / 2**(o - 1)) * elem\n\n n_y = self.W.dot(nsample)\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = (self.V_dict[idx][f, :].dot(nsample)) ** o\n # sum_i( vif ** order xi ** order)\n elem += ((self.V_dict[idx][f, :] ** o).dot(nsample ** o))\n n_y += (1. / 2**(o - 1)) * elem\n\n y = p_y - n_y\n y.backward()\n\n self.W -= self.lr * self.W.grad\n for k in self.V_dict.keys():\n self.V_dict[k] -= self.lr * self.V_dict[k].grad\n\n def _uniform_user_sampling(self, n_samples):\n \"\"\"\n Creates `n_samples` random samples from training data for performing Stochastic\n Gradient Descent. 
We start by uniformly sampling users,\n and then sample a positive and a negative item for each\n user sample.\n \"\"\"\n sys.stderr.write(\"Generating %s random training samples\\n\" % str(n_samples))\n sgd_users = np.array(list(self._train_users))\\\n [np.random.randint(len(list(self._train_users)), size=n_samples)]\n sgd_pos_items, sgd_neg_items = [], []\n for sgd_user in sgd_users:\n pos_item = self._train_dict[sgd_user]\\\n [np.random.randint(len(self._train_dict[sgd_user]))]\n sgd_pos_items.append(pos_item)\n neg_item = np.random.randint(self._n_items)\n while neg_item in self._train_dict[sgd_user]:\n neg_item = np.random.randint(self._n_items)\n sgd_neg_items.append(neg_item)\n return sgd_users, sgd_pos_items, sgd_neg_items\n\n def full_forward_backward(self, user, pitem, nitem, max_user, sample_size):\n #print(max_user, sample_size, pitem, nitem)\n p = torch.zeros(sample_size)\n p[user] = 1\n p[max_user + pitem] = 1\n psample = Variable(p, requires_grad=True)\n\n s = torch.zeros(sample_size)\n s[user] = 1\n s[max_user + nitem] = 1\n nsample = Variable(s, requires_grad=True)\n\n p_y = self.W.dot(psample)\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = (self.V_dict[idx][f, :].dot(psample)) ** o\n # sum_i( vif ** order xi ** order)\n elem += ((self.V_dict[idx][f, :] ** o).dot(psample ** o))\n p_y += (1. / 2**(o - 1)) * elem\n\n n_y = self.W.dot(nsample)\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = (self.V_dict[idx][f, :].dot(nsample)) ** o\n # sum_i( vif ** order xi ** order)\n elem += ((self.V_dict[idx][f, :] ** o).dot(nsample ** o))\n n_y += (1. / 2**(o - 1)) * elem\n\n y = p_y - n_y\n y.backward()\n\n self.W.data -= self.lr * self.W.grad.data\n for k in self.V_dict.keys():\n self.V_dict[k].data -= self.lr * self.V_dict[k].grad.data\n\n def batch_forward_backward(self, users, pitems, nitems, max_user, sample_size):\n batch_size = len(pitems)\n\n p = torch.zeros(batch_size, sample_size)\n s = torch.zeros(batch_size, sample_size)\n for u in range(batch_size):\n p[u, users[u]] = 1\n p[u, max_user + pitems[u]] = 1\n s[u, users[u]] = 1\n s[u, max_user + nitems[u]] = 1\n\n psample = Variable(p, requires_grad=True)\n nsample = Variable(s, requires_grad=True)\n\n print('() : ', psample.data.numpy().shape, self.W.data.numpy().shape)\n p_y = torch.mm(psample, self.W)\n\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = torch.mm(psample, self.V_dict[idx]) ** o\n # sum_i( vif ** order xi ** order)\n elem += torch.mm((psample ** o), (self.V_dict[idx] ** o))\n p_y += (1. / 2**(o - 1)) * elem.sum(1)\n\n n_y = torch.mm(nsample, self.W)\n for idx, o in enumerate(range(2, self.order + 1)):\n for f in range(self.ranks[idx]):\n # sum_i( vif xi) ** order\n elem = torch.mm(nsample, self.V_dict[idx]) ** o\n # sum_i( vif ** order xi ** order)\n elem += torch.mm((nsample ** o), (self.V_dict[idx] ** o))\n n_y += (1. 
/ 2**(o - 1)) * elem.sum(1)\n\n y = (p_y - n_y)\n solution = torch.ones((batch_size, 1))\n y.backward(solution)\n\n self.W.data -= self.lr * self.W.grad.data\n for k in self.V_dict.keys():\n self.V_dict[k].data -= self.lr * self.V_dict[k].grad.data\n\n", "sub_path": "fm_core/train_fm.py", "file_name": "train_fm.py", "file_ext": "py", "file_size_in_byte": 10702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.defaultdict", "line_number": 12, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 143, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 195, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 281, "usage_type": "call"}]} +{"seq_id": "52831665", "text": "import requests\n\nfrom operator import itemgetter\n\nurl='https://hacker-news.firebaseio.com/v0/topstories.json'\n\nr = requests.get(url)\n# get the result json from the topstories into an object submission_ids\nsubmission_ids = r.json()\n\n# an empty list\nsubmission_dicts = []\n\n# loops through top 30 news from the above json\nfor submission_id in submission_ids[:30]:\n # creates another json url to request the information about the particular news\n url = ("https://hacker-news.firebaseio.com/v0/item/"+str(submission_id)+".json")\n submission_r = requests.get(url)\n\n # gets the status of the api call\n print(submission_r.status_code)\n # create an object that holds the api response for each story\n response_dict = submission_r.json()\n\n # create a dictionary that holds the title, link and the number of comments for each story\n submission_dict = {\n 'title': response_dict['title'],\n 'link': \"http://news.ycombinator.com/item?id=\"+str(submission_id),\n 'comments': response_dict.get('descendants', 0)\n\n }\n\n # append this submission's dictionary to the list of submission dicts\n submission_dicts.append(submission_dict)\n\n# sort the above dictionary based on the key number of comments\nsubmission_dicts = sorted(submission_dicts, key=itemgetter('comments'), reverse=True)\n\n# a separate print loop to print each story's title, discussion link and number of comments\nfor submission_dict in submission_dicts:\n\n print(\"\\nTitle:\", submission_dict['title'])\n\n print(\"Discussion link:\", submission_dict['link'])\n\n print(\"Comments:\", submission_dict['comments'])\n\n\n\n", "sub_path": "API call in Python/hn_submissions.py", "file_name": "hn_submissions.py", "file_ext": "py", "file_size_in_byte": 1638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "143495959", "text": "import collections\nimport re\n\nfrom buildbot.plugins import steps, util\nfrom buildbot.process import buildstep\nfrom buildbot.status.results import SUCCESS\nfrom twisted.internet import defer\nimport yaml\n\nimport environments as envs\n\n\nSERVO_REPO = \"https://github.com/servo/servo\"\n\n\nclass CheckRevisionStep(buildstep.BuildStep):\n \"\"\"\\\n Step which checks to ensure the revision that triggered the build\n is the same revision that we actually checked out,\n and fails the build if this is not the case.\n \"\"\"\n\n haltOnFailure = True\n flunkOnFailure = True\n\n def __init__(self, **kwargs):\n buildstep.BuildStep.__init__(self, **kwargs)\n\n @defer.inlineCallbacks\n def run(self):\n rev = self.getProperty('revision')\n got_rev = self.getProperty('got_revision')\n\n # `revision` can be None if the build is not tied to a single commit,\n # e.g. 
if \"force build\" is requested on the status page\n if rev is not None and rev != got_rev:\n raise Exception(\n \"Actual commit ({}) differs from requested commit ({})\".format(\n got_rev, rev\n )\n )\n\n yield defer.returnValue(SUCCESS)\n\n\nclass ServoFactory(util.BuildFactory):\n \"\"\"\\\n Build factory which checks out the servo repo as the first build step.\n \"\"\"\n\n def __init__(self, build_steps):\n \"\"\"\\\n Takes a list of Buildbot steps.\n Prefer using DynamicServoFactory to using this class directly.\n \"\"\"\n all_steps = [\n steps.Git(\n repourl=SERVO_REPO,\n mode=\"full\", method=\"fresh\", retryFetch=True\n ),\n CheckRevisionStep(),\n ] + build_steps\n # util.BuildFactory is an old-style class so we cannot use super()\n # but must hardcode the superclass here\n util.BuildFactory.__init__(self, all_steps)\n\n\nclass StepsYAMLParsingStep(buildstep.ShellMixin, buildstep.BuildStep):\n \"\"\"\\\n Step which reads the YAML steps configuration in the main servo repo\n and dynamically adds test steps.\n \"\"\"\n\n haltOnFailure = True\n flunkOnFailure = True\n workdir = None\n\n def __init__(self, builder_name, environment, yaml_path, **kwargs):\n kwargs = self.setupShellMixin(kwargs)\n buildstep.BuildStep.__init__(self, **kwargs)\n self.builder_name = builder_name\n self.environment = environment\n self.yaml_path = yaml_path\n\n def setDefaultWorkdir(self, workdir):\n buildstep.BuildStep.setDefaultWorkdir(self, workdir)\n self.workdir = workdir\n\n @defer.inlineCallbacks\n def run(self):\n self.is_windows = re.match('windows', self.builder_name)\n try:\n show_cmd = \"cat\" if not self.is_windows else \"type\"\n native_yaml_path = self.yaml_path\n if self.is_windows:\n native_yaml_path = native_yaml_path.replace('/', '\\\\')\n cmd = yield self.makeRemoteShellCommand(\n command=[show_cmd, native_yaml_path],\n collectStdout=True\n )\n yield self.runCommand(cmd)\n\n result = cmd.results()\n if result != util.SUCCESS:\n raise Exception(\"Command failed with return code: {}\" .format(\n str(cmd.rc)\n ))\n else:\n config = yaml.safe_load(cmd.stdout)\n builder_config = config[self.builder_name]\n\n commands = None\n env = self.environment\n env += envs.Environment(config.get('env', {}))\n if isinstance(builder_config, collections.Mapping):\n commands = builder_config['commands']\n env += envs.Environment(builder_config.get('env', {}))\n else:\n commands = builder_config\n\n dynamic_steps = [\n self.make_step(command, env) for command in commands\n ]\n except Exception as e: # Bad step configuration, fail build\n # Capture the exception and re-raise with a friendly message\n raise Exception(\"Bad step configuration for {}: {}\".format(\n self.builder_name,\n str(e)\n ))\n\n pkill_step = [self.make_pkill_step(\"servo\")]\n self.add_steps(pkill_step + dynamic_steps)\n\n defer.returnValue(result)\n\n def add_steps(self, steps):\n \"\"\"\\\n Adds new steps to this build, making sure to avoid name collisions\n by adding counts to disambiguate multiple steps of the same type,\n and respecting internal Buildbot invariants.\n Semi-polyfill for addStepsAfterLastStep from Buildbot 9.\n \"\"\"\n\n def step_type(step):\n return step.name.split('__')[0]\n\n name_counts = collections.Counter()\n\n # Check for existing max step counts for each type of step\n # in the existing steps on the build.\n # Adding multiple steps at the same time makes it more efficient\n # to check for collisions since this is amortized over all\n # steps added together.\n for step in self.build.steps:\n 
name_counts[step_type(step)] += 1\n\n # Add new steps, updating `name_counts` along the way\n for step in steps:\n existing_count = name_counts[step_type(step)]\n if existing_count > 0:\n # First step has count = 0 but no suffix,\n # so second step will have `__1` as suffix, etc.\n step.name += '__{}'.format(existing_count)\n name_counts[step_type(step)] += 1\n self._add_step(step)\n\n def _add_step(self, step):\n \"\"\"\\\n Adds a new step to this build, making sure to maintain internal\n Buildbot invariants.\n Do not call this method directly, but go through add_steps\n to prevent `name` collisions.\n \"\"\"\n step.setBuild(self.build)\n step.setBuildSlave(self.build.slavebuilder.slave)\n step.setDefaultWorkdir(self.workdir)\n self.build.steps.append(step)\n\n step_status = self.build.build_status.addStepWithName(step.name)\n step.setStepStatus(step_status)\n\n def make_step(self, command, env):\n step_kwargs = {}\n step_env = env\n\n command = command.split(' ')\n\n step_kwargs['command'] = command\n if self.is_windows:\n step_env += envs.Environment({\n # Set home directory, to avoid adding `cd` command every time\n 'HOME': r'C:\\buildbot\\slave\\{}\\build'.format(\n self.builder_name\n ),\n })\n\n step_desc = []\n step_class = steps.ShellCommand\n args = iter(command)\n for arg in args:\n if arg == './mach' or arg == 'mach.bat':\n mach_arg = next(args)\n step_desc = [mach_arg]\n\n # Change Step class to capture warnings as needed\n # (steps.Compile and steps.Test catch warnings)\n if re.match('build(-.*)?', mach_arg):\n step_class = steps.Compile\n elif re.match('package', mach_arg):\n step_class = steps.Compile\n elif re.match('test-.*', mach_arg):\n step_class = steps.Test\n\n # Provide credentials where necessary\n if re.match('upload-nightly', mach_arg):\n step_kwargs['logEnviron'] = False\n step_env += envs.upload_nightly\n\n # Capture any logfiles\n elif re.match('--log-.*', arg):\n logfile = next(args)\n if 'logfiles' not in step_kwargs:\n step_kwargs['logfiles'] = {}\n step_kwargs['logfiles'][logfile] = logfile\n\n else:\n step_desc += [arg]\n\n if step_class != steps.ShellCommand:\n step_kwargs['description'] = \"running\"\n step_kwargs['descriptionDone'] = \"ran\"\n step_kwargs['descriptionSuffix'] = \" \".join(step_desc)\n\n step_kwargs['env'] = step_env\n return step_class(**step_kwargs)\n\n def make_pkill_step(self, target):\n if self.is_windows:\n pkill_command = [\"powershell\", \"kill\", \"-n\", target]\n else:\n pkill_command = [\"pkill\", \"-x\", target]\n\n return steps.ShellCommand(\n command=pkill_command,\n decodeRC={0: SUCCESS, 1: SUCCESS}\n )\n\n\nclass DynamicServoFactory(ServoFactory):\n \"\"\"\\\n Smart factory which takes a list of shell commands\n from a YAML file located in the main servo/servo repository\n and creates the appropriate Buildbot Steps.\n Uses heuristics to infer Step type, if there are any logfiles, etc.\n \"\"\"\n\n def __init__(self, builder_name, environment):\n\n # util.BuildFactory is an old-style class so we cannot use super()\n # but must hardcode the superclass here\n ServoFactory.__init__(self, [\n StepsYAMLParsingStep(builder_name, environment,\n \"etc/ci/buildbot_steps.yml\")\n ])\n\n\ndoc = ServoFactory([\n # This is not dynamic because a) we need to pass the logEnviron kwarg\n # and b) changes to the documentation build are already encapsulated\n # in the upload_docs.sh script; any further changes should go through\n # the saltfs repo to avoid leaking the token.\n steps.ShellCommand(command=[\"etc/ci/upload_docs.sh\"],\n 
env=envs.doc,\n # important not to leak token\n logEnviron=False),\n])\n", "sub_path": "buildbot/master/files/config/factories.py", "file_name": "factories.py", "file_ext": "py", "file_size_in_byte": 9654, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "buildbot.process.buildstep.BuildStep", "line_number": 16, "usage_type": "attribute"}, {"api_name": "buildbot.process.buildstep", "line_number": 16, "usage_type": "name"}, {"api_name": "buildbot.process.buildstep.BuildStep.__init__", "line_number": 27, "usage_type": "call"}, {"api_name": "buildbot.process.buildstep.BuildStep", "line_number": 27, "usage_type": "attribute"}, {"api_name": "buildbot.process.buildstep", "line_number": 27, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 43, "usage_type": "call"}, {"api_name": "buildbot.status.results.SUCCESS", "line_number": 43, "usage_type": "argument"}, {"api_name": "twisted.internet.defer", "line_number": 43, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 29, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 29, "usage_type": "name"}, {"api_name": "buildbot.plugins.util.BuildFactory", "line_number": 46, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.util", "line_number": 46, "usage_type": "name"}, {"api_name": "buildbot.plugins.steps.Git", "line_number": 57, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps", "line_number": 57, "usage_type": "name"}, {"api_name": "buildbot.plugins.util.BuildFactory.__init__", "line_number": 65, "usage_type": "call"}, {"api_name": "buildbot.plugins.util.BuildFactory", "line_number": 65, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.util", "line_number": 65, "usage_type": "name"}, {"api_name": "buildbot.process.buildstep.ShellMixin", "line_number": 68, "usage_type": "attribute"}, {"api_name": "buildbot.process.buildstep", "line_number": 68, "usage_type": "name"}, {"api_name": "buildbot.process.buildstep.BuildStep", "line_number": 68, "usage_type": "attribute"}, {"api_name": "buildbot.process.buildstep.BuildStep.__init__", "line_number": 80, "usage_type": "call"}, {"api_name": "buildbot.process.buildstep.BuildStep", "line_number": 80, "usage_type": "attribute"}, {"api_name": "buildbot.process.buildstep", "line_number": 80, "usage_type": "name"}, {"api_name": "buildbot.process.buildstep.BuildStep.setDefaultWorkdir", "line_number": 86, "usage_type": "call"}, {"api_name": "buildbot.process.buildstep.BuildStep", "line_number": 86, "usage_type": "attribute"}, {"api_name": "buildbot.process.buildstep", "line_number": 86, "usage_type": "name"}, {"api_name": "re.match", "line_number": 91, "usage_type": "call"}, {"api_name": "buildbot.plugins.util.SUCCESS", "line_number": 104, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.util", "line_number": 104, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 109, "usage_type": "call"}, {"api_name": "environments.Environment", "line_number": 114, "usage_type": "call"}, {"api_name": "collections.Mapping", "line_number": 115, "usage_type": "attribute"}, {"api_name": "environments.Environment", "line_number": 117, "usage_type": "call"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 134, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 134, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 
89, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 89, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 147, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps", "line_number": 158, "usage_type": "name"}, {"api_name": "environments.Environment", "line_number": 190, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps.ShellCommand", "line_number": 198, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.steps", "line_number": 198, "usage_type": "name"}, {"api_name": "re.match", "line_number": 207, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps.Compile", "line_number": 208, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.steps", "line_number": 208, "usage_type": "name"}, {"api_name": "re.match", "line_number": 209, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps.Compile", "line_number": 210, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.steps", "line_number": 210, "usage_type": "name"}, {"api_name": "re.match", "line_number": 211, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps.Test", "line_number": 212, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.steps", "line_number": 212, "usage_type": "name"}, {"api_name": "re.match", "line_number": 215, "usage_type": "call"}, {"api_name": "environments.upload_nightly", "line_number": 217, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 220, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps.ShellCommand", "line_number": 229, "usage_type": "attribute"}, {"api_name": "buildbot.plugins.steps", "line_number": 229, "usage_type": "name"}, {"api_name": "buildbot.plugins.steps.ShellCommand", "line_number": 243, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps", "line_number": 243, "usage_type": "name"}, {"api_name": "buildbot.status.results.SUCCESS", "line_number": 245, "usage_type": "name"}, {"api_name": "buildbot.plugins.steps.ShellCommand", "line_number": 272, "usage_type": "call"}, {"api_name": "buildbot.plugins.steps", "line_number": 272, "usage_type": "name"}, {"api_name": "environments.doc", "line_number": 273, "usage_type": "attribute"}]} +{"seq_id": "384093639", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport tkMessageBox\nimport MySQLdb\nimport mysql.connector\nfrom fpdf import FPDF\nimport decimal\nfrom datetime import *\nimport os, sys\nfrom cargo import Cargo\n\n#Definimos la clase Recibo\nclass Recibo(object):\n __numero_Recibo = None\n\n \"\"\"Constructor\n * @param numero_Recibo,fechaPeriodo\n * @return no devuelve nada\n \"\"\"\n def __init__(self, numero_Recibo=\"\"):\n self.__numero_Recibo = numero_Recibo\n\n \"\"\"Getter numero_Recibo.\n * @param Ninguno.\n * @return devuelve el numero del recibo\n */\n \"\"\"\n def getNumero_Recibo(self):\n return self.__numero_Recibo\n\n \"\"\"Setter numero_Recibo.\n * @param numero_Recibo.\n * @return no devuelve nada.\n */\n \"\"\"\n def setNumero_Recibo(self, numero_Recibo):\n self.__numero_Recibo = numero_Recibo\n\n numero_Recibo = property(fget= getNumero_Recibo,fset=setNumero_Recibo)\n\n \"\"\"Función sueldo_Basico.\n * @param puntos.\n * @return calcula y devuelve el sueldo Básico del Docente.\n */\n \"\"\"\n def sueldo_Basico(self,puntos):\n sueldoBasico = round((3.437393 * puntos),2)\n return sueldoBasico\n\n \"\"\"Función monto_Anti.\n * @param los años y sueldo Básico.\n * @return calcula y devuelve el monto de la antigüedad.\n */\n \"\"\"\n def monto_Anti(self, anios, sueldoB):\n if 
anios > 25:\n monto_Anti = round((1.20 * sueldoB),2)\n else:\n porcentaje= [0,0.10,0.15,0.15,0.15,0.30,0.30,0.40,0.40,0.40,0.50,0.50,0.60,0.60,0.60,0.70,0.70,0.80,0.80,0.80,1,1,1.10,1.10,1.20]\n monto_Anti = round((porcentaje[anios] * sueldoB),2)\n return monto_Anti\n\n \"\"\"Función suma_Zona.\n * @param sueldo básico y el porcentaje de la zona.\n * @return calcula y devuelve la suma de la zona donde trabaja el doncente.\n */\n \"\"\"\n def suma_Zona(self, sueldoB, porcentajeZona):\n sumaZona = round((sueldoB * porcentajeZona),2)\n return sumaZona\n\n \"\"\" Función presentismo.\n * @param sueldo Básico y el monto de la antiguedad.\n * @return calcula y devuelve la suma del presentismo.\n */\n \"\"\"\n def presentismo(self, sueldoB,montoAnti):\n presentissmo = round(((sueldoB + montoAnti)* 0.75)*0.08 ,2)\n return presentissmo\n\n \"\"\" Función SubTotal1.\n * @param sueldo básico, monto Antiguedad, suma de la zona y presentismo.\n * @return calcular y devolver las ganancias del docente.\n */\n \"\"\"\n def subTotal1(self, sueldoB, montoAnti, sumaZona, presentismo):\n subtotal1 = round((sueldoB + montoAnti + sumaZona + presentismo),2)\n return subtotal1\n\n \"\"\"Función Jubilacion.\n * @param subTotal1.\n * @return calcula y devuelve el descuento de la jubilación.\n */\n \"\"\"\n def jubilacion(self, subtotal1):\n montoJubilacion = round((subtotal1 * 0.20),2)\n return montoJubilacion\n\n \"\"\" Funcion Obra Social.\n * @param subtotal1 y descuento de la obra social.\n * @return calcula y devuelve el monto de la obra social..\n */\n \"\"\"\n def obraSocial(self, subtotal1, descuento_Obra):\n descuentoObra = round((subtotal1 * descuento_Obra),2)\n return descuentoObra\n\n \"\"\"Funcion seguro.\n * @param .\n * @return devuelve el monto del seguro de vida.\n */\n \"\"\"\n def seguro(self):\n return 300\n\n \"\"\"Funcion subtotal2.\n * @param jubilacion, descuento de la obra social y seguro.\n * @return calcula y devuelve los descuentos hacia el docentes.\n */\n \"\"\"\n def subTotal2(self, jubilacion, descuento_Obra, seguro):\n subtotal2 = round((jubilacion + descuento_Obra + seguro),2)\n return subtotal2\n\n \"\"\"Funcion total.\n * @param subtotal1 subTotal2.\n * @return calcula y devuelve el monto final a cobrar por parte del docente.\n */\n \"\"\"\n def total(self, subtotal1, subtotal2):\n montoTotal = round((subtotal1 - subtotal2),2)\n return montoTotal\n\n \"\"\"Funcion calcularPeriodo.\n * @param ninguno.\n * @return calcula y devuelve el periodo del recibo a generar.\n */\n \"\"\"\n def calcularPeriodo(self):\n mes = str(datetime.today().month)\n anio = str(datetime.today().year)\n return (mes + anio)\n\n\n \"\"\"Funcion buscarPeriodo\n * @param ninguno.\n * @return devuelve el periodo del recibo.\n */\n \"\"\"\n def buscarPeriodo(self):\n periodo = []\n try:\n bd = MySQLdb.connect(\"localhost\",\"root\",\"gogole\",\"Recibo_Sueldo\" )\n cursor = bd.cursor()\n sql = \"SELECT fechaPeriodo FROM Recibo;\"\n cursor.execute(sql)\n resultados = cursor.fetchall()\n for registro in resultados:\n periodo.append(str(registro[0]))\n return periodo\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n bd.close()\n\n \"\"\" calcularRecibo.\n * @param ninguno.\n * @return calcula y guarda el recibo.\n */\n \"\"\"\n def calcularRecibo(self):\n try:\n bd = MySQLdb.connect(\"localhost\",\"root\",\"gogole\",\"Recibo_Sueldo\")\n cursor = bd.cursor()\n sql=\"SELECT c.cod_Cargo, tp.puntos_Cargos, zon.porcentaje_Zona, obra.descuento_Obra, c.fechaIngreso FROM Docente d 
INNER JOIN Cargo c on d.dni_Docente = c.dni_Docente INNER JOIN ObraSocial obra on obra.cod_ObraSocial = d.cod_ObraSocial INNER JOIN Tipo_Cargo tp on tp.cod_tipoCargo = c.cod_Cargo INNER JOIN Escuela esc on esc.numero_Escuela = c.numero_Escuela INNER JOIN Zona zon on zon.cod_Zona = esc.cod_Zona where d.activo='Y';\"\n cursor.execute(sql)\n resultados = cursor.fetchall()\n\n for registro in resultados:\n sueldoBasico = self.sueldo_Basico(registro[1])\n cargo = Cargo(registro[0])\n montoAntiguedad = self.monto_Anti(cargo.antiguedad(registro[4]), sueldoBasico)\n sumaZona = self.suma_Zona(sueldoBasico, registro[2])\n present = self.presentismo(sueldoBasico, montoAntiguedad)\n suBTotal1 = self.subTotal1(sueldoBasico, montoAntiguedad, sumaZona, present)\n jubi = self.jubilacion(suBTotal1)\n obraS = self.obraSocial(suBTotal1, registro[3])\n suBTotal2 = self.subTotal2(jubi, obraS, self.seguro())\n tot = self.total(suBTotal1, suBTotal2)\n fechaPeriodo= self.calcularPeriodo()\n sql=\"INSERT INTO Recibo(cod_Cargo, sueldoBasico, montoAnti, sumaZona,presentismo, subTotal1, jubilacion, desObraSoial, seguro, subTotal2, total,fechaPeriodo) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s' , '%s', '%s', '%s', '%s', '%s')\" % (registro[0], sueldoBasico, montoAntiguedad, sumaZona, present, suBTotal1, jubi, obraS, self.seguro(), suBTotal2, tot, self.calcularPeriodo())\n cursor.execute(sql)\n bd.commit()\n tkMessageBox.showinfo(\"AVISO\", \" Los Recibos fueron insertados con exito\")\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n \"\"\"Función crearPdf.\n * @param ninguno.\n * @return crea el pdf con el cargo del docente y lo abre.\n */\n \"\"\"\n def crearPdf(self):\n periodo = \"recibos\"\n try:\n bd = MySQLdb.connect(\"localhost\",\"root\",\"gogole\",\"Recibo_Sueldo\")\n cursor = bd.cursor()\n sql = \"SELECT DISTINCT e.nombre_Escuela,d.nomApe_Docente, d.dni_Docente, tp.descripcion_Cargo, r.numero_Recibo, r.sueldoBasico, r.montoAnti, r.sumaZona, r.presentismo, r.subTotal1, r.jubilacion,r.desObraSoial, r.seguro, r.subTotal2, r.Total, r.fechaPeriodo, c.fechaIngreso FROM Docente d INNER JOIN Cargo c on d.dni_Docente = c.dni_Docente INNER JOIN Tipo_Cargo tp on tp.cod_tipoCargo = c.cod_tipoCargo INNER JOIN Escuela e on e.numero_Escuela = c.numero_Escuela INNER JOIN Recibo r on r.cod_Cargo = c.cod_Cargo WHERE r.numero_Recibo ='%s'\" % self.getNumero_Recibo()\n cursor.execute(sql)\n resultados = cursor.fetchall()\n for registro in resultados:\n escuela = str(registro[0])\n nomApe = str(registro[1])\n dni = str(registro[2])\n cargo = str(registro[3])\n numero_recibo = str(registro[4])\n sueldoBasico = str(registro[5])\n monto_Anti = str(registro[6])\n suma_Zona = str(registro[7])\n presentismo = str(registro[8])\n subTotal1 = str(registro[9])\n jubilacion = str(registro[10])\n desObraSocial = str(registro[11])\n seguro = str(registro[12])\n subTotal2 = str(registro[13])\n total = str(registro[14])\n fechaPeriodo = str(registro[15])\n ingreso = str(registro[16])\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n bd.close()\n pdf = FPDF()\n pdf.add_page()\n pdf.image('factura.jpg',5,2,200,290)\n pdf.set_font('Arial', 'B', 10)\n pdf.text(96, 72 , escuela)\n pdf.text(161, 72 , fechaPeriodo)\n pdf.text(28, 89 , nomApe)\n pdf.text(69, 89 , dni)\n pdf.text(96, 89 , cargo)\n pdf.text(161, 89 , numero_recibo)\n pdf.text(74, 115 , \"$ \"+sueldoBasico)\n pdf.text(74, 120 , \"$ \"+monto_Anti)\n pdf.text(74, 125 , \"$ \"+suma_Zona)\n pdf.text(74, 
130 , \"$ \"+presentismo)\n pdf.text(74, 140, \"$ \"+subTotal1)\n pdf.text(155, 116, \"$ \"+jubilacion)\n pdf.text(155, 121, \"$ \"+desObraSocial)\n pdf.text(155, 126, \"$ \"+seguro)\n pdf.text(150, 141, \"$ \"+subTotal2)\n pdf.text(145, 216, \"$ \"+total)\n pdf.text(29, 231 , ingreso)\n nombre = str(self.getNumero_Recibo())\n ext = '.pdf'\n salida = nombre+ext\n pdf.output(periodo+\"/\"+salida, 'F')\n abrir='recibos/'+str(self.getNumero_Recibo())+'.pdf'\n os.system('evince '+abrir)\n", "sub_path": "clases/recibo.py", "file_name": "recibo.py", "file_ext": "py", "file_size_in_byte": 9951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.today", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.today", "line_number": 141, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 153, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 161, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 161, "usage_type": "name"}, {"api_name": "MySQLdb.connect", "line_number": 172, "usage_type": "call"}, {"api_name": "cargo.Cargo", "line_number": 180, "usage_type": "call"}, {"api_name": "cargo.antiguedad", "line_number": 181, "usage_type": "call"}, {"api_name": "tkMessageBox.showinfo", "line_number": 193, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 194, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 194, "usage_type": "name"}, {"api_name": "MySQLdb.connect", "line_number": 205, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 228, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 228, "usage_type": "name"}, {"api_name": "fpdf.FPDF", "line_number": 231, "usage_type": "call"}, {"api_name": "os.system", "line_number": 257, "usage_type": "call"}]} +{"seq_id": "269940010", "text": "from torch import nn\nimport torch\nfrom torch.nn import Sequential\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom hw_asr.base import BaseModel\n\n\nclass RnnModel(BaseModel):\n def __init__(self, n_feats, n_class, fc_hidden=512, *args, **kwargs):\n super().__init__(n_feats, n_class, fc_hidden, *args, **kwargs)\n self.n_layers = kwargs['n_layers']\n self.fc_hidden = fc_hidden\n self.rnn = nn.RNN(n_feats, fc_hidden, self.n_layers, batch_first=True)\n self.fc = nn.Linear(in_features=fc_hidden, out_features=n_class)\n\n def forward(self, spectrogram, *args, **kwargs):\n packed_inputs = pack_padded_sequence(spectrogram, kwargs[\"spectrogram_length\"],\n enforce_sorted=False, batch_first=True)\n\n out, _ = self.rnn(packed_inputs)\n out, _ = pad_packed_sequence(out, batch_first=True)\n out = self.fc(out)\n return {\"logits\": out}\n\n def transform_input_lengths(self, input_lengths):\n return input_lengths # we don't reduce time dimension here\n", "sub_path": "hw_asr/model/rnn_model.py", "file_name": "rnn_model.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "hw_asr.base.BaseModel", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.RNN", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, 
{"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_packed_sequence", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "621086196", "text": "'''\nName: Muhammad Khan\nDate: 03/20/2019\nAssignment06\n'''\n\nimport mailroom as m\nimport pytest as p\nimport os\nimport sys\n\ndef test_quit():\n with p.raises(SystemExit):\n assert m.quit()\n\n\ndef test_email_message():\n msg = \"\"\"\n \\rDear {:},\n\n \\rThank you so much for your generous donation of $ {:.2f}.\n\n \\rBest Regards,\n\n \\r -Team\"\"\"\n for name, donation in m.donors_data.items():\n assert m.email_message(name,donation[-1]) == msg.format(name,\n donation[-1])\n\n\ndef test_calculate_total_gift():\n for item in [items for items in m.calculate_total_gift()]:\n assert item[1] == sum(m.donors_data[item[0]][:])\n\n\ndef test_letter_format():\n\n msg = \"\"\"Dear {:},\n\n Thank you so much for your kind donation of ${:.2f}. With that you have\n generously donated a total amount of ${:.2f} in your last {} donation(s).\n We must ensure you that your donations will be put to a very good use.\n\n Sincerely,\n\n -Team \"\"\"\n for donor in m.calculate_total_gift():\n assert m.letter_format(*donor) == msg.format(*donor)\n\n\ndef test_sorted_list_desc():\n data = m.calculate_total_gift()\n assert m.sorted_list_desc()[0][1] == max([sum(data) for\n data in m.donations])\n\n\ndef test_send_letter_everyone():\n m.send_letter_everyone()\n assert os.path.exists(\"Letters\")\n for file in os.listdir(os.path.join(\"Letters\")):\n donor = file[0:-15]\n assert donor in m.donors_data.keys()\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "students/MzKhan/lesson10/mailroom/test_mailroom.py", "file_name": "test_mailroom.py", "file_ext": "py", "file_size_in_byte": 1670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pytest.raises", "line_number": 13, "usage_type": "call"}, {"api_name": "mailroom.quit", "line_number": 14, "usage_type": "call"}, {"api_name": "mailroom.donors_data.items", "line_number": 26, "usage_type": "call"}, {"api_name": "mailroom.donors_data", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mailroom.email_message", "line_number": 27, "usage_type": "call"}, {"api_name": "mailroom.calculate_total_gift", "line_number": 32, "usage_type": "call"}, {"api_name": "mailroom.donors_data", "line_number": 33, "usage_type": "attribute"}, {"api_name": "mailroom.calculate_total_gift", "line_number": 47, "usage_type": "call"}, {"api_name": "mailroom.letter_format", "line_number": 48, "usage_type": "call"}, {"api_name": "mailroom.calculate_total_gift", "line_number": 52, "usage_type": "call"}, {"api_name": "mailroom.sorted_list_desc", "line_number": 53, "usage_type": "call"}, {"api_name": "mailroom.donations", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mailroom.send_letter_everyone", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "mailroom.donors_data.keys", "line_number": 62, "usage_type": "call"}, {"api_name": "mailroom.donors_data", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "628908392", 
"text": "import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver import ChromeOptions, FirefoxOptions\n\n\n@pytest.fixture\ndef chrome_browser():\n options = ChromeOptions()\n options.add_argument('--headless')\n options.add_argument('--start-fullscreen')\n wd = webdriver.Chrome(options=options)\n yield wd\n wd.quit()\n\n\n@pytest.fixture\ndef firefox_browser():\n options = FirefoxOptions()\n options.add_argument('--headless')\n options.add_argument('--start-fullscreen')\n wd = webdriver.Firefox(options=options)\n yield wd\n wd.quit()\n\n\ndef pytest_collection_modifyitems(items, config):\n browser = config.getoption('browser')\n if browser is not None:\n selected = []\n deselected = []\n\n for item in items:\n if browser in getattr(item, 'fixturenames'):\n selected.append(item)\n else:\n deselected.append(item)\n\n config.hook.pytest_deselected(items=deselected)\n items[:] = selected\n if not items:\n raise ValueError('Invalid browser name.')\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser', help='Run tests only for certain browser.')\n parser.addoption('--opencart_url', default='http://127.0.0.1:8080/opencart/')\n", "sub_path": "hw-selenium-1/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 1263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 11, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 6, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.FirefoxOptions", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "511748143", "text": "import csv\nimport sqlite3\n\n\ndef load_csv(data, csv_file):\n with open(csv_file) as f:\n f_reader = csv.reader(f)\n # Skip Headers\n next(f_reader)\n for row in f_reader:\n data.append(row)\n\n\ndef create_table(name, c):\n # # Create table if doesnt exist\n c.execute('''CREATE TABLE IF NOT EXISTS {} (\n id integer PRIMARY KEY AUTOINCREMENT,\n name text NOT NULL,\n hp integer NOT NULL,\n phys_atk integer NOT NULL,\n mag_atk integer NOT NULL,\n phys_def integer NOT NULL,\n mag_def integer NOT NULL\n )'''.format(name))\n\n\ndef populate_table(name, data, c):\n c.executemany('''INSERT INTO {}(\n name,\n hp,\n phys_atk,\n mag_atk,\n phys_def,\n mag_def)\n VALUES (?,?,?,?,?,?)'''.format(name), data)\n\n\nif __name__ == \"__main__\":\n # Settings\n csv_file = \"player_classes.csv\"\n database_name = \"player_classes.db\"\n table_name = \"Classes\"\n\n # Load data from CSV\n data = []\n load_csv(data, csv_file)\n\n # Connect to database and create cursor\n con = sqlite3.connect(database_name)\n c = con.cursor()\n\n # Delete previous table\n c.execute(\"DROP TABLE IF EXISTS {}\".format(table_name))\n\n # Create table if it doesnt exist, then populate with data\n create_table(table_name, c)\n populate_table(table_name, data, c)\n\n # Save changes and close connection\n con.commit()\n con.close()\n", "sub_path": "src/actors/player_class_seed.py", "file_name": "player_class_seed.py", "file_ext": "py", "file_size_in_byte": 1529, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "csv.reader", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "127943662", "text": "import itertools\nimport math\nimport sys\n\n\ndef input():\n return sys.stdin.readline()[:-1]\n\n\nN = int(input())\nXY = []\nfor _ in range(N):\n x, y = map(int, input().split())\n XY.append([x, y])\n\nsum_distance = 0\nfor x, y in list(itertools.combinations(range(N), 2)):\n sum_distance += math.sqrt(abs(XY[x][0]-XY[y][0])**2+abs(XY[x][1]-XY[y][1])**2)\nprint(sum_distance/(N/2))\n", "sub_path": "ABC_C/ABC145_C.py", "file_name": "ABC145_C.py", "file_ext": "py", "file_size_in_byte": 379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stdin.readline", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 17, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "296629896", "text": "## decoder group activity\n\nimport datetime\ndef main():\n De_Or_En = input(\"Do you want to decode or encode a message? (D/E):> \")\n if(De_Or_En == 'd' or De_Or_En == 'D'):\n theMessage = input(\"What message do you want to decode? \\n:> \")\n # startTime = datetime.datetime.now()\n finalMsg, elapsedTime = decode(theMessage)\n # endTime = datetime.datetime.now()\n # elapsedTime = endTime - startTime\n prntTime(elapsedTime)\n else:\n theMessage = input(\"What message do you want to encode?\\n:> \")\n while True:\n shiftDistance = int(input(\"Shift Distance (Please enter a number between 1 and 25) :> \"))\n if(shiftDistance >= 1 and shiftDistance <= 25):\n break\n finalMsg = encode(theMessage, shiftDistance)\n print(\"Encoded Message =>\", finalMsg, \"\\nShifted by\", shiftDistance, \"characters.\")\n\ndef decode(msg):\n startTime = datetime.datetime.now()\n\n distance = 1\n decodedMsg = \"\"\n\n while True:\n for ch in msg:\n ordvalue = ord(ch)\n ciphervalue = ordvalue - distance\n if ciphervalue < ord('a'):\n ciphervalue = ord('z') - (distance + (ord('a') - ordvalue - 1))\n decodedMsg += chr(ciphervalue)\n ans = input(\"Decode Message => \" + str(decodedMsg) + \"\\nIs this correct? 
(Y/N) :> \")\n if(ans == 'Y' or ans == 'y'):\n endTime = datetime.datetime.now()\n time = endTime - startTime\n break\n else:\n distance += 1\n decodedMsg = \"\"\n return decodedMsg, time\n\ndef prntTime(time):\n print(\"Seconds:\", time.seconds, \", MicroSeconds:\", time.microseconds)\n\ndef encode(msg, dist):\n encodedMsg = \"\"\n dist = dist\n for ch in msg:\n ordvalue = ord(ch)\n ciphervalue = ordvalue + dist\n if ciphervalue > ord('z'):\n ciphervalue = ord('a') + dist - (ord('z') - ordvalue + 1)\n encodedMsg += chr(ciphervalue)\n return encodedMsg\n\n\nmain()", "sub_path": "Bullin_Decoder_GroupProject.py", "file_name": "Bullin_Decoder_GroupProject.py", "file_ext": "py", "file_size_in_byte": 2010, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}]} +{"seq_id": "231416434", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nfrom astropy.io import fits\nimport numpy as np\nimport argparse\nimport sys\nimport os\n\ndef stack(files, out):\n \"\"\"\n Combine a list of fits files into a single cube and save\n the output to out.\n\n Parameters\n ----------\n files : list\n List of files\n \n out : str\n Filename to save\n \"\"\"\n\n ref = fits.open(files[0])\n data = np.empty((len(files), ref[0].data.shape[-2], ref[0].data.shape[-1]),\n dtype=np.float32)\n\n for i, f in enumerate(files):\n print('add {0}'.format(f))\n hdu = fits.open(f)\n data[i, :, :] = hdu[0].data\n \n ref[0].data = data\n ref.writeto(out, overwrite=True)\n print(\"wrote {0}\".format(out))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n group1 = parser.add_argument_group(\"Combine images into a cube\")\n group1.add_argument(\"--infile\", dest='infile', type=str, default=None,\n help=\"A list of fits images in a file. 
[optional]\")\n group1.add_argument(\"--in\", dest='files', type=str, default=None, nargs='+',\n help=\"Explicit list of files to include.\")\n group1.add_argument(\"--out\", dest='outfile', type=str, default=None,\n help=\"output filename\")\n results = parser.parse_args()\n\n\n if (results.infile is None) and (results.files is None):\n parser.print_help()\n sys.exit(1)\n\n if results.infile is not None:\n files = [l.strip() for l in open(results.infile).readlines()]\n else:\n files = results.files\n\n if len(files) < 2:\n print(\"not enough files, need at least 2 to make a cube\")\n print(\"given {0}\".format(files))\n sys.exit(1)\n stack(files=files, out=results.outfile)\n", "sub_path": "make_cube.py", "file_name": "make_cube.py", "file_ext": "py", "file_size_in_byte": 1808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "astropy.io.fits.open", "line_number": 24, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 30, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 30, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "410899419", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n\n\n#%% download latest translations from crowdin\n#pip install crowdin-cli-py --upgrade\nimport subprocess\nif subprocess.call(['crowdin-cli-py', 'download'])==1:\n raise ValueError(\"Download failes, maybe you need to adjust paths in corwdin.yaml!\")\n\n#%% Build translations\n#pip install polib\nimport polib\nfrom glob import glob\npaths = glob('lang/*/LC_MESSAGES/')\npaths=[p[5:10] for p in paths]\nfor p in paths:\n print(\"build %s\"%p)\n try:\n po = polib.pofile('lang/%s/LC_MESSAGES/customize.po'%p)\n po.save_as_mofile('lang/%s/LC_MESSAGES/customize.mo'%p)\n except OSError: \n print(\"no customize.po found\")\n \n try:\n po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)\n po.save_as_mofile('lang/%s/LC_MESSAGES/update.mo'%p)\n except OSError: \n print(\"no update.po found\")\n \n try:\n po = polib.pofile('lang/%s/LC_MESSAGES/site.po'%p)\n po.save_as_mofile('lang/%s/LC_MESSAGES/site.mo'%p)\n except OSError: \n print(\"no site.po found\")\n \n \n#%% Build minified version\n#pip install ply==3.4\n#pip install slimit\nfrom slimit import minify\n\ndef read_file(name):\n with open (name, \"r\", encoding=\"utf-8\") as f:\n return \"\".join(str(f.read()))#.replace('\\n', '')\n\ndef write_file(name,string):\n with open(name, \"w\", encoding=\"utf-8\") as f:\n f.write(string)\n\nadd=\"\"\"//(c)2017, MIT Style License \n//it is recommended to directly link to this file because we update the detection code\n\"\"\"\n\ntext=read_file(\"update.js\")\nminned=minify(text, mangle=False, mangle_toplevel=False)\nwrite_file(\"update.min.js\",add+minned)\n\ntext=read_file(\"update.show.js\")\nminned=minify(text, mangle=False, mangle_toplevel=False)\nwrite_file(\"update.show.min.js\",minned)\n\n\n# build npm versions of the script\nimport re\n\nt_upjs=read_file(\"update.js\")\nt_upjs=t_upjs.replace(\"\"\"$buo(window.$buoop);\"\"\",\"\"\"module.exports = 
$buo;\\n\"\"\")\n\nwrite_file(\"update.npm.js\",t_upjs)\n\n#combine both files into a single one\nt_upjs=t_upjs.replace(\"\"\"var e=document.createElement(\"script\");\ne.src = op.jsshowurl||(/file:/.test(location.href) && \"http://browser-update.org/update.show.min.js\") || \"//browser-update.org/update.show.min.js\";\ndocument.body.appendChild(e);\n\"\"\",\"$buo_show();\")\nt_upjs_npm=re.sub(r'jsv=\"([^\"]*)\";','jsv=\"\\\\1npm\";',t_upjs)\n\nt_showjs=read_file(\"update.show.js\")\nt_showjs=t_showjs.replace(\"\"\"$buo_show();\"\"\",\"\")\n\nwrite_file(\"update.npm.full.js\",t_upjs_npm+t_showjs)\n\n\n#build cloudflare versions\nt_upjs_cf=re.sub(r'jsv=\"([^\"]*)\";','jsv=\"\\\\1cf\";',t_upjs)\n\nwrite_file(\"update.cloudflare.js\",t_upjs_cf+t_showjs)\n\n\n#%%\nupload()\n#\nclear_cache()\n\n#%% publish to npm\nimport subprocess\nsubprocess.call(['npm', 'publish'])\n\n\n#%% Convert strings to javascript format\nst='Your web browser ({brow_name}) is out-of-date. Update your browser for more security, comfort and the best experience on this site. Update browser Ignore'\nimport polib\nfrom glob import glob\npaths = glob('lang/*/LC_MESSAGES/')\npaths=[p[5:10] for p in paths]\nfor p in paths:\n #print(\"build %s\"%p)\n #if p[:2] not in [\"vi\",\"hi\",\"sk\"]:\n # continue\n \n try:\n po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)\n except OSError: \n print(\"no update.po found\")\n if p in [\"rm_CH\",\"en_SE\"]:\n continue\n \n if p in [\"zh_TW\",\"sr_CS\"]:\n for i in po:\n if i.msgid==st:\n print(\"t[\\\"%s\\\"]='%s';\"%(p[:5].lower().replace(\"_\",\"-\"),i.msgstr.replace(\"\\n\",\"\").replace(\"'\",\"\\\\'\")))\n break\n else:\n for i in po:\n if i.msgid==st:\n if i.msgstr!=\"\":\n print(\"t.%s='%s';\"%(p[:2],i.msgstr.replace(\"\\n\",\"\").replace(\"'\",\"\\\\'\")))\n else:\n print(\"//t.%s='%s';\"%(p[:2],\"\"))\n \n break\n \n#%% download maxmind geoip database\n\n#wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz\n#gunzip GeoLite2-Country.mmdb.gz\n", "sub_path": "manage/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 4124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "subprocess.call", "line_number": 10, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 17, "usage_type": "call"}, {"api_name": "polib.pofile", "line_number": 22, "usage_type": "call"}, {"api_name": "polib.pofile", "line_number": 28, "usage_type": "call"}, {"api_name": "polib.pofile", "line_number": 34, "usage_type": "call"}, {"api_name": "slimit.minify", "line_number": 58, "usage_type": "call"}, {"api_name": "slimit.minify", "line_number": 62, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 79, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 88, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 100, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 107, "usage_type": "call"}, {"api_name": "polib.pofile", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "239125907", "text": "import os\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef assert_parameters(file_, parameters):\n for k, v in parameters.items():\n assert file_.contains(\"{} = {}\".format(k, v))\n\n\ndef test_hosts_file(host):\n assert_parameters(\n host.file(\"/etc/sysctl.conf\"),\n {\n \"vm.dirty_ratio\": 20,\n 
\"vm.dirty_background_ratio\": 15\n }\n )\n\n assert_parameters(\n host.file(\"/etc/sysctl.d/10-networking.conf\"),\n {\n \"net.ipv4.tcp_syncookies\": 1,\n \"net.ipv4.ip_forward\": 0\n }\n )\n\n assert_parameters(\n host.file(\"/etc/sysctl.d/20-security.conf\"),\n {\n \"kernel.dmesg_restrict\": 1\n }\n )\n", "sub_path": "molecule/default/tests/test_sysctl.py", "file_name": "test_sysctl.py", "file_ext": "py", "file_size_in_byte": 829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "testinfra.utils.ansible_runner.utils.ansible_runner.AnsibleRunner", "line_number": 5, "usage_type": "call"}, {"api_name": "testinfra.utils.ansible_runner.utils", "line_number": 5, "usage_type": "attribute"}, {"api_name": "testinfra.utils.ansible_runner", "line_number": 5, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}]} +{"seq_id": "105796094", "text": "from app import app\nfrom flask import Blueprint, jsonify, request, abort\n\nfrom .infra import check_auth_token\nfrom .models import Comment\nfrom .service import Service\nfrom .utils import response_message\n\ncomment_module = Blueprint('comment', __name__, url_prefix=\"/comments\")\n\n\ndef web_filter():\n token = request.headers.get('Authorization')\n if not token:\n abort(401)\n if not check_auth_token(token):\n abort(401)\n\n\n@comment_module.route('/', methods=['POST'], strict_slashes=False)\ndef add():\n try:\n assert 'key' in request.json and request.json['key'] is not None, abort(400)\n assert 'product_key' in request.json and request.json['product_key'] is not None, abort(400)\n assert 'comments' in request.json and request.json['comments'] is not None, abort(400)\n service = Service()\n comment = Comment(request.json['key'],\n request.json['product_key'],\n request.json['comments'])\n service.add(comment)\n return response_message(201, 'Comment created successfully')\n except Exception as e:\n app.logger.error(e)\n return abort(500)\n\n\n@comment_module.route('/', methods=['PUT'], strict_slashes=False)\ndef edit(key):\n try:\n assert 'key' in request.json and request.json['key'] is not None, abort(400)\n assert 'product_key' in request.json and request.json['product_key'] is not None, abort(400)\n assert 'comments' in request.json and request.json['comments'] is not None, abort(400)\n service = Service()\n comment = Comment(request.json['key'],\n request.json['product_key'],\n request.json['comments'])\n service.edit(comment)\n return response_message(200, 'Comment edited successfully')\n except Exception as e:\n app.logger.error(e)\n return abort(500)\n\n\n@comment_module.route('/', methods=['GET'], strict_slashes=False)\ndef get(key):\n try:\n service = Service()\n data = service.get(key)\n return jsonify(data), 200\n except Exception as e:\n app.logger.error(e)\n return abort(500)\n\n\n@comment_module.route('/', methods=['GET'], strict_slashes=False)\ndef find_all():\n try:\n service = Service()\n data = service.find_all()\n return jsonify(data), 200\n except Exception as e:\n app.logger.error(e)\n return abort(500)\n\n\n@comment_module.route('/', methods=['DELETE'], strict_slashes=False)\ndef delete(key):\n try:\n service = Service()\n service.delete(key)\n return response_message(204, 'Comment removed successfully')\n except Exception as e:\n app.logger.error(e)\n return abort(500)\n\n\n@comment_module.route('/product/', methods=['GET'], strict_slashes=False)\ndef find_all_by_product_key(key):\n try:\n service = Service()\n 
data = service.find_all_by_product_key(key)\n return jsonify(data), 200\n except Exception as e:\n app.logger.error(e)\n return abort(500)\n", "sub_path": "comments-api/app/comment/comment_api.py", "file_name": "comment_api.py", "file_ext": "py", "file_size_in_byte": 3075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 15, "usage_type": "call"}, {"api_name": "infra.check_auth_token", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 25, "usage_type": "call"}, {"api_name": "service.Service", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Comment", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "service.add", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.response_message", "line_number": 31, "usage_type": "call"}, {"api_name": "app.app.logger.error", "line_number": 33, "usage_type": "call"}, {"api_name": "app.app.logger", "line_number": 33, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 42, "usage_type": "call"}, {"api_name": "service.Service", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Comment", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "service.edit", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.response_message", "line_number": 48, "usage_type": "call"}, {"api_name": "app.app.logger.error", "line_number": 50, "usage_type": "call"}, {"api_name": "app.app.logger", "line_number": 50, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 51, "usage_type": "call"}, {"api_name": "service.Service", "line_number": 57, "usage_type": "call"}, {"api_name": "service.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 59, "usage_type": "call"}, {"api_name": "app.app.logger.error", "line_number": 61, "usage_type": "call"}, {"api_name": "app.app.logger", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 62, "usage_type": "call"}, {"api_name": "service.Service", "line_number": 68, "usage_type": "call"}, {"api_name": "service.find_all", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "app.app.logger.error", "line_number": 72, "usage_type": "call"}, {"api_name": "app.app.logger", "line_number": 72, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 73, "usage_type": "call"}, {"api_name": "service.Service", "line_number": 79, "usage_type": "call"}, {"api_name": "service.delete", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.response_message", "line_number": 81, "usage_type": "call"}, {"api_name": "app.app.logger.error", "line_number": 83, "usage_type": "call"}, {"api_name": "app.app.logger", "line_number": 83, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 84, "usage_type": "call"}, {"api_name": "service.Service", "line_number": 90, "usage_type": "call"}, {"api_name": "service.find_all_by_product_key", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 92, "usage_type": "call"}, {"api_name": "app.app.logger.error", "line_number": 94, "usage_type": "call"}, {"api_name": "app.app.logger", "line_number": 94, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "554575420", "text": "\"\"\"\r\n@note:detection\r\n\"\"\"\r\n\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom NN import CNN\r\nimport matplotlib.pyplot as plt\r\n\"\"\"\r\n@note: 由于二值化无法将由于色调相近但有差别的图像\r\n 显示出,于是尝试采用四值化处理\r\n\"\"\"\r\n\r\n\r\n# 多值化处理函数\r\ndef convert8(img):\r\n row, column = img.shape\r\n for i in range(row):\r\n for j in range(column):\r\n img[i][j] = int(img[i][j]/32)\r\n\r\n\r\n# 二值化处理函数\r\ndef convert2(img):\r\n row, column = img.shape\r\n for i in range(row):\r\n for j in range(column):\r\n if img[i][j] < 200:\r\n img[i][j] = 0\r\n else:\r\n img[i][j] = 1\r\n\r\n\r\n# 预处理\r\ndef pre_handle(file_name, 
size=(480, 600)):\r\n # 加载图片\r\n image = Image.open(file_name)\r\n # resize\r\n image = image.resize(size, Image.ANTIALIAS)\r\n # 统一转换图片为灰度图\r\n image = image.convert('L')\r\n # 统一转化为矩阵\r\n x = np.array(image, 'float64')\r\n return x\r\n\r\n\r\n# 定义卷积核函数,即各个滤波器\r\nfilter1 = np.array([[0, 0, 0, 0, 0],\r\n [1, 1, 1, 1, 1],\r\n [0, 0, 0, 0, 0],\r\n [-1, -1, -1, -1, -1],\r\n [0, 0, 0, 0, 0]], 'float64')\r\nfilter2 = np.array([[0, 1, 0, -1, 0],\r\n [0, 1, 0, -1, 0],\r\n [0, 1, 0, -1, 0],\r\n [0, 1, 0, -1, 0],\r\n [0, 1, 0, -1, 0]], 'float64')\r\nfilter3 = np.array([[0, 0, 1, 0, 0],\r\n [0, 1, 0, 0, 0],\r\n [1, 0, 0, 0, -1],\r\n [0, 0, 0, -1, 0],\r\n [0, 0, -1, 0, 0]], 'float64')\r\nfilter4 = np.array([[0, 0, -1, 0, 0],\r\n [0, -1, 0, 0, 0],\r\n [-1, 0, 0, 0, 1],\r\n [0, 0, 0, 1, 0],\r\n [0, 0, 1, 0, 0]], 'float64')\r\nfilter5 = np.array([[0, 0, 1, 0, 0],\r\n [0, 1, 0, 1, 0],\r\n [1, 0, 0, 0, 1],\r\n [0, -1, 0, -1, 0],\r\n [0, 0, -1, 0, 0]], 'float64')\r\nfilter6 = np.array([[0, 0, -1, 0, 0],\r\n [0, -1, 0, -1, 0],\r\n [1, 0, 0, 0, 1],\r\n [0, 1, 0, 1, 0],\r\n [0, 0, 1, 0, 0]], 'float64')\r\nfilter_sobel = np.array([[0., 1., 0., -1., 0.],\r\n [0., 1., 0., -1., 0.],\r\n [0., 2., 0., -2., 0.],\r\n [0., 1., 0., -1., 0.],\r\n [0., 1., 0., -1., 0.], \"float64\"])\r\n\r\n\r\n# 激活函数 relu\r\ndef relu(value):\r\n tmp = value.copy()\r\n if tmp < 0:\r\n return 0\r\n else:\r\n return tmp\r\n\r\n\r\n# convolution function , 步长1x1,padding为2\r\ndef conv2d(conv, ft):\r\n tmp = np.zeros((conv.shape[0]+4, conv.shape[1]+4))\r\n for ae1 in range(conv.shape[0]):\r\n for ae2 in range(conv.shape[1]):\r\n tmp[ae1+2][ae2+2] = conv[ae1][ae2]\r\n conv = tmp\r\n rs = np.ones((conv.shape[0] - ft.shape[0]+1, conv.shape[1] - ft.shape[1]+1))\r\n for i in range(conv.shape[0] - ft.shape[0]+1):\r\n for j in range(conv.shape[1] - ft.shape[1]+1):\r\n tmp = np.ones((ft.shape[0], ft.shape[1]))\r\n for k1 in range(ft.shape[0]):\r\n for k2 in range(ft.shape[1]):\r\n tmp[k1][k2] = conv[i+k1][j+k2]\r\n rs[i][j] = relu(np.sum(tmp*ft) + 1)\r\n return rs\r\n\r\n\r\n# max pooling function,2x2\r\ndef max_pool(pool, size=2):\r\n tmp = np.zeros((int(pool.shape[0]/size), int(pool.shape[1]/size)))\r\n for i in range(tmp.shape[0]):\r\n for j in range(tmp.shape[1]):\r\n unit = 0\r\n for k1 in range(size):\r\n for k2 in range(size):\r\n if unit < pool[i*size+k1][j*size+k2]:\r\n unit = pool[i*size+k1][j*size+k2]\r\n tmp[i][j] = unit\r\n return tmp\r\n\r\n\r\n# 矩阵一维化\r\ndef to_one_dim(array):\r\n rs = np.resize(array, (1, array.size))\r\n return rs\r\n\r\n\r\n# 打印矩阵size\r\ndef get_size(array):\r\n print(array.shape[0], array.shape[1])\r\n\r\n\r\n\"\"\"\r\n@note:开始进行卷积神经网络计算,\r\n 我的思路为先让机器有能力\r\n 识别出哪里是广告部分,剔除\r\n 背景信息(即与广告内容无关部分),\r\n 第二部采用sliding window的\r\n 方框检测法对比两张广告内容差异\r\n\"\"\"\r\n\r\n\r\n# 卷积图片,逐层提取特征\r\ndef convolution(x_in):\r\n # 第1层 卷积层\r\n layer1 = conv2d(x_in, filter1)\r\n plt.imshow(layer1)\r\n plt.show()\r\n # 第2层 池化层\r\n layer2 = max_pool(layer1)\r\n # 第3层 卷积层\r\n layer3 = conv2d(layer2, filter2)\r\n # 第4层 池化层\r\n layer4 = max_pool(layer3)\r\n # 第5层 卷积层\r\n layer5 = conv2d(layer4, filter3)\r\n # 第6层 池化层\r\n layer6 = max_pool(layer5)\r\n # 第7层 卷积层\r\n layer7 = conv2d(layer6, filter4)\r\n # 第8层 池化层\r\n layer8 = max_pool(layer7)\r\n return layer8\r\n\r\n\r\n# 定义全连接层各个参数\r\n# 学习率\r\nlr = 0.011\r\n# 训练标签\r\nlb = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\r\n# 初始权值\r\nw1 = np.random.random((37*30, 256))*2-1\r\nw2 = np.random.random((256, 16))*2-1\r\nw3 = np.random.random((16, 
1))*2-1\r\n\r\n\r\n# sigmoid激活函数\r\ndef sigmoid(value):\r\n return 1/(1+np.exp(-value))\r\n\r\n\r\ndef dsigmoid(value):\r\n return value*(1 - value)\r\n\r\n\r\n# 全连接函数\r\ndef fc():\r\n global lb, l3, w1, w2, w3, x_train\r\n # 前向传播\r\n l1 = sigmoid(np.dot(x_train, w1))\r\n l2 = sigmoid(np.dot(l1, w2))\r\n l3 = sigmoid(np.dot(l2, w3))\r\n # 反向传播\r\n l3_delta = (l3 - lb.T)*dsigmoid(l3)\r\n l2_delta = np.dot(l3_delta, w3.T)*dsigmoid(l2)\r\n l1_delta = np.dot(l2_delta, w2.T)*dsigmoid(l1)\r\n w3_c = lr*np.dot(l2.T, l3_delta)\r\n w2_c = lr*np.dot(l1.T, l2_delta)\r\n w1_c = lr*np.dot(x_train.T, l1_delta)\r\n # 逐层改变权值\r\n w3 = w3 - w3_c\r\n w2 = w2 - w2_c\r\n w1 = w1 - w1_c\r\n\r\n\r\n# 测试函数\r\ndef test(x_test):\r\n w1_test = CNN.loadparam(\"w1.txt\")\r\n w2_test = CNN.loadparam(\"w2.txt\")\r\n w3_test = CNN.loadparam(\"w3.txt\")\r\n # 前向传播\r\n l1_test = sigmoid(np.dot(x_test, w1_test))\r\n l2_test = sigmoid(np.dot(l1_test, w2_test))\r\n l3_test = sigmoid(np.dot(l2_test, w3_test))\r\n return l3_test[0][0]\r\n\r\n\r\n# 梯度下降函数\r\ndef gradient_descent(epoch_num):\r\n global lr, lb\r\n for _ in range(epoch_num):\r\n fc()\r\n e = np.mean(np.abs(l3 - lb.T))\r\n print(e)\r\n print(l3)\r\n\r\n\r\n# 处理函数\r\ndef operate(file_name):\r\n # 执行预处理\r\n x = pre_handle(\"./datasets/\" + file_name)\r\n # 执行卷积\r\n x = convolution(x)\r\n # 执行矩阵归一行\r\n x = to_one_dim(x)\r\n print(file_name + \" is done!\")\r\n return x\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # 执行\r\n x1 = operate(\"1.jpg\")\r\n x2 = operate(\"2.jpg\")\r\n x3 = operate(\"3.jpg\")\r\n x4 = operate(\"4.jpg\")\r\n x5 = operate(\"5.jpg\")\r\n x6 = operate(\"6.jpg\")\r\n x7 = operate(\"7.jpg\")\r\n x8 = operate(\"8.jpg\")\r\n x9 = operate(\"9.jpg\")\r\n x10 = operate(\"10.jpg\")\r\n x11 = operate(\"11.jpg\")\r\n x12 = operate(\"12.jpg\")\r\n x13 = operate(\"13.jpg\")\r\n x14 = operate(\"14.jpg\")\r\n x15 = operate(\"15.jpg\")\r\n x16 = operate(\"16.jpg\")\r\n x17 = operate(\"17.jpg\")\r\n x18 = operate(\"18.jpg\")\r\n x19 = operate(\"19.jpg\")\r\n x20 = operate(\"20.jpg\")\r\n xt1 = operate(\"t1.jpg\")\r\n xt2 = operate(\"t2.jpg\")\r\n x_train = np.concatenate((x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,\r\n x12, x13, x14, x15, x16, x17, x18, x19, x20))\r\n # 开始训练\r\n gradient_descent(6000)\r\n print(test(xt1))\r\n print(test(xt2))\r\n CNN.memory(w1, w2, w3)\r\n", "sub_path": "pre.py", "file_name": "pre.py", "file_ext": "py", "file_size_in_byte": 7763, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PIL.Image.open", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 101, "usage_type": "call"}, {"api_name": 
"numpy.ones", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.resize", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 203, "usage_type": "call"}, {"api_name": "NN.CNN.loadparam", "line_number": 212, "usage_type": "call"}, {"api_name": "NN.CNN", "line_number": 212, "usage_type": "name"}, {"api_name": "NN.CNN.loadparam", "line_number": 213, "usage_type": "call"}, {"api_name": "NN.CNN", "line_number": 213, "usage_type": "name"}, {"api_name": "NN.CNN.loadparam", "line_number": 214, "usage_type": "call"}, {"api_name": "NN.CNN", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 268, "usage_type": "call"}, {"api_name": "NN.CNN.memory", "line_number": 274, "usage_type": "call"}, {"api_name": "NN.CNN", "line_number": 274, "usage_type": "name"}]} +{"seq_id": "89721936", "text": "from django.conf.urls import include, url\nfrom rest_framework import routers\nfrom olo.api import views as olo_api_views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'match', olo_api_views.MatchView, 'match')\n\nurlpatterns = [\n\n url(r'^', include(router.urls)),\n url(r'^userlist/$', olo_api_views.UserlistAPIView.as_view(), name='userlist'),\n url(r'^roomlist/$', olo_api_views.RoomlistAPIView.as_view(), name='roomlist'),\n url(r'^like/$', olo_api_views.LikeAPIView.as_view(), name='like'),\n\n]", "sub_path": "olo/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "rest_framework.routers.DefaultRouter", 
"line_number": 5, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 5, "usage_type": "name"}, {"api_name": "olo.api.views.MatchView", "line_number": 6, "usage_type": "attribute"}, {"api_name": "olo.api.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "olo.api.views.UserlistAPIView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "olo.api.views.UserlistAPIView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "olo.api.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "olo.api.views.RoomlistAPIView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "olo.api.views.RoomlistAPIView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "olo.api.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "olo.api.views.LikeAPIView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "olo.api.views.LikeAPIView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "olo.api.views", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "322896154", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import DataFrame\nimport sys\nimport array \n\n\n# In[5]:\n\n\ndf_data = pd.read_csv(\"BITS AIC 2019 - Reflexis Raw Dataset (1).csv\")\n\n\n# In[6]:\n\n\ndf_cs = pd.read_csv(\"wow.csv\") \n\n\n# In[19]:\n\n\n\n# In[13]:\n\n\n#df_mean = df_data[1:757].mean()\nlist=[]\nstore=[]\n#print(df_data[0:df_cs['Cummulative count'][0]-1].corr())\n\nfor i in range(len(df_cs)-1):\n if(i==0):\n df_mean = df_data[0:756].mean()\n list.append(df_mean['Average Sale Purchase'])\n store.append(df_mean['STORE'])\n \n\n else:\n m = df_cs['Cummulative count'][i]\n k = df_cs['Cummulative count'][i+1]\n a= df_cs['Store'][i]\n df_mean = df_data[m:k-1].mean()\n list.append(df_mean['Average Sale Purchase'])\n store.append(df_mean['STORE'])\n #print(df_mean)\n#print(list) \n#list.sort()\nprint(list) \nprint(store)\n\n\n# In[18]:\n\n\nmatplotlib.rcParams.update({'font.size': 14})\n\nf, axarr = plt.subplots(1,2, figsize=(20, 4))\naxarr[0].scatter( store, list,\n edgecolor='black', linewidth='1', s=70, alpha=0.7, c=\"#e84629\")\naxarr[0].set_xlabel(\"Store Number\")\naxarr[0].set_ylabel(\"av of average sales\")\naxarr[0].set_ylim(0, 1)\naxarr[0].set_yticks(np.arange(30, 80, 10))\naxarr[0].set_xticks(np.arange(0, 8000, 1000))\naxarr[0].grid(color='red', linestyle='--', linewidth=1, alpha=0.2)\naxarr[0].spines[\"top\"].set_visible(False)\naxarr[0].spines[\"right\"].set_visible(False)\naxarr[0].spines[\"bottom\"].set_visible(False)\naxarr[0].spines[\"left\"].set_visible(False)\nplt.show()\n", "sub_path": "AIC-Mean.py", "file_name": "AIC-Mean.py", "file_ext": "py", "file_size_in_byte": 1647, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.rcParams.update", "line_number": 64, 
"usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 64, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "111810308", "text": "import re\nimport binascii\nfrom collections import OrderedDict\nfrom Crypto.Cipher import AES\nimport cloudscraper\nimport requests\n\nsession = cloudscraper.create_scraper(interpreter='native', debug=False)\n\ndef makeCookie(response):\n if 'slowAES.decrypt' in response.text:\n try:\n cryptVars = OrderedDict(re.findall(r'(a|b|c)=toNumbers\\(\"(.*?)\"\\)', response.text))\n\n check = binascii.hexlify(\n AES.new(\n binascii.unhexlify(cryptVars['a']),\n AES.MODE_CBC,\n binascii.unhexlify(cryptVars['b'])\n ).decrypt(\n binascii.unhexlify(cryptVars['c'])\n )\n ).decode('ascii')\n\n data = {\n 'url': response.text.split('location.href=\"')[1].split('\"')[0],\n 'cookie': [\n response.text.split('document.cookie=\"')[1].split('=')[0],\n check\n ]\n }\n\n print(f\"Setting Human Check to {data['cookie'][1]}\")\n\n return data\n except:\n return 0\n else:\n return 0\n\ndef monitor(url):\n\n response = session.get(url)\n\n cookie = makeCookie(response)\n\n if cookie != 0:\n requests.utils.add_dict_to_cookiejar(\n session.cookies,\n {\n cookie['cookie'][0]: cookie['cookie'][1]\n }\n )\n\n url = cookie['url']\n\n return monitor(url)\n\n print(response.text)\n\nmonitor('https://www.consortium.co.uk/polar-skate-co-stripe-puffer-ivory-navy-pol-f19-stripepuffer-ivonvy.html')\n", "sub_path": "new-bypass.py", "file_name": "new-bypass.py", "file_ext": "py", "file_size_in_byte": 1618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cloudscraper.create_scraper", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 13, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 13, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 15, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 16, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 16, "usage_type": "name"}, {"api_name": "binascii.unhexlify", "line_number": 17, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 18, "usage_type": "name"}, {"api_name": "binascii.unhexlify", "line_number": 19, "usage_type": "call"}, {"api_name": "binascii.unhexlify", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.utils.add_dict_to_cookiejar", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "9420601", "text": "import pdfkit\n\n\ndef html_to_pdf(html, to_file):\n # 将wkhtmltopdf.exe程序绝对路径传入config对象\n path_wkthmltopdf = r'C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe'\n config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\n # 生成pdf文件,to_file为文件路径\n pdfkit.from_file(html, to_file, configuration=config)\n print('OK')\n\nhtml_to_pdf('index.html','index.pdf')\n\n", 
"sub_path": "docs/html/htmltopdf.py", "file_name": "htmltopdf.py", "file_ext": "py", "file_size_in_byte": 413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pdfkit.configuration", "line_number": 7, "usage_type": "call"}, {"api_name": "pdfkit.from_file", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "209793644", "text": "import os\nimport pickle\nimport torch\nfrom torch import optim\nfrom core.network import LSTM, train, test\n\n# Load DataLoader\nDATA_PATH = os.path.join('..', 'data')\nDATA_NAME = 'data_loader.pkl'\n\nwith open(os.path.join(DATA_PATH, DATA_NAME), 'rb') as f:\n data_dict = pickle.load(f)\n\ntrain_dl = data_dict['train_dl']\nval_dl = data_dict['val_dl']\ndev_dl = data_dict['dev_dl']\n\n# Configure training parameters\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nlearning_rate = 0.0002\nhidden_dim = 100\nnum_layers = 1\nnum_epoch = 100\n\nmodel = LSTM(hidden_dim=hidden_dim, num_layers=num_layers)\nmodel = model.to(device)\n\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n# Train and test\ntrain(model, device, train_dl, val_dl, optimizer, num_epoch)\n\nloss, acc = test(model, device, dev_dl)\n\n# Save model\nDIR = os.path.join('..', 'model')\nNAME = 'model-{}_{}_{}_{:.4f}.pkl'.format(num_layers, hidden_dim, learning_rate, acc)\n\nif not os.path.exists(DIR):\n os.makedirs(DIR)\ntorch.save(model.state_dict(), os.path.join(DIR, NAME))\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1057, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "core.network.LSTM", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 30, "usage_type": "name"}, {"api_name": "core.network.train", "line_number": 33, "usage_type": "call"}, {"api_name": "core.network.test", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "46985470", "text": "import requests\nfrom bs4 import BeautifulSoup as bs\nimport os\ndef getTitle(soup):\n title=str(soup.find('div',attrs={'class':'red title'}))\n return title[title.find(\"title\")+7:title.find(\"\" in a:\n namecheck=1\n\n if 'href' in a:\n pr=\"https://manamoa17.net\"\n 
pr+=a[a.find(\"href=\")+6:a.find(\"\\\">\")]\n pr=pr.replace(\"amp;\",\"\")\n\n\n f.close()\n resource.close()\n os.remove(\"ground.txt\")\n", "sub_path": "Source/comeonURL.py", "file_name": "comeonURL.py", "file_ext": "py", "file_size_in_byte": 1241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "325556325", "text": "import gc\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils import data\n\n\nclass BenchmarkDataset(data.Dataset):\n\n def __init__(self, X, y):\n\n self.X = X\n self.y = y\n\n self.divide = False\n self.is_test = False\n self.debug = False\n\n def __len__(self):\n return self.X.shape[0]\n\n def __getitem__(self, index):\n if index not in range(0, len(self.X)):\n return self.__getitem__(np.random.randint(0, self.__len__()))\n\n image = self.X[index]\n\n if self.divide:\n image = image / 255.\n image = torch.from_numpy(image).float().permute([2, 0, 1]).contiguous()\n if self.debug:\n print(image.shape)\n\n if not self.is_test:\n target = self.y[index]\n return image, target\n\n if self.is_test:\n return (image,)\n", "sub_path": "pytorch/torch_dataset.py", "file_name": "torch_dataset.py", "file_ext": "py", "file_size_in_byte": 883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "428871614", "text": "#!/usr/bin/env python\n\"\"\"Problem Set 1 Code for QCB505 Fall 2020\n\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n__author__ = \"Scott Wolf\"\n__date__ = \"20200908\"\n__credits__ = [\"Scott Wolf\"]\n__version__ = \"1\"\n__status__ = \"Prototype\"\n__url__ = \"https://www.dropbox.com/sh/jauik83zfg0rtfe/AACugg-_9g4Mfo2fPIUq_zQea?dl=0\"\n\nL = 5\nkappa = 1\nbeta = 0.5\nNsteps = 100000\nEE = np.zeros((Nsteps, 1))\nS = np.zeros((Nsteps, 1))\n\nfor t in range(1, Nsteps):\n sold = S[t - 1]\n Eold = (kappa / 2) * ((sold / L) ** 2)\n snew = sold.copy()\n step = np.sign(np.random.normal())\n snew = sold + step\n Enew = (kappa / 2) * ((snew / L) ** 2)\n if np.exp(-beta * (Enew - Eold)) > np.random.uniform():\n S[t] = snew\n EE[t] = Enew\n else:\n S[t] = sold\n EE[t] = Eold\n\nplt.hist(np.sum(S, axis=1))\nplt.show()\n\nplt.plot(EE[0:1000])\nplt.show()\n\n# Section 2\n\nN = 100\nNsteps = 10000\ns = np.zeros((Nsteps, N))\ns[0, ] = np.sign(np.random.randn(N))\nh = 1\nbeta = 2\nEE = np.zeros(Nsteps)\n\nfor t in range(1,Nsteps):\n sold = s[t - 1,]\n Eold = -h * np.sum(sold)\n snew = sold.copy()\n flip = np.random.choice(100)\n snew[flip] = -sold[flip]\n Enew = -h * np.sum(snew)\n if np.exp(-beta * (Enew - Eold)) > np.random.rand(1):\n s[t,] = snew\n EE[t] = Enew\n else:\n s[t,] = sold\n EE[t] = Eold\n\nplt.hist(np.sum(s, axis=1))\nplt.show()\n\nplt.plot(EE/N)\nplt.show()\n\n# Section 3\n\nL = 10\nN = L**2\nnn = np.zeros((L,L))\nfor i in range(0,L):\n for j in range(0,L):\n nn[i,j] = (i-1)*L 
+(j-1) + 1", "sub_path": "20200908_pset/20200908_pset.py", "file_name": "20200908_pset.py", "file_ext": "py", "file_size_in_byte": 1551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "1481758", "text": "# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport time\nimport glob\nimport shutil\n\nDEBUG = False\nWS = '\\t'\nTYPE_PRE = \"de.julielab.jcore.types.\"\nPIPENAME = \"\"\nDEP_LIST = []\nDIR_LIST = []\nCAP_PROVIDED = []\nJSON_FILE = \"coordinates.json\"\n\n### HEADER ###\nHEAD = (\n\"\"\"\\n\"\"\" 
+\n\"\"\"\\n\"\"\"\n)\n\n\n### END ###\nEND = (\n\"\"\"\\t\\n\"\"\" +\n\"\"\"\\t\\t-1\\n\"\"\" +\n\"\"\"\\t\\timmediate\\n\"\"\" +\n\"\"\"\\t\\t\\n\"\"\" +\n\"\"\"\\t\\t\\n\"\"\" +\n\"\"\"\\t\\n\"\"\" +\n\"\"\"\\n\"\"\"\n)\n\n\n### PROJECTS COORDINATES ###\nJCOORDS = None\nwith open(JSON_FILE) as jfile:\n JCOORDS = json.load(jfile)\n # add short names (derived from key names) to components\nfor component in list(JCOORDS.keys()):\n if component != \"jcore version\":\n for short in list(JCOORDS[component]):\n JCOORDS[component][short][\"short\"] = short\n\nC_MAP = {\n \"cr\": {\"None\": \"None\"},\n \"ae\": {\"None\": \"None\"},\n \"cc\": {\"None\": \"None\"}\n }\n\nA_MAP = {\n \"cr\": \"None\",\n \"ae\": [\"None\"],\n \"cc\": \"None\"\n }\n\nc_dict = {\n \"cr\": \"Collection Reader\",\n \"ae\": \"Analysis Engine\",\n \"cc\": \"CAS Consumer\"\n }\n\n### BUILDING FUNCTIONS ###\ndef buildValue(vType, vValue):\n # e.g. data/inFiles\n VALUE = (\n \"\"\"<{}>{}\"\"\"\n ).format(vType, vValue, vType)\n\n return VALUE\n\n\ndef buildArrayValue(vType, vValues, tab=1):\n vValue = \"\\n\".join(\n [\"\\t{}{}\".format((tab + 1) * WS,\n buildValue(vType, v)) for v in vValues])\n vValue = vValue + \"\\n\"\n ARRAYVALUE = (\n \"\"\"\\n\"\"\" +\n \"\"\"{}\"\"\" +\n \"\"\"{}\"\"\"\n ).format(vValue,\n (tab + 1) * WS)\n\n return ARRAYVALUE\n\n\ndef buildNameValue(nvName, nvValue, tab=1):\n # e.g. NAME = InputDirectory\n NAME_VALUE_PAIR = (\n \"\"\"{}\\n\"\"\" +\n \"\"\"{}\\t{}\\n\"\"\" +\n \"\"\"{}\\t\\n\"\"\" +\n \"\"\"{}\\t\\t{}\\n\"\"\" +\n \"\"\"{}\\t\\n\"\"\" +\n \"\"\"{}\\n\"\"\"\n ).format(tab * WS, tab * WS,\n nvName, tab * WS, tab * WS,\n nvValue, tab * WS, tab * WS)\n\n return NAME_VALUE_PAIR\n\n\ndef buildConfigParams(cp_dict, tab=1):\n global DIR_LIST\n cp_string = \"\"\n cp_param_list = []\n for i in [\"mandatory\", \"optional\"]:\n cp_param_list.extend(cp_dict[i])\n for param in cp_param_list:\n if len(param[\"default\"]) != 0:\n if not isinstance(param[\"default\"], list):\n nv_pair = buildNameValue(param[\"name\"],\n buildValue(param[\"type\"], param[\"default\"]), tab + 1)\n else:\n # value is an ... \n nv_pair = buildNameValue(\n param[\"name\"],\n buildArrayValue(param[\"type\"], param[\"default\"], tab + 2),\n tab + 1)\n if param.get(\"dir\", False):\n if param[\"dir\"] == 'file':\n DIR_LIST.append(\n os.path.dirname(param[\"default\"]))\n elif param[\"dir\"] == 'folder':\n DIR_LIST.append(param[\"default\"])\n cp_string += nv_pair\n cp_string = cp_string.rstrip('\\n')\n\n CONFIG_PARAMS = (\n \"\"\"{}\\n\"\"\" +\n \"\"\"{}\\n\"\"\" +\n \"\"\"{}\"\"\"\n ).format(tab * WS, cp_string, tab * WS)\n\n return CONFIG_PARAMS\n\n\ndef buildCollectionReader(cr_dict):\n # e.g. 
cDescName=de.julielab.jcore.reader.file.desc.jcore-file-reader\n crDescName = cr_dict[\"desc\"]\n crConfigParams = buildConfigParams(cr_dict, 3)\n add2DepList(cr_dict)\n\n CR = (\n \"\"\"\\t\\n\"\"\" +\n \"\"\"\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"{}\\n\"\"\" +\n \"\"\"\\t\\t\\n\"\"\" +\n \"\"\"\\t\\n\"\"\").format(crDescName, crConfigParams)\n\n return CR\n\n\ndef buildCASProcs(casProcs, is_ae=True):\n global PIPENAME\n procs = \"\"\n if isinstance(casProcs, list):\n PIPENAME = casProcs[-1][\"short\"]\n for proc in casProcs:\n cpDescName = proc[\"desc\"]\n name = \", \".join([proc[\"name\"], proc[\"model\"]])\n cp = buildConfigParams(proc, 3)\n procs += buildCASProc(name, cpDescName, cp)\n add2DepList(proc)\n procs = procs.rstrip(\"\\n\")\n else:\n cp = buildConfigParams(casProcs, 3)\n cpDescName = casProcs[\"desc\"]\n procs = buildCASProc(casProcs[\"name\"], cpDescName, cp)\n add2DepList(casProcs)\n procs = procs.rstrip(\"\\n\")\n CAS_PROCS = \"\"\n if is_ae:\n CAS_PROCS =\\\n \"\"\"\\t\\n\"\"\"\n CAS_PROCS += (\"\"\"{}\\n\"\"\").format(procs)\n if not is_ae:\n CAS_PROCS += \"\"\"\\t\\n\"\"\"\n\n return CAS_PROCS\n\n\ndef buildCASProc(casName, casDescName, casCP):\n ### SINGLE CAS PROCESSOR ###\n CAS_PROC = (\n \"\"\"\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"{}\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\n\"\"\").format(casName, casDescName, casCP)\n\n return CAS_PROC\n\n\ndef add2DepList(cDict):\n global DEP_LIST\n # if a component has multiple descriptors, the json file has a flag\n # \"mult_desc: true\"; to be on par with the naming convention, the\n # different descriptors all have the same prefix (i.e. 
name of the mvn\n # artifact) and a \"-\" delimited suffix\n cDescName = cDict[\"desc\"]\n if (cDict.get(\"mult_desc\", \"false\")).lower() == \"true\":\n dep = cDescName.split('.')[-1]\n dep = \"-\".join(dep.split(\"-\")[:-1])\n else:\n dep = cDescName.split('.')[-1]\n DEP_LIST.append(dep)\n\n\ndef quitSystem():\n if DEBUG:\n print(\"\\n[DEBUG] Map of Components:\")\n print(A_MAP)\n sys.exit()\n\n\ndef clearScreen():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef removeLastComponent(component):\n if component == \"ae\":\n tmp = A_MAP[component].pop()\n prevComp = tmp\n if (tmp is \"None\") or (len(A_MAP[component]) == 0):\n A_MAP[component].append(\"None\")\n else:\n prevComp = A_MAP[component]\n A_MAP[component] = \"None\"\n checkForCapabilities(component, prevComp, remove=True)\n\n\ndef getCompName(component, index):\n name = \"None\"\n jShort = C_MAP[component][index]\n if jShort != \"None\":\n if component != \"ae\":\n name = \"{}\".format(\n JCOORDS[(c_dict[component]).lower()][jShort][\"name\"])\n else:\n name = \"{}, {}\".format(\n JCOORDS[(c_dict[component]).lower()][jShort][\"name\"],\n JCOORDS[(c_dict[component]).lower()][jShort][\"model\"]\n )\n\n return name\n\n\ndef checkForCapabilities(comp, coKey, remove=False):\n global CAP_PROVIDED\n\n fullCat = (c_dict[comp]).lower()\n cKey = C_MAP[comp][coKey]\n needCap = JCOORDS[fullCat][cKey][\"capabilities\"][\"in\"]\n\n matchCap = False\n missingCap = False\n unmetCap = []\n if not remove:\n if DEBUG:\n print(\"Provided capabilities: {}\\n\".format(CAP_PROVIDED))\n print(\"Component needs cap: {} - {}:\\n\\t{}\".format(\n fullCat, cKey, needCap))\n\n if len(needCap) <= 0:\n matchCap = True\n else:\n for inCap in needCap:\n if inCap not in CAP_PROVIDED:\n missingCap = True\n matchCap = False\n unmetCap.append(inCap)\n elif not missingCap:\n matchCap = True\n\n if matchCap:\n CAP_PROVIDED.extend(JCOORDS[fullCat][cKey][\"capabilities\"][\"out\"])\n else:\n remCap = JCOORDS[fullCat][cKey][\"capabilities\"][\"out\"]\n for oCap in remCap:\n CAP_PROVIDED.remove(oCap)\n\n return matchCap, unmetCap\n\n\ndef getComponent(component=\"ae\"):\n comp_string = \"\"\n comps = JCOORDS[(c_dict[component]).lower()]\n count = 0\n for i in sorted(list(comps.keys())):\n C_MAP[component][str(count)] = i\n if component == \"ae\":\n comp_string += \"\\t[{:>2}] {}, {}\\n\".format(count, comps[i][\"name\"],\n comps[i][\"model\"])\n else:\n comp_string += \"\\t[{:>2}] {}\\n\".format(count, comps[i][\"name\"])\n count += 1\n\n cr = None\n choice = \"\"\"Choose a {} from the following list:\"\"\"\n if component == \"ae\":\n choice = \"\"\"Add an {} from the following list:\"\"\"\n\n displ = \"\"\n while cr is None or cr not in [\"q\", \"p\"]:\n displayPipeline()\n cr = input(\n (choice +\n \"\"\"\\n{}\\nChoice (p for 'back to previous'; q for 'quit'; \"\"\" +\n \"\"\"r for 'remove last'){}: \"\"\")\n .format(c_dict[component], comp_string, displ)\n )\n cr = cr.lower()\n if cr in [str(x) for x in range(len(C_MAP[component]) - 1)]:\n matchCap, needCap = checkForCapabilities(component, cr)\n if matchCap:\n displ = \"\"\n if component == \"ae\":\n # add ae to stack\n if \"None\" in A_MAP[component]:\n A_MAP[component].remove(\"None\")\n A_MAP[component].append(cr)\n else:\n # replace previous cr/cc\n prevComp = A_MAP[component]\n A_MAP[component] = cr\n if prevComp != \"None\":\n checkForCapabilities(component, prevComp, remove=True)\n else:\n # report unmatched capabilities\n displ = (\"\\n[Input Capabilities aren't provided for {}: {} ]\"\n 
).format(getCompName(component, cr), needCap)\n\n if cr == \"r\":\n displ = \"\"\n removeLastComponent(component)\n\n if cr == \"q\":\n quitSystem()\n elif cr == \"p\":\n modifyPipeline()\n\n\ndef displayPipeline():\n clearScreen()\n print((\"\"\"The current pipeline consists of\\n\"\"\" +\n \"\"\"Collection Reader:\\n\\t{}\"\"\" +\n \"\"\"Analysis Engine(s):\\n\\t{}\"\"\" +\n \"\"\"Collection Consumer:\\n\\t{}\"\"\" +\n \"\"\"Capabilities:\\n\\t{}\\n\"\"\"\n ).format(getCompName(\"cr\", A_MAP[\"cr\"]) + \"\\n\",\n \"; \".join([getCompName(\"ae\", x) for x in A_MAP[\"ae\"]]) + \"\\n\",\n getCompName(\"cc\", A_MAP[\"cc\"]) + \"\\n\",\n \"; \".join(sorted(set(CAP_PROVIDED))))\n )\n\n\ndef modifyPipeline():\n ac = None\n while ac is None or ac not in [\"r\", \"a\", \"c\", \"q\", \"n\"]:\n displayPipeline()\n ac = input(\"\"\"modify (r)eader, (a)nalysis engines or (c)onsumer\\n\"\"\" +\n \"\"\"(n for 'build current pipeline'; q for 'quit'): \"\"\")\n ac = ac.lower()\n\n if ac == \"q\":\n quitSystem()\n elif ac == \"r\":\n getComponent(\"cr\")\n elif ac == \"c\":\n getComponent(\"cc\")\n elif ac == \"n\":\n if DEBUG:\n print(\"\\n[DEBUG] Map of Components:\")\n print(A_MAP)\n pass\n else:\n getComponent()\n\n\ndef writePom():\n print(\"write POM...\")\n sys.stdout.flush()\n time.sleep(0.5)\n\n dependencies = \"\"\n for dep in DEP_LIST:\n dependencies += (\n \"\"\"\\t\\t\\n\"\"\" +\n \"\"\"\\t\\t\\tde.julielab\\n\"\"\" +\n \"\"\"\\t\\t\\t{}\\n\"\"\" +\n \"\"\"\\t\\t\\t[${{jcore-version}},]\\n\"\"\" +\n \"\"\"\\t\\t\\n\"\"\"\n ).format(dep)\n dependencies = dependencies.rstrip(\"\\n\")\n\n out_string = (\n \"\"\"\\n\"\"\" +\n \"\"\"\\n\"\"\" +\n \"\"\"\\t4.0.0\\n\"\"\" +\n \"\"\"\\t\\n\"\"\" +\n \"\"\"\\t\\tde.julielab\\n\"\"\" +\n \"\"\"\\t\\tjcore-pipelines\\n\"\"\" +\n \"\"\"\\t\\t{}\\n\"\"\" +\n \"\"\"\\t\\n\"\"\" +\n \"\"\"\\t{}\\n\"\"\" +\n \"\"\"\\t{}\\n\"\"\" +\n \"\"\"\\t\\n\"\"\" +\n \"\"\"{}\\n\"\"\" +\n \"\"\"\\t\\n\"\"\" +\n \"\"\"\"\"\"\n ).format(JCOORDS[\"jcore version\"], PIPENAME + \"-pipeline\",\n JCOORDS[\"analysis engine\"][PIPENAME][\"name\"] + \" Pipeline\",\n dependencies)\n with open(\"pom.xml\", 'w') as out_file:\n out_file.write(out_string)\n\n\ndef copyInstallScript():\n iScript = os.path.abspath(\"../installComponents_template\")\n shutil.copy(iScript, \"installComponents.sh\")\n\n subprocess.call(\n [\"chmod\", \"+x\", \"installComponents.sh\"]\n )\n\n\ndef writeExecutionScript(cpeName):\n print(\"create Scripts...\")\n sys.stdout.flush()\n time.sleep(0.5)\n\n xScript = (\n \"\"\"#!/bin/bash\\n\\n\"\"\" +\n \"\"\"java_libs=target/dependency\\n\\n\"\"\" +\n \"\"\"export CLASSPATH=`for i in $java_libs/*.jar; \"\"\" +\n \"\"\"do echo -n \"$i:\";done;echo -n \"\"`\\n\\n\"\"\" +\n \"\"\"$UIMA_HOME/bin/runCPE.sh {}\"\"\").format(cpeName)\n\n with open(\"runPipeline.sh\", 'w') as out_file:\n out_file.write(xScript)\n\n subprocess.call(\n [\"chmod\", \"+x\", \"runPipeline.sh\"]\n )\n\n\ndef createDirs():\n print(\"create Directories...\")\n sys.stdout.flush()\n time.sleep(0.5)\n for iDir in DIR_LIST:\n if not os.path.exists(iDir):\n os.makedirs(iDir)\n\ndef installTrove():\n foo = \"target\"+os.sep+\"dependency\"\n src_fi = \"jcore-mstparser-ae-2.*.jar\"\n os.chdir(foo)\n mst = glob.glob(src_fi)[0]\n\n # extract trove jar from mst repo\n subprocess.call(\n [\"jar\",\"xf\",mst,\"repo/de/julielab/jules-trove/1.3/jules-trove-1.3.jar\"]\n )\n # move trove jar to current dir\n shutil.copy2(\"repo/de/julielab/jules-trove/1.3/jules-trove-1.3.jar\",\"./\")\n\n # delete old folder\n 
shutil.rmtree(\"repo/\")\n\n # install jules-trove using maven as well?\n subprocess.call(\n [\"mvn\",\"install:install-file\",\"-Dfile=jules-trove-1.3.jar\",\"-DgroupId=de.julielab\",\n \"-DartifactId=jules-trove\",\"-Dversion=1.3\",\"-Dpackaging=jar\"]\n )\n\ndef installDependencies():\n print(\"install Dependencies...\")\n sys.stdout.flush()\n time.sleep(0.5)\n # run \"installDependencies.sh\" --> if all goes smoothly, fine\n # else tell user to correct errors and run \"installDependcies.sh\" again\n subprocess.call(\n [\"./installComponents.sh\"]\n )\n # if a component is mst-parser, install jules-trove\n # run script again?\n for ae_key in A_MAP[\"ae\"]:\n ae_key = C_MAP[\"ae\"][ae_key]\n if ae_key.startswith(\"mst\"):\n installTrove()\n\ndef buildCurrentPipeline():\n # COLLECTION READER\n cr = None\n cr_key = C_MAP[\"cr\"][A_MAP[\"cr\"]]\n cr_string = \"\"\n if cr_key.lower() != \"none\":\n cr = JCOORDS[\"collection reader\"][cr_key]\n cr_string = buildCollectionReader(cr)\n\n # ANALYSIS ENGINES\n ae_string = \"\"\n ae_list = []\n for ae_key in A_MAP[\"ae\"]:\n ae_key = C_MAP[\"ae\"][ae_key]\n ae = None\n if ae_key.lower() != \"none\":\n ae = JCOORDS[\"analysis engine\"][ae_key]\n ae_list.append(ae)\n if len(ae_list) != 0:\n ae_string = buildCASProcs(ae_list)\n\n # CAS CONSUMERS\n cc = None\n cc_key = C_MAP[\"cc\"][A_MAP[\"cc\"]]\n cc_string = \"\"\n if cc_key.lower() != \"none\":\n cc = JCOORDS[\"cas consumer\"][cc_key]\n cc_string = buildCASProcs(cc, False)\n\n if DEBUG:\n print(\"[DEBUG] List of Dependencies:\\n{}\".format(DEP_LIST))\n\n # write out\n foo = \"jcore-{}-pipeline\".format(PIPENAME)\n if not os.path.exists(foo):\n os.mkdir(foo)\n os.chdir(foo)\n fiName = \"{}-cpe.xml\".format(PIPENAME)\n out_string = HEAD + cr_string + ae_string + cc_string + END\n with open(fiName, 'w') as out_file:\n out_file.write(out_string)\n\n createDirs()\n writePom()\n copyInstallScript()\n writeExecutionScript(fiName)\n installDependencies()\n\n os.chdir(\"..\")\n\ndef checkSystemDependencies():\n return False\n\nif __name__ == \"__main__\":\n if sys.version.startswith(\"3\"):\n if len(sys.argv) > 1:\n if sys.argv[1].lower() == \"true\":\n DEBUG = True\n\n # check for UIMA and Maven\n checkSystemDependencies()\n\n modifyPipeline()\n\n print(\"\\nbuild pipeline ...\")\n sys.stdout.flush()\n time.sleep(0.5)\n buildCurrentPipeline()\n else:\n print(\"Your Python Version is {}\".format(sys.version))\n print(\"Please use Python Version 3.x\")\n", "sub_path": "jcore-cpe-builder/cpe-builder.py", "file_name": "cpe-builder.py", "file_ext": "py", "file_size_in_byte": 17223, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 231, "usage_type": "call"}, {"api_name": "os.system", "line_number": 235, "usage_type": "call"}, {"api_name": "os.name", "line_number": 235, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 400, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 400, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 401, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 440, "usage_type": "call"}, {"api_name": "os.path", "line_number": 440, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 
441, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 443, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 450, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 450, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 451, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 463, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 470, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 470, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 473, "usage_type": "call"}, {"api_name": "os.path", "line_number": 473, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 474, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 477, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 479, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 480, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 483, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 487, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 490, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 493, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 500, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 500, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 501, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 548, "usage_type": "call"}, {"api_name": "os.path", "line_number": 548, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 549, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 550, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 562, "usage_type": "call"}, {"api_name": "sys.version.startswith", "line_number": 568, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 568, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 569, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 570, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 579, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 579, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 580, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 583, "usage_type": "attribute"}]} +{"seq_id": "243793853", "text": "import requests\nimport pymysql\nfrom bs4 import BeautifulSoup\n\n\ndef download_page(url):\n headers = {\"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0\"}\n resource = requests.get(url, headers=headers)\n html = resource.text\n\n return html\n\n\ndef get_article_list_from_html(html):\n article_list = []\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find(id='content-left')\n\n for i in content.find_all(class_='article'):\n article_list.append(i)\n\n return article_list\n\n\ndef get_content(article):\n # print(article.find('span', class_='contentForAll'))\n if None is article.find('span', class_='contentForAll'):\n return article.find(class_='content').span.text\n else:\n base_url = 'https://www.qiushibaike.com'\n url = base_url + article.find('a', class_='contentHerf')['href']\n return get_content_from_url(url)\n\n\ndef get_content_from_url(url):\n html = 
download_page(url)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find('div', class_='content').text\n\n\ndef save_to_db(id, author, author_age, content, up_num, comment_num):\n host = 'localhost'\n port = 3306\n user = 'root'\n password = 'hello9504'\n db = 'crawler'\n charset = 'utf8mb4'\n\n sql = (\"\"\"insert into qiushi(id, author, author_age, content, \n up_num, commont_num) values('{}', '{}', {}, '{}', {}, {})\"\"\").format(id, author, author_age, content, up_num, comment_num)\n\n conn = pymysql.connect(host=host, port=port, user=user, password=password, db=db, charset=charset)\n cursor = conn.cursor()\n cursor.execute(sql)\n conn.commit()\n\n\ndef crawler_qiushi():\n html = download_page('https://www.qiushibaike.com/text/')\n article_list = get_article_list_from_html(html)\n for i in article_list:\n id = i.a['href']\n author = i.find('h2').text\n if author != '匿名用户':\n author_age = int(i.find('div', class_='articleGender').text)\n else:\n author_age = -1\n content = get_content(i)\n up_num = int(i.find('span', class_='stats-vote').find('i', class_='number').text)\n comment_num = int(i.find('span', class_='stats-comments').find('i', class_='number').text)\n save_to_db(id, author, author_age, content, up_num, comment_num)\n\n\ncrawler_qiushi()\n\n\n", "sub_path": "com/hello/crawler/1_qiushibaike_crawler.py", "file_name": "1_qiushibaike_crawler.py", "file_ext": "py", "file_size_in_byte": 2353, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 37, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "70817364", "text": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch import optim\r\nfrom torch.utils.data import DataLoader\r\nfrom HIGHLIGHTdataset import SEVDataset\r\nimport argparse\r\nimport numpy as np\r\nimport os\r\nfrom tqdm import tqdm\r\nimport time\r\nimport IPython\r\nfrom torchvision import datasets, transforms\r\nimport warnings\r\nimport torch.optim.lr_scheduler as lr_scheduler\r\nfrom torch.utils.data.dataloader import default_collate\r\nfrom efficientnet_pytorch import EfficientNet # library that must be imported to use EfficientNet\r\nfrom label_smooth import LabelSmoothSoftmaxCE\r\n\r\ncurdir = os.path.dirname(__file__)\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--batch-size\", default=32, type=int)\r\n parser.add_argument(\"--lr\", default=1e-3, type=float)\r\n parser.add_argument(\"--weight-decay\", default=1e-3, type=float)\r\n parser.add_argument(\"--num-epoch\", default=10, type=int)\r\n parser.add_argument(\"--save-interval\", default=1, type=int)\r\n parser.add_argument(\"--step-interval\", default=10, type=int)\r\n parser.add_argument(\"--step-save\", default=1000, type=int)\r\n parser.add_argument(\"--evaluate-step\", default=100, type=int)\r\n parser.add_argument(\"--save-dir\", default=os.path.join(curdir, \"highlightmodels/\"))\r\n parser.add_argument(\"--total-updates\", default=50000, type=int)\r\n parser.add_argument('--gradient-accumulation-steps',\r\n type=int,\r\n default=10,\r\n help=\"Number of update steps to accumulate before performing a backward/update pass.\")\r\n parser.add_argument(\"--model-type\", default='efficientnet-b0')\r\n parser.add_argument(\"--class_num\", 
default=2, type=int)\r\n parser.add_argument(\"--feature_extract\", default=True, type=bool)\r\n parser.add_argument(\"--cuda_num\", default=2, type=int)\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef efficientnet_params(model_name):\r\n \"\"\" Map EfficientNet model name to parameter coefficients. \"\"\"\r\n params_dict = {\r\n # Coefficients: width,depth,res,dropout\r\n 'efficientnet-b0': (1.0, 1.0, 224, 0.2),\r\n 'efficientnet-b1': (1.0, 1.1, 240, 0.2),\r\n 'efficientnet-b2': (1.1, 1.2, 260, 0.3),\r\n 'efficientnet-b3': (1.2, 1.4, 300, 0.3),\r\n 'efficientnet-b4': (1.4, 1.8, 380, 0.4),\r\n 'efficientnet-b5': (1.6, 2.2, 456, 0.4),\r\n 'efficientnet-b6': (1.8, 2.6, 528, 0.5),\r\n 'efficientnet-b7': (2.0, 3.1, 600, 0.5),\r\n }\r\n return params_dict[model_name]\r\n\r\ndef train(args):\r\n print(args)\r\n args.save_dir = args.save_dir + args.model_type\r\n args.save_dir += \"_highlight_\"\r\n args.save_dir = args.save_dir + time.strftime('%Y-%m-%d-%H-%M-%S')\r\n os.makedirs(args.save_dir, exist_ok=True)\r\n print(args.save_dir, 'make!')\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n torch.cuda.set_device(args.cuda_num)\r\n print(device)\r\n if torch.cuda.is_available():\r\n print('device: ', torch.cuda.current_device())\r\n\r\n data_transform = transforms.Compose([\r\n transforms.Resize(efficientnet_params(args.model_type)[2]),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\r\n ])\r\n\r\n train_set = SEVDataset('/mnt/sda1/songzimeng/highlightdata/train/', transform=data_transform)\r\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)\r\n\r\n valid_set = SEVDataset('/mnt/sda1/songzimeng/highlightdata/valid/', transform=data_transform)\r\n valid_loader = DataLoader(valid_set, batch_size=args.batch_size, shuffle=True)\r\n\r\n model = EfficientNet.from_pretrained(args.model_type, num_classes=args.class_num).to(device)\r\n # model._fc.out_features = args.class_num\r\n\r\n params_to_update = model.parameters()\r\n print(\"Params to learn:\")\r\n if args.feature_extract:\r\n params_to_update = []\r\n for name, param in model.named_parameters():\r\n if param.requires_grad == True:\r\n params_to_update.append(param)\r\n print(\"\\t\", name)\r\n else:\r\n for name, param in model.named_parameters():\r\n if param.requires_grad == True:\r\n print(\"\\t\", name)\r\n\r\n criterion = LabelSmoothSoftmaxCE()\r\n optimizer = optim.Adam(params_to_update, lr=args.lr, betas=(0.9, 0.999), eps=1e-9)\r\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.7, patience=3, verbose=True)\r\n\r\n\r\n global_step = 0\r\n # evaluate(model, valid_set)\r\n #step_interval = int(64 / args.batch_size) * 25\r\n #print('step_interval: ', step_interval)\r\n\r\n best_acc = 0\r\n\r\n for epoch in range(args.num_epoch):\r\n print('epoch: ', epoch+1)\r\n\r\n losses = []\r\n total = 0\r\n correct = 0\r\n for step, samples in enumerate(train_loader, 0):\r\n model.train()\r\n imgs, labels = samples['image'].to(device).float(), samples['label'].to(device)\r\n optimizer.zero_grad()\r\n outputs = model(imgs)\r\n loss = criterion(outputs, labels)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n losses.append(loss.item())\r\n\r\n _, predicted = torch.max(outputs, 1)\r\n total += labels.size(0)\r\n correct += predicted.eq(labels).cpu().sum()\r\n\r\n if (step + 1) % args.step_interval == 0:\r\n print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '\r\n % 
(epoch + 1, step + 1, np.mean(losses), 100. * float(correct) / float(total)))\r\n\r\n\r\n\r\n if (step + 1) % args.step_save == 0:\r\n torch.save(model, args.save_dir + \"/highlight_step_save.pt\")\r\n losses = []\r\n total = 0\r\n correct = 0\r\n\r\n with torch.no_grad():\r\n print('Evaluate')\r\n eval_correct = 0\r\n eval_total = 0\r\n evaluate_step = 0\r\n for samples in valid_loader:\r\n model.eval()\r\n imgs, labels = samples['image'].to(device).float(), samples['label'].to(device)\r\n outputs = model(imgs)\r\n # take the class with the highest score (the index into outputs.data)\r\n _, predicted = torch.max(outputs.data, 1)\r\n eval_total += labels.size(0)\r\n eval_correct += (predicted == labels).cpu().sum()\r\n\r\n evaluate_step += 1\r\n if evaluate_step >= args.evaluate_step :\r\n break\r\n\r\n print('Evaluated acc:%.3f%%' % (100. * float(eval_correct) / float(eval_total)))\r\n acc = 100. * float(eval_correct) / float(eval_total)\r\n #scheduler.step(acc)\r\n\r\n if acc > best_acc:\r\n torch.save(model, args.save_dir + \"/highlight_best_save.pt\")\r\n best_acc = acc\r\n\r\n\r\n\r\n if (epoch + 1) % args.save_interval == 0 or epoch == 0:\r\n torch.save(model, args.save_dir + \"/highlight_{}.pt\".format(epoch + 1))\r\n\r\n if optimizer.param_groups[0]['lr'] == 0:\r\n break\r\n\r\n with torch.no_grad():\r\n print('Evaluate')\r\n eval_correct = 0\r\n eval_total = 0\r\n evaluate_step = 0\r\n for samples in valid_loader:\r\n model.eval()\r\n imgs, labels = samples['image'].to(device).float(), samples['label'].to(device)\r\n outputs = model(imgs)\r\n # take the class with the highest score (the index into outputs.data)\r\n _, predicted = torch.max(outputs.data, 1)\r\n eval_total += labels.size(0)\r\n eval_correct += (predicted == labels).cpu().sum()\r\n\r\n evaluate_step += 1\r\n if evaluate_step >= (args.evaluate_step * 10):\r\n break\r\n\r\n print('Evaluated acc:%.3f%%' % (100. * float(eval_correct) / float(eval_total)))\r\n eval_acc = 100. 
* float(eval_correct) / float(eval_total)\r\n scheduler.step(eval_acc)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = get_args()\r\n train(args)\r\n", "sub_path": "pretrain/HIGHLIGHTclassifier/efficientnet_train.py", "file_name": "efficientnet_train.py", "file_ext": "py", "file_size_in_byte": 8333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 65, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.cuda.current_device", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 74, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 74, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 76, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 77, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 78, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 78, "usage_type": "name"}, {"api_name": "HIGHLIGHTdataset.SEVDataset", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 82, "usage_type": "call"}, {"api_name": "HIGHLIGHTdataset.SEVDataset", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 85, "usage_type": "call"}, {"api_name": "efficientnet_pytorch.EfficientNet.from_pretrained", "line_number": 87, "usage_type": "call"}, {"api_name": "efficientnet_pytorch.EfficientNet", "line_number": 87, "usage_type": "name"}, {"api_name": "label_smooth.LabelSmoothSoftmaxCE", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.mean", 
"line_number": 138, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "307690580", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\n\nimport chromedriver_binary # Adds chromedriver binary to path\nimport functools #for error handling\nimport re\n\nfrom time import sleep #to pause execution\n\ndef exception(fn):\n \"\"\"\n A decorator that wraps the passed in function and logs exceptions should one occur\n \"\"\"\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except NoSuchElementException:\n print(\"There was a NoSuchElementException in \" + fn.__name__) \n\n\n return wrapper\n\nclass IT_Request_Automator():\n\tbrowser = \"\"\n\tuser_name, email, phone = \"\", \"\", \"\"\n\t\"\"\"\n\tA Class for automating it requests\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.user_name = \"Jeremy Marino\"\n\t\tself.user_email = \"Jeremy@ccbf.net\"\n\t\tself.user_phone = \"951-764-2881\"\n\t\t# self.browser = \"\"\n\n\tdef start_browser(self):\n\t\tself.browser = webdriver.Chrome()\n\t\treturn self.browser\n\n\tdef is_valid_email(self, email_input):\n\t\temail_regex = re.compile(r\"\\b\\w+@{1}\\w+.{1}\\w+\\b\")\n\t\tmatch = email_regex.search(email_input)\n\t\tif match:\n\t\t\treturn True\n\t\treturn False\n\n\tdef get_email(self):\n\t\tself.user_email = input(\"What's the email? \")\n\t\tif not (self.is_valid_email(self.user_email)):\n\t\t\tprint(\"That's not valid try again\\n\")\n\t\t\tself.get_email()\n\n\t\treturn self.user_email\n\n\tdef get_user_info(self):\n\t\t# global user_name, user_email, user_phone\n\n\t\tdefault_email = input(\"Do you want to proceed with e-mail 'Jeremy@ccbf.net'? 
(y/n): \")\n\t\tif default_email == \"y\":\n\t\t\tself.user_email = \"Jeremy@ccbf.net\"\n\t\telse: \n\t\t\tself.user_email = self.get_email()\n\n\t@exception\n\tdef get_request_page(self):\n\t\tself.browser.get(\"https://ccbf.atlassian.net/servicedesk/customer/portal/2/group/2/create/10002\")# go straight to the page\n\t\treturn self.browser\n\n\t@exception\n\tdef type_email(self):\n\t\temail_field = self.browser.find_element_by_css_selector(\"#email\")\n\t\temail_field.click()\n\t\temail_field.send_keys(self.user_email)\n\n\t@exception\n\tdef type_summary(self):\n\t\tsummary_field = self.browser.find_element_by_css_selector(\"#summary\")\n\t\tsummary_field.click()\n\t\tsummary_field.send_keys(self.it_summary)\n\n\t@exception\n\tdef get_summary(self):\n\t\tself.it_summary = input(\"What's the summary?\\n\")\n\n\t@exception\n\tdef type_description(self):\n\t\tdescription_field = self.browser.find_element_by_css_selector(\"#description\")\n\t\tdescription_field.click()\n\t\tdescription_field.send_keys(self.it_description)\n\n\t@exception\n\tdef get_description(self):\n\t\tself.it_description = input(\"What's the description?\\n\")\n\ndef main():\n it_form = IT_Request_Automator()\n it_form.get_user_info()\n it_form.get_summary()\n it_form.get_description()\n global browser_automate #stops browser from closing\n browser_automate = it_form.start_browser()\n it_form.get_request_page()\n sleep(2.5); #sleep for page to load\n it_form.type_summary()\n it_form.type_email()\n it_form.type_description()\n\nmain()\n\n", "sub_path": "Forms/it-form-request.py", "file_name": "it-form-request.py", "file_ext": "py", "file_size_in_byte": 3003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 19, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 38, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "310803468", "text": "from htmlshow import *\nimport datetime\n\nimport locale\nlocale.setlocale(locale.LC_TIME, \"fr_FR\") # french\n'''\n# font-size:17px;\n font-family:pixlim;\n font-family:maniaccompact;\n font-family:smallishnarrow;\n font-family:consolas;\n font-family:smallishunaligne;\n font-size:0.6em;\n font-smooth: never;\n cellspacing:0;\n border-spacing: 0;\n vertical-align: top;\n font-size: 1.4em;\n # table.outer > tr td {\n border-collapse: collapse;\n\n'''\n\ncss2_old = '''\n /* ALL TABLES*/\n table {\n }\n\n /* ########## OUTER ########## */\n table.outer tr td {\n border-collapse: collapse;\n position: relative;\n vertical-align: top;\n padding: 5px 15px 5px 15px;\n }\n /*\n padding: 10px 25px 10px 25px;\n margin: 5px 15px 5px 15px auto;\n */\n\n /* ########## INNER ########## */\n table.inner {\n /* border-collapse: collapse; */\n border-collapse: collapse;\n }\n table.inner tr td.empty { border:none; }\n table.inner tr td.weekend {background: #d8d8d8;}\n table.inner tr td, table.inner tr th {\n border: 1px solid #a00;\n border-spacing: 0px;\n width: 4em;\n height: 2em;\n vertical-align: top;\n line-height: 1;\n padding: 0 0 0 3px;\n\n font-family:pixlim;\n font-size:16px;\n }\n\n /* only border between months*/\n .large_cell {\n border-bottom: solid 5px #ddd;\n 
}\n .large_cell_bottom {\n border-bottom: none;\n }\n\n\n .month {\n font-family: monospace;\n font-weight: bold;\n font-size: 1.2em;\n text-align: center;\n }\n .month_top {\n position: absolute;\n font-family: monospace;\n font-weight: bold;\n font-size: 1.2em;\n top:10;\n left:10;\n }\n .month_bottom {\n position: absolute;\n font-family: monospace;\n font-weight: bold;\n font-size: 1.2em;\n bottom:10;\n right:10;\n }\n\n'''\n\ncss = '''\n /* ALL TABLES*/\n\n /* ########## OUTER ########## */\n table.outer tr td {\n border-collapse: collapse;\n position: relative;\n vertical-align: top;\n padding: 2px 15px 3px 15px;\n padding: 0px 15px 0px 15px;\n }\n /*\n padding: 10px 25px 10px 25px;\n margin: 5px 15px 5px 15px auto;\n */\n\n /* ########## INNER ########## */\n table.inner {\n /* border-collapse: collapse; */\n border-collapse: collapse;\n }\n table.inner tr td.empty { border:none; }\n table.inner tr td.weekend {background: #e4e4e4 padding-box;}\n table.inner tr td, table.inner tr th {\n border: 1px solid #999;\n\n width: 2.5em;\n height: 2em;\n\n width: 4em;\n height: 1.5em;\n height: 1.65em;\n height: 1.7em;\n\n\n vertical-align: top;\n line-height: 1;\n padding: 0 0 0 3px;\n\n font-family:pixlim;\n font-size:16px;\n }\n\n /* only border between months*/\n .large_cell {\n border-right: solid 5px #ddd;\n }\n .large_cell_bottom {\n border: none;\n }\n .leftish {position:absolute; top:-3px;}\n\n .month {\n font-family: monospace;\n font-weight: bold;\n font-size: 1.2em;\n text-align: center;\n }\n\n\n'''\n\n'''\n\n# border-collapse:collapse;\n border: 1px dashed #aaa;\n'''\nnope = '''\n td {\n margin:0;\n padding:0;\n padding-left: 10px;\n padding-right:10px;\n border-right: dotted grey 1px;\n border-top: dotted grey 1px;\n\n\n }\n\n padding:3px 15px 3px 15px;\n'''\n\nyears = [y for y in range(2020,2025)]\nimport calendar\n\nfor y in years:\n start = datetime.date(y, 1, 1)\n\n week_days = [start + datetime.timedelta(days=i) for i in range(367 if calendar.isleap(y) else 366)]\n # +0 to +365, mmmmh\n\n # weeks = []\n # ar = [i for i in range(50)]\n\n # splits = [ar[7*i:7*i+7] for i in range(1+50//7)]\n # for a in splits: print(a)\n # weeks = [week_days[7*i:7*i+7] for i in range(1+len(week_days)//7)]\n # for a in weeks: print([d.strftime(\"%a %Y-%b-%d\").lower() for d in a])\n\n html = open('cal-%d.html'%y,'w', encoding='utf-8')\n\n\n html_start(html, css)\n\n text_blob = ''\n\n # text_blob += '
'<table class=\"outer\">'\n\n # month = weeks[0][0].month\n # current_month = (2020, 1)\n\n # weekline = ['' for a in range(7)]\n whole_months = {}\n months_indexed = {}\n # for i,day in enumerate(week_days):\n for day in week_days:\n # week = (day.timetuple().tm_yday\n week = day.isocalendar()[1]\n month = (day.year, day.month)\n\n if month not in months_indexed: months_indexed[month] = {}\n if week not in months_indexed[month]:months_indexed[month][week] = []\n\n months_indexed[month][week].append(day)\n\n for month, weeks in months_indexed.items():\n if month not in whole_months: whole_months[month] = {week:[] for week in weeks}\n for week, days in weeks.items():\n for day in days:\n whole_months[month][week].append(day)\n\n month_tables = {}\n for month, weeks in whole_months.items():\n month_tables[month] = []\n for week, days in weeks.items():\n # weekline = ['' for a in range(7)]\n weekline = ['<td class=\"empty\"> </td>\n' for a in range(7)]\n for day in days:\n weekline[day.weekday()] ='<td class=\"%s\">%s </td>\n'%(\n # weekline[day.weekday()] ='<td>%s</td>\n'%(\n 'weekend' if day.weekday()>4 else '',\n # day.strftime(\"%m-%d\").lower()\n day.strftime(\"%d\").lower()\n # '-'\n )\n # print(weekline)\n # print(len(weekline))\n month_tables[month].append(weekline)\n\n cols = 6\n cols = 2\n cols = 4\n cols = 3\n rows = 12//cols\n # rows, cols = cols, rows\n table =[['' for a in range(cols)] for b in range(rows)]\n for col in range(1,13):\n\n weeklines = month_tables[(y,col)]\n month_name = datetime.date(y,col,1).strftime('%B').lower()\n\n joined = ''.join(['<tr>\n%s\n</tr>'%''.join(weekline) for weekline in weeklines])\n\n # row, column = (col-1)//rows, (col-1)%rows\n column, row = (col-1)//rows, (col-1)%rows\n month = '<div class=\"month\">%s</div>\n'%month_name\n with_month = month+'<table class=\"inner\">\n%s</table>\n'%joined\n table[row][column] = with_month\n\n table_html = ''\n row_length = len(table[0])\n for i, row in enumerate(table):\n # table_html+=tr(row)\n # len(table)-1\n table_html += '<tr>\n%s</tr>'%''.join(['<td class=\"%s\">%s</td>\n'%(\n 'large_cell_bottom' if j == row_length-1 else 'large_cell',\n # j,\n a) for j,a in enumerate(row)])\n\n # text_blob +='%s'%tr(columns)\n # text_blob += '<h1>%d</h1><table class=\"outer\">\n%s</table>\n'%(y,table_html)\n text_blob += '<h1>%d</h1><table class=\"outer\">\n%s</table>
\\n'%(y,table_html)\n\n\n html.write(text_blob)\n html_finish(html)\n", "sub_path": "_projlab/calendar-jog/cal2020-2.py", "file_name": "cal2020-2.py", "file_ext": "py", "file_size_in_byte": 7239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "locale.setlocale", "line_number": 5, "usage_type": "call"}, {"api_name": "locale.LC_TIME", "line_number": 5, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 183, "usage_type": "call"}, {"api_name": "calendar.isleap", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "555225421", "text": "import curses\nfrom windows.window import Window\n\n\nclass StatusWin(Window):\n def __init__(self, lines, cols, begin_y, begin_x, parent):\n super().__init__(lines, cols, begin_y, begin_x, parent)\n self.controller = self.parent.controller\n self.status = \"Default mode\"\n self.currentInput = \"\"\n self.mode = 0\n\n def render(self):\n if self.mode == 0:\n self.renderStatus()\n elif self.mode == 1:\n self.renderCommand()\n\n def renderStatus(self):\n width = self.scr.getmaxyx()[1]\n hint = \"Text Editor. \"\n hint += \"t - Typing Mode, c - Command Mode, ESC - Normal Mode.\"\n hint = hint.ljust(width)\n self.scr.addstr(0, 0, hint, curses.A_REVERSE)\n self.scr.addstr(1, 0, self.status.ljust(width - 1),\n curses.A_REVERSE)\n position = 'LN: ' + str(self.parent.bufferWin.getLine() + 1)\n position += 'COL:' + str(self.parent.bufferWin.getCol())\n self.scr.addstr(1, width - 1 - len(position), position,\n curses.A_REVERSE)\n\n def renderCommand(self):\n height = self.scr.getmaxyx()[0]\n width = self.scr.getmaxyx()[1]\n hint = \"Enter command: \".ljust(width)\n self.scr.addstr(height - 2, 0, hint, curses.A_REVERSE)\n self.scr.addstr(height - 1, 0, \":\" + self.currentInput)\n height = self.scr.getmaxyx()[0]\n self.scr.move(height - 1, len(self.currentInput) + 1)\n self.scr.cursyncup()\n\n def handleInputCommand(self, c):\n if c == 10 or c == curses.KEY_ENTER:\n cmd = self.currentInput.lower()\n if cmd.find(\"connect\") == 0:\n s = self.currentInput.split(\" \")\n if len(s) == 3:\n self.controller.connectToServer(s[1], int(s[2]))\n if len(s) == 2:\n if s[1].find(\":\") == -1:\n self.controller.connectToServer(s[0], 9999)\n else:\n s = s[1].split(\":\")\n self.controller.connectToServer(s[0], int(s[1]))\n else:\n self.controller.connectToServer(\"localhost\", 9999)\n self.parent.bufferWin.offset = 0\n self.parent.bufferWin.y = 0\n self.parent.bufferWin.x = 0\n if cmd.find(\"open\") == 0:\n if len(cmd.split(\" \")) != 2:\n self.mode = 0\n newStatus = \"open document.\"\n newStatus += \" usage: open \"\n self.setStatus(newStatus)\n return\n self.controller.openFile(cmd.split(\" \")[1])\n if cmd.find(\"save\") == 0:\n if len(cmd.split(\" \")) != 2:\n self.mode = 0\n newStatus = \"save document to file.\"\n newStatus += \" usage: save \"\n self.setStatus(newStatus)\n return\n self.controller.saveFile(cmd.split(\" \")[1])\n if cmd.find(\"setname\") == 0:\n if len(cmd.split(\" \")) != 2:\n self.mode = 0\n newStatus = \"Set name to display in history and access\"\n newStatus += \". 
name \"\n self.setStatus(newStatus)\n return\n self.controller.setName(cmd.split(\" \")[1])\n self.setStatus(\"New name: \" + self.controller.name)\n if cmd == \"getname\":\n self.setStatus(\"Name: \" + self.controller.name)\n if cmd == \"history\":\n f = open(\"history.txt\", \"w+\")\n for op in self.controller.history:\n f.write(\"-----\\r\")\n for key in op.keys():\n f.write(key + ':' + str(op[key]) + \"\\r\")\n f.close()\n self.mode = 0\n elif c == 8 or c == curses.KEY_BACKSPACE:\n self.currentInput = self.currentInput[:len(self.currentInput)-1]\n elif c == 27:\n self.mode = 0\n return\n else:\n self.currentInput += chr(c)\n\n def handleInput(self, keycode):\n if self.mode == 1:\n self.handleInputCommand(keycode)\n return\n key = chr(keycode).lower()\n if key == \"c\":\n self.mode = 1\n self.currentInput = \"\"\n if key == \"t\":\n self.parent.activeWindow = self.parent.bufferWin\n self.setStatus(\"Typing mode\")\n\n def setStatus(self, text):\n self.status = text\n", "sub_path": "windows/status_win.py", "file_name": "status_win.py", "file_ext": "py", "file_size_in_byte": 4608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "windows.window.Window", "line_number": 5, "usage_type": "name"}, {"api_name": "curses.A_REVERSE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "curses.A_REVERSE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "curses.A_REVERSE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "curses.A_REVERSE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 43, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 95, "usage_type": "attribute"}]} +{"seq_id": "50993001", "text": "# -*- coding: utf-8 -*-\n\"\"\" This code is open-sourced software licensed under the MIT license\"\"\" \n\"\"\" Copyright 2019 Marta Cortes, UbiComp - University of Oulu\"\"\" \n\"\"\" Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\" \n\"\"\" \nDISCLAIMER\nThis code is used to crawl/parse data from several files from Thessaloniki municipality (https://opendata.thessaloniki.gr). By downloading this code, you agree to contact the corresponding data provider and verify you are allowed to use (including, but not limited, crawl/parse/download/store/process) all data obtained from the data source.\n\"\"\" \n\n\"\"\" Download excel files and transform to correct format in csv files. 
\"\"\"\n\"\"\" \"\"\"\n\"\"\" Excel files are linked in href attribute of elements in the given URL (Not nested URLs)\"\"\"\n\"\"\" Each station, in stations array, is linked to a numerical code in this file\"\"\"\n\"\"\" Longitude and latitude and location (as descriptive name) are added to each row of each station\"\"\"\n\"\"\" Greek names for date and weekday are translated\"\"\"\n\n# Code: thess_env_cityofthess_dailyyearly \n# Code with numbering: thess_env_cityofthess_dailyyearly_1, thess_env_cityofthess_dailyyearly_2, thess_env_cityofthess_dailyyearly_3, thess_env_cityofthess_dailyyearly_4, thess_env_cityofthess_dailyyearly_5, thess_env_cityofthess_dailyyearly_6 \n\n #Stations (latitude, longitude):\n #Egnatia (Στ. ΕΓΝΑΤΙΑΣ): Egnatia and I. Dragoumi (1st Municipal District) (40.63753, 22.94095): thess_env_cityofthess_dailyyearly_1\n #Martiou (Στ. 25ης ΜΑΡΤΙΟΥ): 25 March and Karakasi (5th Municipal District) (40.60102, 22.96017): thess_env_cityofthess_dailyyearly_2\n #Lagada (Στ. ΛΑΓΚΑΔΑ): Lagada and Koutifari (2nd Municipal District) (40.65233, 22.93514): thess_env_cityofthess_dailyyearly_3\n #Eptapyrgio (Στ. ΕΠΤΑΠΥΡΓΙΟΥ): Agia Anastasia and Agrafon (3rd Diamersima) (40.64407, 22.95837): thess_env_cityofthess_dailyyearly_4 \n #Malakopi (Toumba) (Στ. ΜΑΛΑΚΟΠΗΣ): Harisio Girokomio (Dimitrios Charisis) (4th Diamersima) (40.61637, 22.98233): thess_env_cityofthess_dailyyearly_5\n #Dimarxeio (Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.): King's George A (1st Diamersima) (40.62381, 22.95312): thess_env_cityofthess_dailyyearly_6\n\n #NO, NO2, O3, PM10, PM2.5, CO, SO2\n #μg/m3,μg/m3,μg/m3,μg/m3,μg/m3,mg/m3,μg/m3\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen, urlretrieve\nimport time\nimport os\nfrom collections import deque\nimport pandas as pd\nimport shutil\nimport uuid\nfrom kafka import KafkaProducer\nfrom kafka.errors import KafkaError\n\nimport logging\n\n__author__ = \"Marta Cortes\"\n__mail__ = \"marta.cortes@oulu.fi\"\n__origin__ = \"UbiComp - University of Oulu\"\n\n\nlogging.basicConfig(level=logging.INFO)\ncode = 'thess_env_cityofthess_dailyyearly'\nstations = {'Στ. ΕΓΝΑΤΙΑΣ':[40.63753, 22.94095],'Στ. 25ης ΜΑΡΤΙΟΥ':[40.60102, 22.96017],'Στ. ΛΑΓΚΑΔΑ':[40.65233, 22.93514],'Στ. ΕΠΤΑΠΥΡΓΙΟΥ':[40.64407, 22.95837],'Στ. ΜΑΛΑΚΟΠΗΣ':[40.61637, 22.98233],'Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.':[40.62381, 22.95312]}\nnames = {'Στ. ΕΓΝΑΤΙΑΣ':'Egnatia','Στ. 25ης ΜΑΡΤΙΟΥ':'Martiou','Στ. ΛΑΓΚΑΔΑ':'Lagada','Στ. ΕΠΤΑΠΥΡΓΙΟΥ':'Eptapyrgio','Στ. ΜΑΛΑΚΟΠΗΣ':'Malakopi','Μτ.Στ. ΔΩΜΑ ΠΑΛ. 
ΔΗΜΑΡ.':'Dimarxeio'}\norigin_url = 'https://opendata.thessaloniki.gr/el/dataset/%CE%BC%CE%B5%CF%84%CF%81%CE%AE%CF%83%CE%B5%CE%B9%CF%82-%CE%B4%CE%B7%CE%BC%CE%BF%CF%84%CE%B9%CE%BA%CE%BF%CF%8D-%CE%B4%CE%B9%CE%BA%CF%84%CF%8D%CE%BF%CF%85-%CF%83%CF%84%CE%B1%CE%B8%CE%BC%CF%8E%CE%BD-%CE%B5%CE%BB%CE%AD%CE%B3%CF%87%CE%BF%CF%85-%CE%B1%CF%84%CE%BC%CE%BF%CF%83%CF%86%CE%B1%CE%B9%CF%81%CE%B9%CE%BA%CE%AE%CF%82-%CF%81%CF%8D%CF%80%CE%B1%CE%BD%CF%83%CE%B7%CF%82-%CF%84%CE%BF%CF%85-%CE%B4%CE%AE%CE%BC%CE%BF%CF%85-%CE%B8%CE%B5%CF%83%CF%83%CE%B1%CE%BB%CE%BF%CE%BD%CE%AF%CE%BA%CE%B7%CF%82' \n#\nl_temp_path = './temp/'\nl_final_path = './data/'\n\nclass thess_env_cityofthess_dailyyearly (object):\n\t\n\tdef __init__(self, url):\n\t\tself.url = url\n\t\tself.xlfnames = []\n\t\tself.url_queue = deque([])  # double-ended queue\n\t\tself.folder = l_temp_path\n\n\tdef get_page(self, url):\n\t\t\"\"\" Download the page at the given URL\"\"\"\n\t\t\"\"\" @param url: Url we want to crawl\"\"\"\n\t\t\"\"\" @type url: String \"\"\"\n\t\t\"\"\"@return the page, or None if the download failed\"\"\"\n\t\thtml = None\n\t\ttry:\n\t\t\tu = urlopen(url)\n\t\t\thtml = u.read().decode('utf-8')\n\t\t\tu.close()\n\t\texcept Exception as e:\n\t\t\tlogging.exception(e)\n\t\treturn html\n\n\tdef get_soup(self, html):\n\t\t\"\"\"Returns the BeautifulSoup object of the given page\"\"\"\n\t\tif html is not None:\n\t\t\tsoup = BeautifulSoup(html, \"html.parser\")\n\t\t\treturn soup\n\t\telse:\n\t\t\treturn\n\n\tdef get_links(self, soup):\n\t\t\"\"\"Get the links of interest from the given BeautifulSoup object\"\"\"\n\t\t\"\"\" @param soup: BeautifulSoup object that contains the targeted links \"\"\"\n\t\t\"\"\" @type soup: BeautifulSoup object \"\"\"\n\t\tfor link in soup.select('a[href^=\"https://\"]'):#All links which have a href element\n\t\t\thref = link.get('href')#The actual href element of the link\n\t\t\tif not any(href.endswith(x) for x in ['.csv','.xls','.xlsx']):\n\t\t\t\tprint(\"No excel\")\n\t\t\t\tcontinue\n\t\t\tif href not in self.url_queue:\n\t\t\t\tself.url_queue.append(href)#Add the URL to our queue\n\n\tdef get_files(self):\n\t\t\"\"\"Create a temp folder and download every queued file into it\"\"\"\n\t\t#self.folder= +str(int(time.time()))\n\t\tif not os.path.exists(self.folder):\n\t\t\tos.mkdir(self.folder)\n\t\twhile len(self.url_queue): #If we have URLs to crawl - we crawl\n\t\t\thref = self.url_queue.popleft() #We grab a URL from the left of the list\n\t\t\tfilename = href.rsplit('/', 1)[-1]\n\t\t\tprint(\"Downloading %s to %s...\" % (href, filename) )\n\t\t\tfullname = os.path.join(self.folder, filename) \n\t\t\turlretrieve(href, fullname)\n\t\t\tself.xlfnames.append(filename)\n\n\tdef run_downloader(self):\n\t\t\"\"\"downloads the html page and looks for the links with excel files\"\"\"\n\t\t\"\"\"calls the file downloader; returns True on success\"\"\"\n\t\thtml = self.get_page(self.url)\n\t\tsoup = self.get_soup(html)\n\t\tif soup is not None: #If we have soup - we crawl it\n\t\t\tself.get_links(soup)\n\t\t\tself.get_files()\n\t\t\treturn True\n\t\treturn False\n\n\tdef parse_sheet(self,xl, sheet):\n\t\t\"\"\" @param xl: excel file object \"\"\"\n\t\t\"\"\" @type xl: pandas.ExcelFile \"\"\"\n\t\t\"\"\" @param sheet: sheet name \"\"\"\n\t\t\"\"\" @type sheet: string \"\"\"\n\t\tif sheet in stations.keys():\n\t        #Create dataframe. Note, put this out of the loop to write all the sheets in same csv file\n\t\t\tdf = pd.DataFrame()\n\t        #print(sheet.encode('utf-8'))\n\n\t\t\tdf_tmp = xl.parse(sheet)\n\n\t        #Clean the data\n\t        #replace return, remove units\n\t\t\tdf_tmp.columns = df_tmp.columns.str.replace('\\n',' ').str.strip(' μg/m3').str.strip(' mg/m3')\n\t        #select the columns of interest\n\t\t\tdf_tmp = df_tmp.filter(regex='(NO|NO2|O3|PM10|PM2,5|CO|SO2|Ημερο - μηνία|Ημέρα)')\n\t        #df_tmp.columns = df_tmp.columns.str.strip(' μg/m3').str.strip(' mg/m3')\n\t\t\t#correct format of information\n\t\t\tdf_tmp['Ημέρα']= df_tmp['Ημέρα'].dt.day_name()\n\t\t\tdf_tmp['Latitude'] =stations[sheet][0]\n\t\t\tdf_tmp['Longitude'] =stations[sheet][1]\n\t\t\tdf_tmp['Location'] =names[sheet]\n\n\t\t\t#rename the Greek date fields to English\n\t\t\tdf_tmp.rename(columns={'Ημερο - μηνία':'Date', 'Ημέρα':'Weekday'},inplace=True)\n\n\t        #Directory name by code/codenumber \n\t\t\touterdir = l_final_path +code\n\t\t\tif not os.path.exists(outerdir):\n\t\t\t\tos.mkdir(outerdir)\n\t\t\toutdir = outerdir+'/'+code+'_'+str(list(stations).index(sheet)+1)\n\t\t\tif not os.path.exists(outdir):\n\t\t\t\tos.mkdir(outdir)\n\t\t\tdf = df.append(df_tmp, ignore_index=True)\n\t        #Write to the csv file. Note, put this out of the loop to write all the sheets in same csv file\n\t\t\tcsvfile = str(uuid.uuid4()) + \".csv\"  # sheet+'.csv'\n\t\t\tfullname = os.path.join(outdir, csvfile) \n\t\t\tdf.to_csv(fullname, mode='a', encoding='utf-8-sig', index=False)#mode a is append\n\n\tdef parse_files (self):\n\t\t\"\"\" calls parse_sheet for each sheet in every downloaded file; returns True on success \"\"\"\n\t\tfor fileName in self.xlfnames:\n\t\t\txlfname = self.folder+'/'+fileName\n\t\t\txl = pd.ExcelFile(xlfname)\n\t\t\tfor sheet in xl.sheet_names:\n\t\t\t\tself.parse_sheet(xl,sheet)\n\t\treturn True\n\n\n\tdef producer(self,topic,msg,e=None):\n\t\t\"\"\" This function sends data to kafka bus\"\"\"\n\t\tproducer = KafkaProducer(bootstrap_servers=['HOST_IP'], api_version=(2, 2, 1))\n\t\tmsg_b = str.encode(msg)\n\t\tproducer.send(topic, msg_b).get(timeout=30)\n\t\tif (e):\n\t\t\tlogging.exception('exception happened')\n\n\nif __name__ == '__main__':\n\ta = thess_env_cityofthess_dailyyearly(origin_url)\n\tif (a.run_downloader()):\n\t\tif(a.parse_files()):\n\t\t\ta.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_INGESTION\",'City of Thessaloniki environmental data ingested to HDFS')\n", "sub_path": "Environmental/thess_env_cityofthess_dailyyearly.py", "file_name": "thess_env_cityofthess_dailyyearly.py", "file_ext": "py", "file_size_in_byte": 9577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.basicConfig", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 52, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 78, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, 
"usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 159, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pandas.ExcelFile", "line_number": 172, "usage_type": "call"}, {"api_name": "kafka.KafkaProducer", "line_number": 179, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "248345009", "text": "from collections import OrderedDict\nfrom random import randint\n\nmydict = OrderedDict()\nmydict1 = dict()\n\nfor i in range(1,31,1):\n n = randint(1,101)\n mydict[i] = i*2\n mydict1[i] = i*2\n\n# for i in range(2,31,3):\n# n = randint(1,101)\n# mydict[i] = i*2\n# mydict1[i] = i*2\n#\n# for i in range(3,31,3):\n# n = randint(1,101)\n# mydict[i] = i*2\n# mydict1[i] = i*2\n\nfor k,v in mydict.items():\n print(\" for key {} value is {} \".format(k,v))\n\n\nfor k,v in mydict1.items():\n print(\" for key {} value is {} \".format(k,v))\n\n#print(mydict1)\n\n\n", "sub_path": "com/ishaan/python/CollectionsExamples/OrderedDicts.py", "file_name": "OrderedDicts.py", "file_ext": "py", "file_size_in_byte": 565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.OrderedDict", "line_number": 4, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "264514867", "text": "\"\"\"\n\nTo draw Ref marker and Stop marker on A4 paper\n(A4 size = 210 x 297 mm)\n\n\"\"\"\n\nimport cv2\nimport numpy as np\n\npaper_size = 2100, 2100\ncenter = paper_size[0]//2, paper_size[1]//2\n\ncross_size = 20\nnormal_marker = np.ones(paper_size)*255\ncv2.circle(normal_marker, center, 940, (180, 180, 180), thickness=2)\ncv2.circle(normal_marker, center, 600, (0, 0, 0), thickness=-1)\ncv2.circle(normal_marker, center, 380, (255, 255, 255), thickness=-1)\ncv2.circle(normal_marker, center, 190, (0, 0, 0), thickness=-1)\nrectangle_points = (center[0], center[1] - cross_size), (center[0], center[1] + cross_size)\ncv2.rectangle(normal_marker, rectangle_points[0], rectangle_points[1], (200, 200, 200), cross_size//2)\nrectangle_points = (center[0] - cross_size, center[1]), (center[0] + cross_size, center[1])\ncv2.rectangle(normal_marker, rectangle_points[0], rectangle_points[1], (200, 200, 200), cross_size//2)\n\nstop_marker = np.ones(paper_size)*255\ncv2.circle(stop_marker, center, 940, (0, 0, 0), thickness=-1)\ncv2.circle(stop_marker, center, 600, (255, 255, 255), thickness=-1)\ncv2.circle(stop_marker, center, 380, (0, 0, 0), thickness=-1)\ncv2.circle(stop_marker, center, 190, (255, 255, 255), thickness=-1)\n\nmarkers = np.bmat([[normal_marker, normal_marker], [stop_marker, stop_marker]])\ncv2.imwrite(\"New_markers_A4.jpg\", markers)\n", "sub_path": "draw_marker.py", "file_name": "draw_marker.py", "file_ext": "py", 
"file_size_in_byte": 1319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.ones", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.bmat", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "237554207", "text": "import logging\nimport json\nimport sys\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta, time\nfrom functools import wraps\n\nfrom telegram.ext import CommandHandler, Updater\nfrom telegram import ParseMode\n\nfrom dbmanager import DBManager as dbm\nfrom extras import *\n\n# load config\nwith open(\"config.json\") as f:\n config = json.load(f)\n\n#logging\nlog_config = config.get(\"log\")\n\nLOGFILE = log_config.get(\"debug\")\nBOTLOG = log_config.get(\"filename\")\nLOGFORMAT = log_config.get(\"logformat\")\nLOGLEVEL = logging.INFO\n\nlogging.basicConfig(format=LOGFORMAT, level=LOGLEVEL, filename=LOGFILE)\nlogger = logging.getLogger(__name__)\n\n#handlers\nfilehandler = logging.FileHandler(BOTLOG)\nfilehandler.setLevel(LOGLEVEL)\n\nformatter = logging.Formatter(LOGFORMAT)\nfilehandler.setFormatter(formatter)\n\nlogger.addHandler(filehandler)\n\n\nPARSEMODE = ParseMode.MARKDOWN\n\n# named tuple for unpacked update\nUpdate = namedtuple('Update', 'username, user_id, text, date')\n\n\ndef help(func):\n @wraps(func)\n def wrapper(*a, **kw):\n update = a[1]\n text = update.message.text.split()\n if len(text) == 2 and text[1] in ['help', 'h']:\n helptext = helpdata.get(func.__name__)\n update.message.reply_text(helptext, parse_mode=PARSEMODE)\n else:\n return func(*a, **kw)\n return wrapper\n\n\ndef up_data(update):\n \"\"\"Convenience function to unpack data from `telegram.Update`\n\n Returns\n `Update`\n \"\"\"\n message = update.message\n\n username = message.from_user.username\n user_id = message.from_user.id\n date = message.date\n text = message.text\n\n return Update._make([username, user_id, text, date])\n\n\ndef start(bot, update):\n upd = up_data(update)\n available_commands = \"\\n\".join([\"`/add`\", \"`/tasks`\", \"`/del`\", \"`/edit`\", \"`/done`\"])\n\n update.message.reply_text(STARTTEXT.format(available_commands), parse_mode=PARSEMODE)\n logger.info(f\"/start by '{upd.user_id}:{upd.username}'\")\n\n\n@help\ndef add_task(bot, update):\n upd = up_data(update)\n \n # parse input\n message = upd.text\n message = message.split()[1:]\n\n parsed = parse_date(message, update)\n\n if not parsed:\n update.message.reply_text(\"Specified timeperiod not found!\")\n return\n\n message = parsed[1]\n \n day = datetime.strftime(parsed[0], DATEFORMAT)\n \n # add to db\n with dbm(upd.user_id) as db:\n db.add(day, message)\n\n 
logger.info(f\"Adding '{message}' for user '{upd.user_id}:{upd.username}' to '{day}'\")\n update.message.reply_text(\"Updating tasklist ...\")\n\n\n@help\ndef get_task(bot, update):\n upd = up_data(update)\n\n reply = \"\"\n message = upd.text.split()[1:]\n with dbm(upd.user_id) as db:\n if not message:\n data = db.get()\n day = datetime.strftime(upd.date, DATEFORMAT) # default get today\n else:\n day, _ = parse_date(message, update)\n day = datetime.strftime(day, DATEFORMAT)\n data = db.get(day)\n if not data:\n reply += f\"*{day}* - \"\n \n if not data:\n reply += \"*Todo List* is empty!\"\n elif len(data.keys()) == 1:\n reply += f\"*{day}*\\n\"\n try:\n data = data['tasks']\n except KeyError:\n try:\n data = data[day]['tasks']\n except KeyError:\n day, data = list(data.items())[0]\n data = data['tasks']\n reply = f\"*{day}*\\n\"\n \n for num, task in data.items():\n if task['done']:\n reply += f\"`{num})` \\u2705 \"\n else:\n reply += f\"`{num})` \\u274c \"\n reply += f\"{task['text']}\\n\"\n\n else:\n data = data.items()\n items = [(day, day_data) for day, day_data in data]\n items.sort(key=lambda x: x[0]) # sort by date ascending\n\n days = []\n for day, data in items:\n reply_piece = f\"*{day}*\\n\"\n for num, task in data['tasks'].items():\n if task['done']:\n reply_piece += f\"`{num})` \\u2705 \"\n else:\n reply_piece += f\"`{num})` \\u274c \"\n reply_piece += f\"{task['text']}\\n\"\n days.append(reply_piece)\n\n reply += \"\\n\".join(days)\n\n update.message.reply_text(reply, parse_mode=PARSEMODE)\n logger.info(f\"Getting tasks for '{upd.user_id}:{upd.username}'\")\n\n@help\ndef delete_task(bot, update):\n upd = up_data(update)\n day = datetime.strftime(upd.date, DATEFORMAT)\n reply = \"\"\n\n message = upd.text.split()[1:]\n\n if not message:\n reply += \"Tell me what to delete.\"\n logger.debug(\"/delete command empty\")\n update.message.reply_text(reply)\n return\n\n with dbm(upd.user_id) as db:\n date_match = re.match(DATEREGEX, message[0])\n if len(message) == 1:\n if message[0] == 'all':\n db.delete(force=True)\n reply += \"Deleting database\"\n logger.info(\"Deleting all tasks for '{upd.user_id}:{upd.username}'\")\n\n # Without specifying date default delete task from today\n if message[0].isdigit():\n try:\n db.delete(day, message[0])\n reply += f\"Deleting task {message[0]} from *today*\"\n logger.info(f\"Deleting '{message[0]}' on '{day}' for '{upd.user_id}:{upd.username}'\")\n except KeyError:\n reply += f\"Task {message[0]} in list {day} not found!\"\n \n if date_match:\n if message[0] in tomorrow:\n message[0] = datetime.strftime(upd.date+timedelta(days=1), DATEFORMAT)\n try:\n if message[0] == 'today':\n db.delete(day)\n reply += \"Deleting *today*\"\n else:\n db.delete(message[0])\n reply += f\"Deleting day *{message[0]}*\"\n logger.info(f\"Deleting '{message[0]}' for '{upd.user_id}:{upd.username}'\")\n except KeyError:\n reply += f\"{message[0]} not found!\"\n\n if not reply:\n reply += f\"\\\"{message[0]}\\\" not found!\"\n\n \n else:\n if not date_match:\n reply += f\"{message[0]} not found!\"\n else:\n if message[0] in tomorrow:\n message[0] = datetime.strftime(upd.date+timedelta(days=1), DATEFORMAT)\n if message[1].isdigit():\n try:\n db.delete(message[0], message[1])\n reply += f\"Deleting task {message[1]} from {message[0]}\"\n logger.info(f\"Deleting '{message[1]}' from '{message[0]}' for '{upd.user_id}:{upd.username}'\")\n except KeyError:\n reply += f\"Task {message[1]} not found in {message[0]}\"\n\n update.message.reply_text(reply, 
parse_mode=PARSEMODE)\n\n@help\ndef edit_task(bot, update):\n    upd = up_data(update)\n    day = datetime.strftime(upd.date, DATEFORMAT)\n    reply = \"\"\n\n    message = upd.text.split()[1:]\n    if not message:\n        reply += \"Tell me what task to edit\"\n\n    elif len(message) < 2:\n        reply += \"I didn't get that :(\\nType: /edit _help_\"\n    else:\n        with dbm(upd.user_id) as db:\n\n            if message[0].isdigit():\n                text = \" \".join(message[1:])\n                try:\n                    db.edit(day, message[0], text)\n                    reply += f\"Editing task {message[0]} on {day}\"\n                    logger.info(f\"Editing '{message[1]}' from '{day}' for '{upd.user_id}:{upd.username}'\")\n                except KeyError:\n                    reply += f\"Task {message[0]} not found!\"\n            else:\n                if not message[1].isdigit():\n                    reply += f\"Second argument should be _task number_\\nType: /edit _help_\"\n                else:\n                    time = message[0]\n                    date_match = re.match(DATEREGEX, time)\n                    if date_match:\n                        if time in tomorrow:\n                            time = upd.date + timedelta(days=1)\n                            time = str(time.date())\n                    else:\n                        update.message.reply_text(f\"*\\\"{time}\\\"* not found!\", parse_mode=PARSEMODE)\n                        return\n\n                    text = \" \".join(message[2:])\n                    try:\n                        db.edit(time, message[1], text)\n                        reply += f\"Editing task {message[1]} on {time}\"\n                        logger.info(f\"Editing '{message[1]}' from '{time}' for '{upd.user_id}:{upd.username}'\")\n                    except KeyError:\n                        reply += f\"Task _{message[1]}_ on *{time}* not found!\"\n\n    update.message.reply_text(reply, parse_mode=PARSEMODE)\n\n\n\n@help\ndef done_task(bot, update):\n    upd = up_data(update)\n    time = datetime.strftime(upd.date, DATEFORMAT)\n    reply = \"\"\n\n    message = upd.text.split()[1:]\n\n    if not message:\n        reply += \"Which task?\"\n    else:\n        with dbm(upd.user_id) as db:\n            if message[0].isdigit():\n                number = message[0]\n                try:\n                    done = db.done(time, number)\n                    reply += f\"Marking task {number} \"\n                    if done:\n                        reply += \"*DONE*\"\n                        logmessage = 'DONE'\n                    else:\n                        reply += \"*UNDONE*\"\n                        logmessage = 'UNDONE'\n\n                    logger.info(f\"Marking '{number}' {logmessage} on '{time}' for '{upd.user_id}:{upd.username}'\")\n                except KeyError:\n                    reply += f\"Task {number} not found!\"\n            else:\n                time = message[0]\n                number = message[1]\n\n                if not number.isdigit():\n                    update.message.reply_text(f\"*{number}* is not a digit!\")\n                    return\n\n                date_match = re.match(DATEREGEX, time)\n                if date_match:\n                    if time in tomorrow:\n                        time = datetime.strftime(upd.date + timedelta(days=1), DATEFORMAT)\n                    time = str(time)\n                    try:\n                        done = db.done(time, number)\n                        reply += f\"Marking task {number} on {time} \"\n                        if done:\n                            reply += \"*DONE*\"\n                            logmessage = 'DONE'\n                        else:\n                            reply += \"*UNDONE*\"\n                            logmessage = 'UNDONE'\n\n                        logger.debug(f\"Marking '{number}' {logmessage} on '{time}' for '{upd.user_id}:{upd.username}'\")\n                    except KeyError:\n                        reply += f\"Task {number} on {time} not found!\"\n                else:\n                    reply += f\"*\\\"{time}\\\"* not found!\"\n\n\n    update.message.reply_text(reply, parse_mode=PARSEMODE)\n\n\n\ndef daily_maintenance(bot, job):\n    \"\"\"Moves all tasks from today to day after that at the end of the day\"\"\"\n\n    dtoday = datetime.today() \n    #dtoday = datetime.today() - timedelta(days=1)\n    today = datetime.strftime(dtoday, DATEFORMAT)\n    tomorrow = datetime.strftime(dtoday + timedelta(days=1), DATEFORMAT) \n\n    with dbm(upd.user_id) as db:\n        today_data = db.get(today)['tasks']\n        db.add(tomorrow, today_data)\n        db.delete(today)\n\n    message = f\"Moved {today} data to {tomorrow} at {dtoday.time().strftime('%H:%M:%S')}\" \n    logger.info(message)\n    bot.send_message(chat_id=config['auth']['myid'], text=message)\n\n\ndef parse_date(datestring: list, update):\n    
\"\"\"Calculates datetime.timedelta from natural input.\n If no input is found, defaults to today. \n\n Returns:\n List[datetime.datetime, str(message)]\n \"\"\"\n\n today = datetime.today()\n accepted_keywords = {'today': today,\n 'tomorrow': today + timedelta(days=1), \n 'tmr': today + timedelta(days=1)}\n \n response = []\n wordsused = 0\n\n if datestring[0] in accepted_keywords.keys():\n response.append(accepted_keywords[datestring[0]])\n wordsused += 1\n\n elif datestring[0] == \"in\":\n # Expected pattern is: int(n) str(timeperiod)\n # e.g. 2 days | 5 w | 3 months | 10 mins\n\n #make sure first arg is a number\n if not datestring[1].isdigit():\n update.message.reply_text(\"argument is not a digit\")\n return None\n\n test = \" \".join(datestring[1:3])\n match = match_re(test)\n\n # regext test\n if not match:\n return None\n \n num, period = datestring[1:3]\n period = period.lower()\n num = int(num)\n\n if period[:2] == 'mo': # handle minute & month collision\n delta = timeperiods[period[:2]](num)\n else:\n delta = timeperiods[period[0]](num)\n response.append(today + delta)\n wordsused += 3\n\n else:\n response.append(today)\n \n response.append(\" \".join(datestring[wordsused:]))\n return response\n\n\nif __name__ == \"__main__\":\n auth = config.get(\"auth\")\n con = config.get(\"con\")\n args = sys.argv[1:]\n\n updater = Updater(token=auth.get(\"token\"))\n dispatcher = updater.dispatcher\n jobq = updater.job_queue\n\n dispatcher.add_handler(CommandHandler('start', start))\n dispatcher.add_handler(CommandHandler('add', add_task))\n dispatcher.add_handler(CommandHandler('tasks', get_task))\n dispatcher.add_handler(CommandHandler('del', delete_task))\n dispatcher.add_handler(CommandHandler('edit', edit_task))\n dispatcher.add_handler(CommandHandler('done', done_task))\n\n\n #jobs\n #jobq.run_daily(daily_maintenance, time=time(0,1))\n #jobq.run_repeating(daily_maintenance, first=0, interval=600)\n\n if args:\n updater.start_webhook(listen=\"0.0.0.0\",\n port=con.get('port'),\n url_path=con.get('path'),\n key=con.get('key'),\n cert=con.get('cert'),\n webhook_url=con.get('url'))\n else:\n updater.start_polling()\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 14398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 33, "usage_type": "call"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 39, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 42, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "name"}, {"api_name": "dbmanager.DBManager", "line_number": 101, "usage_type": "call"}, {"api_name": "dbmanager.DBManager", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 117, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 117, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 170, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 170, "usage_type": "name"}, {"api_name": "dbmanager.DBManager", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 200, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 200, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 200, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 221, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 235, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 235, "usage_type": "name"}, {"api_name": "dbmanager.DBManager", "line_number": 245, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 259, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 260, "usage_type": "argument"}, {"api_name": "datetime.time", "line_number": 262, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 263, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 263, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 264, "usage_type": "name"}, {"api_name": "datetime.time.date", "line_number": 264, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 266, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 272, "usage_type": "argument"}, {"api_name": "datetime.time", "line_number": 273, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 274, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 276, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 285, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 285, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 285, "usage_type": "name"}, {"api_name": "dbmanager.DBManager", "line_number": 293, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 297, "usage_type": "argument"}, {"api_name": "datetime.time", "line_number": 306, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 310, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 317, "usage_type": "argument"}, {"api_name": "datetime.time", "line_number": 319, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 320, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 320, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 320, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 320, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 321, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 323, "usage_type": "argument"}, {"api_name": "datetime.time", "line_number": 324, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 332, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 334, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 347, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 347, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 349, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 349, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 350, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 350, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 350, "usage_type": "call"}, {"api_name": "dbmanager.DBManager", "line_number": 352, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 370, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 370, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 372, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 373, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 419, "usage_type": "attribute"}, {"api_name": "telegram.ext.Updater", "line_number": 421, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 425, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 426, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 427, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 428, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 429, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 430, "usage_type": "call"}]} +{"seq_id": "535691735", "text": "# coding: utf8\nfrom __future__ import unicode_literals\nimport argparse\nimport os\nimport sys\nimport traceback\n\nfrom beastling.beastxml import BeastXml\nfrom beastling.report import BeastlingReport\nfrom beastling.report import BeastlingGeoJSON\nimport beastling.configuration\nfrom beastling.extractor import extract\n\n\ndef errmsg(msg):\n sys.stderr.write(msg)\n\n\ndef main(*args):\n\n # Parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"config\",\n help=\"Beastling configuration file(s) (or XML file if --extract is used)\",\n default=None,\n nargs=\"+\")\n parser.add_argument(\n \"--extract\",\n default=False,\n action=\"store_true\",\n help=\"Extract configuration file (and possibly data files) from a BEASTling-generated XML file.\")\n parser.add_argument(\n \"--report\",\n default=False,\n action=\"store_true\",\n help=\"Save a high-level report on the analysis as a Markdown file.\")\n parser.add_argument(\n \"--language-list\",\n default=False,\n action=\"store_true\",\n help=\"Save a list of languages in the analysis as a plain text file.\")\n parser.add_argument(\n \"-o\", \"--output\",\n help=\"Output filename, for example `-o analysis.xml`\",\n default=None)\n parser.add_argument(\n \"--overwrite\",\n help=\"Overwrite an existing configuration file.\",\n default=False,\n action=\"store_true\")\n parser.add_argument(\n \"--stdin\",\n help=\"Read data from stdin.\",\n default=False,\n action=\"store_true\")\n parser.add_argument(\n \"--prior\", \"--sample-from-prior\", \"-p\",\n help=\"Generate XML file which samples from the prior, not posterior.\",\n default=False,\n action=\"store_true\")\n parser.add_argument(\n \"-v\", \"--verbose\",\n help=\"Display details of the generated analysis.\",\n default=False,\n action=\"store_true\")\n args = parser.parse_args(args or None)\n if args.extract:\n do_extract(args)\n else:\n do_generate(args)\n sys.exit(0)\n\n\ndef do_extract(args):\n if 
len(args.config) != 1:\n errmsg(\"Can only extract from exactly one BEAST XML file\")\n sys.exit(1)\n if not os.path.exists(args.config[0]):\n errmsg(\"No such BEAST XML file: %s\\n\" % args.config)\n sys.exit(2)\n try:\n messages = extract(args.config[0], args.overwrite)\n except Exception as e:\n errmsg(\"Error encountered while extracting BEASTling config and/or data files:\\n\")\n traceback.print_exc()\n sys.exit(3)\n for msg in messages:\n sys.stdout.write(msg)\n\n\ndef do_generate(args):\n\n # Make sure the requested configuration file exists\n for conf in args.config:\n if not os.path.exists(conf):\n errmsg(\"No such configuration file: %s\\n\" % conf)\n sys.exit(1)\n\n # Build but DON'T PROCESS the Config object\n # This is fast, and gives us enough information to check whether or not\n try:\n config = beastling.configuration.Configuration(\n configfile=args.config, stdin_data=args.stdin, prior=args.prior)\n except Exception as e: # PRAGMA: NO COVER\n errmsg(\"Error encountered while parsing configuration file:\\n\")\n traceback.print_exc()\n sys.exit(2)\n\n # Make sure we can write to the appropriate output filename\n output_filename = args.output if args.output else config.basename+\".xml\"\n if os.path.exists(output_filename) and not args.overwrite:\n errmsg(\"File %s already exists! Run beastling with the --overwrite option if you wish to overwrite it.\\n\" % output_filename)\n sys.exit(4)\n\n # Now that we know we will be able to save the resulting XML, we can take\n # the time to process the config object\n try:\n config.process()\n except Exception as e:\n errmsg(\"Error encountered while parsing configuration file:\\n\")\n traceback.print_exc()\n sys.exit(2)\n\n # Print messages\n ## Urgent messages are printed first, whether verbose mode is on or not\n for msg in config.urgent_messages:\n errmsg(msg + \"\\n\")\n ## Non-urgent messages are next, but only if verbose mode is on\n if args.verbose:\n for msg in config.messages:\n errmsg(msg + \"\\n\")\n\n # Build XML file\n try:\n xml = BeastXml(config)\n except Exception as e:\n errmsg(\"Error encountered while building BeastXML object:\\n\")\n traceback.print_exc()\n sys.exit(3)\n\n # Write XML file\n xml.write_file(output_filename)\n\n # Build and write report\n if args.report:\n report = BeastlingReport(config)\n report.write_file(config.basename+\".md\")\n geojson = BeastlingGeoJSON(config)\n geojson.write_file(config.basename+\".geojson\")\n\n # Build and write language list\n if args.language_list:\n write_language_list(config)\n\ndef write_language_list(config):\n\n with open(config.basename + \"_languages.txt\", \"w\") as fp:\n fp.write(\"\\n\".join(config.languages)+\"\\n\")\n", "sub_path": "beastling/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 5078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stderr.write", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}, {"api_name": "beastling.extractor.extract", "line_number": 83, 
"usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 98, "usage_type": "call"}, {"api_name": "beastling.beastxml.configuration.Configuration", "line_number": 103, "usage_type": "call"}, {"api_name": "beastling.beastxml.configuration", "line_number": 103, "usage_type": "attribute"}, {"api_name": "beastling.beastxml", "line_number": 103, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 107, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 114, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 122, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 123, "usage_type": "call"}, {"api_name": "beastling.beastxml.BeastXml", "line_number": 136, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 139, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 140, "usage_type": "call"}, {"api_name": "beastling.report.BeastlingReport", "line_number": 147, "usage_type": "call"}, {"api_name": "beastling.report.BeastlingGeoJSON", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "69096792", "text": "##################################################\n# pygame - 마우스 이벤트(MOUSEMOTION) 이해\n#\n# 참고 사이트 \n# https://pg.org\n# https://pg.org/docs/\n##################################################\n\n\nimport pygame as pg\nimport sys\nimport time\n\n# 초기화 작업\n# 반드시 주어진 함수이름과 순서를 지켜야 함.\n\npg.init()\nscreen = pg.display.set_mode((400, 300))\npg.display.set_caption(\"pygame 이해하기\")\n\nx1 = 0\ny1 = 0\nrunning = True\nwhile running:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n running = False\n if event.type == pg.MOUSEMOTION:\n pos = pg.mouse.get_pos()\n x1 = pos[0]\n y1 = pos[1]\n\n screen.fill((0, 0, 0))\n\n pg.draw.rect(screen, (0, 255, 0), (x1, y1, 44, 44), 2)\n\n pg.display.update()\n \n time.sleep(0.1)\n\nprint('메인루프 종료')\npg.quit()\n", "sub_path": "1006.py", "file_name": "1006.py", "file_ext": "py", "file_size_in_byte": 885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.mouse", 
"line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 37, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "89133984", "text": "#чоловіки жінки ієрархічна\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.cluster.hierarchy as sch\nfrom sklearn.cluster import AgglomerativeClustering\ndata = pd.read_csv('1.csv', sep=';')\nprint(data.head())\n\nX = data.iloc[:, [0, 5, 6]].values\n#дендограма\ndendogram = sch.dendrogram(sch.linkage(X[:,[1,2]], method='ward'))\nplt.title('Dendogram')\nplt.xlabel('Clusters')\nplt.ylabel('Euclid dist')\nplt.show()\n# розбиттся на кластери\nhierc = AgglomerativeClustering(n_clusters= 3, affinity= 'euclidean', linkage='ward')\ny_hierc = hierc.fit_predict(X[:,[1,2]])\n# виводимо отримані дані на графік\nplt.scatter(X[y_hierc == 0, 1], X[y_hierc == 0,2], s = 100,c = 'y', label = 'Average')\nplt.scatter(X[y_hierc == 1, 1], X[y_hierc == 1,2], s = 100,c = 'b', label = 'The Best')\nplt.scatter(X[y_hierc == 2, 1], X[y_hierc == 2,2], s = 100,c = 'c', label = 'Worst')\nplt.title('Clusters of countries Hierarhial')\nplt.xlabel('Male')\nplt.ylabel('Female')\nplt.legend()\nplt.show()\n# виводимо списки країн\nprint('The best:')\nfor i in X[y_hierc == 1, 0]:\n print(i)\nprint(\" \")\nprint('Average:')\nfor i in X[y_hierc == 0, 0]:\n print(i)\nprint(\" \")\nprint('Worst:')\nfor i in X[y_hierc == 2, 0]:\n print(i)\n", "sub_path": "2.py", "file_name": "2.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy.dendrogram", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy", "line_number": 11, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.linkage", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "sklearn.cluster.AgglomerativeClustering", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "220999683", "text": "import psutil as ps\nimport time\nimport netifaces\n\n\nclass NetMonitor:\n def __init__(self):\n self._last_stat = None\n self._last_time = None\n\n def _get_net_io_counters(self):\n counters = ps.net_io_counters(pernic=True)\n\n ret = dict()\n\n for k, v in counters.items():\n ret[k] = v._asdict()\n ret[k].update({\n \"recv_per_sec\": 0,\n \"sent_per_sec\": 0\n })\n return ret\n\n def _set_last_stat(self, counters):\n self._last_stat = counters\n self._last_time = time.time()\n\n def update(self):\n counters = self._get_net_io_counters()\n\n for i in netifaces.interfaces():\n if 2 not in netifaces.ifaddresses(i).keys():\n continue\n counters[i].update({\"ip\": netifaces.ifaddresses(i)[2][0]['addr'], \"interface\": i})\n\n if not self._last_stat:\n self._set_last_stat(counters=counters)\n return counters\n\n delta = time.time() - self._last_time\n\n for k, v in counters.items():\n old = self._last_stat.get(k)\n counters[k].update({\n \"recv_per_sec\": round((v['bytes_recv'] - old['bytes_recv']) / delta / 1024, 2),\n \"sent_per_sec\": round((v['bytes_sent'] - old['bytes_sent']) / delta / 1024, 2)\n })\n\n self._set_last_stat(counters)\n\n def get(self):\n return self._last_stat\n\n\nif __name__ == '__main__':\n net_monitor = NetMonitor()\n net_monitor.update()\n while True:\n print(net_monitor.get()['eth0']['recv_per_sec'] / 1024)\n time.sleep(1)\n net_monitor.update()\n", "sub_path": "statbrowser/monitor/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "psutil.net_io_counters", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "netifaces.interfaces", "line_number": 31, "usage_type": "call"}, {"api_name": "netifaces.ifaddresses", "line_number": 32, "usage_type": "call"}, {"api_name": "netifaces.ifaddresses", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "28042027", "text": "from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nimport re\n\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$') \n\nclass Recipe:\n def __init__( self , data ):\n self.id = data['id']\n self.name = data['name']\n self.description = data['description']\n self.instructions = data['instructions']\n self.under30 = data['under30']\n self.author_id = data['author_id']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n # Now we use class methods to query our database\n \n @classmethod\n def 
get_all(cls):\n query = \"SELECT * FROM recipes;\"\n # make sure to call the connectToMySQL function with the schema you are targeting.\n results = connectToMySQL('recipes').query_db(query)\n # Create an empty list to append our instances of friends\n recipes = []\n # Iterate over the db results and create instances of friends with cls.\n for recipe in results:\n recipes.append( cls(recipe) )\n return recipes\n\n @classmethod\n def get_by_id(cls, data):\n query = \"SELECT * FROM recipes WHERE id = %(id)s;\"\n # make sure to call the connectToMySQL function with the schema you are targeting.\n result = connectToMySQL('recipes').query_db(query, data)\n if len(result) < 1:\n return False\n return cls(result[0])\n\n @classmethod\n def save(cls, data):\n query = \"INSERT INTO recipes ( name, description, instructions, under30, author_id, created_at, updated_at) VALUES ( %(name)s, %(description)s, %(instructions)s, %(under30)s, %(author_id)s, %(created_at)s, NOW());\"\n # data is a dictionary that will be passed into the save method from server.py\n return connectToMySQL('recipes').query_db( query, data )\n\n @classmethod\n def update(cls, data):\n query = \"UPDATE recipes SET name = %(name)s, description = %(description)s, instructions = %(instructions)s, under30 = %(under30)s, created_at = %(created_at)s, updated_at = NOW() WHERE id = %(id)s;\"\n # data is a dictionary that will be passed into the save method from server.py\n return connectToMySQL('recipes').query_db( query, data )\n\n @classmethod\n def delete(cls, data):\n query = \"DELETE FROM recipes WHERE id = %(id)s;\"\n print(query)\n # data is a dictionary that will be passed into the save method from server.py\n return connectToMySQL('recipes').query_db( query, data )\n \n @staticmethod\n def validate(recipe):\n is_valid = True;\n if len(recipe['name']) < 3:\n flash(\"Recipe Name must be at least 3 characters\")\n is_valid = False \n if len(recipe['description']) < 3:\n flash(\"Description must be at least 3 characters\")\n is_valid = False \n if len(recipe['instructions']) < 3:\n flash(\"Instructions must be at least 3 characters\")\n is_valid = False \n if not recipe['created_at']:\n flash(\"Date created is required\", \"register\")\n is_valid = False \n return is_valid\n", "sub_path": "flask_mysql/validation/recipes/flask_app/models/recipe.py", "file_name": "recipe.py", "file_ext": "py", "file_size_in_byte": 3191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "re.compile", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 36, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_app.config.mysqlconnection.connectToMySQL", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "359088689", "text": "import json\n\ndef run(data, parameters):\n listDictionaries = json.loads(data)\n minVehicleCount = 
int(parameters['minVehicleCount'])\n maxVehicleCount = int(parameters['maxVehicleCount'])\n print('minVehicleCount is: ' + str(minVehicleCount))\n print('maxVehicleCount is: ' + str(maxVehicleCount))\n\n # Filter out the elements with a vehicleCount not within the minimum and maximum\n for element in listDictionaries[:]:\n vehicleCount = getVehicleCount(element)\n print('element is: ' + str(element))\n print('vehicleCount is: ' + str(vehicleCount))\n if vehicleCount is not None:\n if (vehicleCount > maxVehicleCount) or (vehicleCount < minVehicleCount):\n print('removing')\n listDictionaries.remove(element)\n else:\n print('not removing')\n continue\n else:\n listDictionaries.remove(element)\n print('removing')\n continue\n\n print('number of elements is: ' + str(len(listDictionaries)))\n for element in listDictionaries:\n vehicleCount = getVehicleCount(element)\n print('vehicleCount left is: ' + str(vehicleCount))\n # Add a vehicleCount key to the data\n element['vehicle count'] = str(vehicleCount)\n return json.dumps(listDictionaries)\n\ndef getVehicleCount(element):\n try:\n vehicleCount = int(element['properties']['count']['java.lang.Long'])\n except KeyError:\n vehicleCount = None\n\n # print('vehicleCount is: ' + str(vehicleCount))\n # print('type of vehicleCount is: ' + str(type(vehicleCount)))\n return vehicleCount\n", "sub_path": "vehicleCountFilterScript.py", "file_name": "vehicleCountFilterScript.py", "file_ext": "py", "file_size_in_byte": 1639, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "json.loads", "line_number": 4, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "302851296", "text": "\"\"\"Ecolink 4655BC0-R device.\"\"\"\n\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, Identify, Ota, PollControl\nfrom zigpy.zcl.clusters.homeautomation import Diagnostic\nfrom zigpy.zcl.clusters.measurement import TemperatureMeasurement\nfrom zigpy.zcl.clusters.security import IasZone\n\nfrom zhaquirks import PowerConfigurationCluster\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass CustomPowerConfigurationCluster(PowerConfigurationCluster):\n \"\"\"Custom PowerConfigurationCluster.\"\"\"\n\n cluster_id = PowerConfigurationCluster.cluster_id\n MIN_VOLTS = 2.1\n MAX_VOLTS = 3.0\n\n\nclass Ecolink4655BC0R(CustomDevice):\n \"\"\"Ecolink 4655BC0-R device.\"\"\"\n\n signature = {\n # \n MODELS_INFO: [(\"Ecolink\", \"4655BC0-R\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.IAS_ZONE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n CustomPowerConfigurationCluster.cluster_id,\n Identify.cluster_id,\n PollControl.cluster_id,\n TemperatureMeasurement.cluster_id,\n IasZone.cluster_id,\n Diagnostic.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n CustomPowerConfigurationCluster,\n Identify.cluster_id,\n PollControl.cluster_id,\n TemperatureMeasurement.cluster_id,\n IasZone.cluster_id,\n Diagnostic.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n }\n }\n }\n", "sub_path": "zhaquirks/ecolink/contact.py", "file_name": "contact.py", "file_ext": "py", "file_size_in_byte": 2215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "50", "api": [{"api_name": "zhaquirks.PowerConfigurationCluster", "line_number": 21, "usage_type": "name"}, {"api_name": "zhaquirks.PowerConfigurationCluster.cluster_id", "line_number": 24, "usage_type": "attribute"}, {"api_name": "zhaquirks.PowerConfigurationCluster", "line_number": 24, "usage_type": "name"}, {"api_name": "zigpy.quirks.CustomDevice", "line_number": 29, "usage_type": "name"}, {"api_name": "zhaquirks.const.MODELS_INFO", "line_number": 37, "usage_type": "name"}, {"api_name": "zhaquirks.const.ENDPOINTS", "line_number": 38, "usage_type": "name"}, {"api_name": "zhaquirks.const.PROFILE_ID", "line_number": 40, "usage_type": "name"}, {"api_name": "zhaquirks.const.DEVICE_TYPE", "line_number": 41, "usage_type": "name"}, {"api_name": "zhaquirks.const.INPUT_CLUSTERS", "line_number": 42, "usage_type": "name"}, {"api_name": "zhaquirks.const.OUTPUT_CLUSTERS", "line_number": 51, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.PROFILE_ID", "line_number": 40, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 40, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.DeviceType", "line_number": 41, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 41, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Basic.cluster_id", "line_number": 43, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Basic", "line_number": 43, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Identify.cluster_id", "line_number": 45, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Identify", "line_number": 45, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.PollControl.cluster_id", "line_number": 46, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.PollControl", "line_number": 46, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.measurement.TemperatureMeasurement.cluster_id", "line_number": 47, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.measurement.TemperatureMeasurement", "line_number": 47, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.security.IasZone.cluster_id", "line_number": 48, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.security.IasZone", "line_number": 48, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.homeautomation.Diagnostic.cluster_id", "line_number": 49, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.homeautomation.Diagnostic", "line_number": 49, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Ota.cluster_id", "line_number": 51, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Ota", "line_number": 51, "usage_type": "name"}, {"api_name": "zhaquirks.const.ENDPOINTS", "line_number": 57, "usage_type": "name"}, {"api_name": "zhaquirks.const.PROFILE_ID", "line_number": 59, "usage_type": "name"}, {"api_name": "zhaquirks.const.INPUT_CLUSTERS", "line_number": 60, "usage_type": "name"}, {"api_name": "zhaquirks.const.OUTPUT_CLUSTERS", "line_number": 69, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.PROFILE_ID", "line_number": 59, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 59, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Basic.cluster_id", "line_number": 61, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Basic", "line_number": 61, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Identify.cluster_id", "line_number": 63, 
"usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Identify", "line_number": 63, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.PollControl.cluster_id", "line_number": 64, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.PollControl", "line_number": 64, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.measurement.TemperatureMeasurement.cluster_id", "line_number": 65, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.measurement.TemperatureMeasurement", "line_number": 65, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.security.IasZone.cluster_id", "line_number": 66, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.security.IasZone", "line_number": 66, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.homeautomation.Diagnostic.cluster_id", "line_number": 67, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.homeautomation.Diagnostic", "line_number": 67, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Ota.cluster_id", "line_number": 69, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Ota", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "22089702", "text": "import sys\nimport time\nimport copy\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n\ndef scroll_bottom(browser, cycle):\n\t# scroll to bottom to get more URLs\n\tscript = \"var q=document.documentElement.scrollTop={}\"\n\tscroll = 10000\n\tfor i in range(cycle):\n\t\tjs = script.format(scroll)\n\t\tbrowser.execute_script(js)\n\t\tscroll += 500\n\t\ttime.sleep(0.5)\n\nurls = set()\ncurrent_urls = set()\nbackup_urls = set()\n\ndomain = \"https://youtube.com\"\noption = webdriver.ChromeOptions()\noption.add_argument(\"--proxy-server=http://127.0.0.1:8080\")\n\nbrowser = webdriver.Chrome(\"C:/Python/chromedriver75.exe\", options=option)\nbrowser.get(domain)\n\n# find urls on main page\nscroll_bottom(browser, 10)\nthumbnails = browser.find_elements_by_id(\"thumbnail\")\nfor element in thumbnails:\n\turl = element.get_attribute('href')\n\tif url:\n\t\turls.add(url)\n\t\tcurrent_urls.add(url)\n\ntime.sleep(1)\n\nprint('phase 1 finished.')\nprint(str(len(urls)) + \" urls have been captured.\")\nprint()\n\n# find urls in sub-pages\nfor i in range(5):\n\tfor url in current_urls:\n\t\tbrowser.get(url)\n\t\tbrowser.execute_script('videos = document.querySelectorAll(\"video\"); for(video of videos) {video.pause()}')\n\t\t\n\t\tscroll_bottom(browser, 5)\n\t\tendpoints = browser.find_elements_by_class_name('yt-simple-endpoint')\n\t\tfor ep in endpoints:\n\t\t\turl = ep.get_attribute('href')\n\t\t\tif url:\n\t\t\t\turls.add(url)\n\t\t\t\tbackup_urls.add(url)\n\n\t\tif len(urls) > 2000:\n\t\t\tbrowser.close()\n\n\t\t\tprint(str(len(urls)) + \" urls have been captured.\")\n\t\t\t# print(urls)\n\n\t\t\toutput = open('urls.txt', 'w')\n\t\t\tfor url in urls:\n\t\t\t\toutput.write(url + \"\\n\")\n\n\t\t\toutput.close()\n\t\t\tsys.exit(0)\n\n\t\ttime.sleep(0.5)\n\n\tcurrent_urls = backup_urls\n\tbackup_urls.clear()\n\n\tprint(\"phase 2 finished.\")\n\tprint(str(len(urls)) + \" urls have been captured.\")\n\tprint()\n\nbrowser.close()\t\n\n", "sub_path": "scrapy.py", "file_name": "scrapy.py", "file_ext": "py", "file_size_in_byte": 1817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", 
"line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "649364548", "text": "#!/usr/bin/python\n# Deploys the master branch to the hutmap.com dreamhost account\n#\n# Specifically, merges master into dreamhost, builds css and js files with new\n# uuid suffixes, pulls this all into dreamhost, and restarts the server.\n# \n# Assumptions:\n# - git on the PATH (i.e. you can use git from a terminal)\n# - the vagrant vm is up and running\n# - ssh on the PATH (manual steps printed if ssh fails)\n\n\nfrom os.path import join, dirname, normpath\nimport os\nimport shlex\nimport subprocess\nimport time\nimport urllib2\nimport uuid\nimport shutil\n\ndef shell(cmd, **kwargs):\n args = shlex.split(cmd) \n subprocess.check_call(args, **kwargs)\n\n\nbase_path = normpath(join(dirname(__file__), '..', '..'))\nos.chdir(base_path)\n\nhutmap_ssh = 'hutmap@ssh.hutmap.com'\nvers = uuid.uuid1()\n\nsuccess1 = False\ntry:\n shutil.rmtree('public/static/css', ignore_errors=True)\n shutil.rmtree('public/static/js', ignore_errors=True)\n shell('git checkout dreamhost')\n shell('git pull origin dreamhost')\n shell('git merge -s resolve master -m\"Merge master into branch dreamhost\"')\n shell('git rm -r public/static/css/ public/static/js/')\n shell('python scripts/utils/shovel-server.py start')\n\n time.sleep(1)\n urllib2.urlopen('http://localhost:3000/build.css?version={0}'.format(vers))\n urllib2.urlopen('http://localhost:3000/build.js?version={0}'.format(vers))\n\n shell('git add public/static/css/ public/static/js/')\n shell('git commit -m\"Version {0}\"'.format(vers))\n shell('git push origin dreamhost')\n success1 = True\nfinally:\n shell('git checkout master')\n\nif success1:\n success2 = False\n try:\n deploy_remote = open(os.path.join(base_path, 'scripts', 'utils', 'deploy-dreamhost-remote.sh'))\n subprocess.check_call(['ssh', hutmap_ssh, 'bash -s {}'.format(vers)], stdin=deploy_remote)\n success2 = True\n except Exception as e:\n print(e)\n\n if success1 and success2:\n print('\\nDeploy successful!\\n')\n elif success1 and not success2:\n print('\\n\\nThere were errors but you can still complete the deployment.\\n')\n print('To complete, ssh in and run the following:')\n print(' hutmap.com/scripts/utils/deploy-dreamhost-remote.sh {}\\n'.format(vers))\n print('Or all in one go:')\n print(' ssh {} \"bash -s {}\" < scripts/utils/deploy-dreamhost-remote.sh\\n'.format(hutmap_ssh, vers))\n else:\n print('\\n Deploy failed. 
Look at the stack trace printed below for more details.\n')\n", "sub_path": "scripts/utils/deploy-dreamhost.py", "file_name": "deploy-dreamhost.py", "file_ext": "py", "file_size_in_byte": 2397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "shlex.split", "line_number": 23, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 28, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 31, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 35, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "subprocess.check_call", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "328569008", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 18 16:59:20 2020\r\n\r\n@author: Jieyun Hu\r\n\"\"\"\r\n\r\n# This file covers data preparation, encoding and modeling\r\n# To predict two types of activity. Click = 0 , Swipe = 1.\r\n# put this file outside the filtered_traces folder\r\nimport os\r\nimport json\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport collections \r\n\r\nnpy_file_dir = './ui_layout_vectors/ui_vectors.npy'\r\njson_file_dir = './ui_layout_vectors/ui_names.json'\r\nlst = os.listdir(\"filtered_traces\") # For exploring the filtered_traces folder\r\nindex_search_file = []\r\n\r\nwith open(json_file_dir) as json_file:\r\n    index_search_file = json.load(json_file)['ui_names']\r\nvectors = np.load(npy_file_dir) \r\n \r\ndef search_for_index(ui_name):\r\n    full_name = ui_name + '.png'\r\n    return index_search_file.index(full_name)\r\n\r\n#find the 64-dim vector\r\ndef search_for_vector(index):\r\n    return vectors[index,:] \r\n\r\ndef ui_to_vector(ui_name):\r\n    return vectors[search_for_index(ui_name),:]\r\n\r\ndef gestures_to_vectors(gestures_dir): \r\n    with open(gestures_dir) as json_file:\r\n        gestures = json.load(json_file)\r\n    get_ui_seq = [*gestures]\r\n    vecs = []\r\n    for ui in get_ui_seq:\r\n        vecs.append(ui_to_vector(ui))\r\n    return vecs \r\n\r\n\r\n# Given a list of gestures.json file paths:\r\n# return[0] is a list of 67-dim vectors, each the 64-dim UI vector concatenated with a 3-dim activity vector\r\n# return[1] is only the 3-dim activity vectors. 
I haven't used them, but they may be useful later.\r\n    \r\ndef gestures_array_to_vector(gestures_dir_array):\r\n    #print(gestures_dir_array)\r\n    res = []\r\n    res_y = []\r\n    for gestures_dir in gestures_dir_array:\r\n        with open(gestures_dir) as json_file:\r\n            gestures = json.load(json_file)\r\n        get_ui_seq = [*gestures]\r\n        vecs = []\r\n        vecs_y = []\r\n        for ui in get_ui_seq:\r\n            try:\r\n                vector_64 = ui_to_vector(ui) # look up the 64-dim UI layout vector\r\n\r\n                lst_of_activity = gestures.get(ui)\r\n                if len(lst_of_activity) == 1: #click\r\n                    temp = [0]\r\n                    temp.extend(lst_of_activity[0])\r\n                    temp = np.asarray(temp)\r\n                    vecs_y.append(temp) # e.g. [0, coorX, coorY]\r\n                    vector_67 = np.concatenate((vector_64,temp),axis=0)\r\n                    #print(len(vector_67))\r\n                    vecs.append(vector_67) # 64-dim vector concatenated with the activity vector\r\n                elif len(lst_of_activity) > 1: #swipe\r\n                    average_of_coor = [float(sum(l))/len(l) for l in zip(*lst_of_activity)]\r\n                    temp = [1]\r\n                    temp.extend(average_of_coor) # e.g. [1, coorX, coorY]\r\n                    temp = np.asarray(temp)\r\n                    vecs_y.append(temp)\r\n                    vector_67 = np.concatenate((vector_64,temp),axis=0)\r\n                    vecs.append(vector_67)\r\n            except: # skip UIs that have no precomputed layout vector\r\n                pass\r\n        #print(vecs_y)\r\n        #print(vecs)\r\n        res.append(vecs) \r\n        res_y.append(vecs_y)\r\n    return [res,res_y] \r\n    \r\ntrace_dict = collections.defaultdict(list) \r\ndef trace_length_to_dictionary():\r\n    for f in lst:\r\n        sublst = os.listdir(\"filtered_traces/\"+f)\r\n        for sub in sublst:\r\n            file_name = \"filtered_traces/\"+f+\"/\"+sub+\"/gestures.json\"\r\n            with open(file_name) as json_file:\r\n                data = json.load(json_file)\r\n            data_len = len(data)\r\n            #trace_dict[data_len].append(f)\r\n            trace_dict[data_len].append(file_name)\r\n    \r\n#trace_length_to_dictionary needs to be run before this\r\n#returns the list of file names with the same trace length\r\ndef find_files_by_count(count):\r\n    return trace_dict[count] \r\n    \r\n#given a start and end index, find all files within the range of trace steps\r\ndef find_all_files_in_range(start,end):\r\n    res = []\r\n    for i in range(start,end):\r\n        l = trace_dict[i]\r\n        for each in l:\r\n            res.append(each)\r\n    return res\r\n\r\n\r\ntrace_length_to_dictionary()\r\ntrace_dir_array = find_all_files_in_range(20,54)\r\nvectors_array = gestures_array_to_vector(trace_dir_array)[0]\r\n\r\n#print(vectors_array[0])\r\n\r\n\r\nfrom tensorflow.keras.layers import Input, SimpleRNN, GRU, LSTM, Dense, Flatten, Dropout, GlobalMaxPool1D\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import SGD, Adam\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#RNN model\r\n# N = number of samples\r\n# T = sequence length\r\n# D = number of input features\r\n# M = number of hidden units\r\n# K = number of output units\r\nN = 1\r\nT = 1\r\nD = 67\r\nM = 10\r\nK = 1\r\n\r\ni = Input(shape = (T,D))\r\nx = SimpleRNN(M, return_sequences=True)(i)\r\nx = GlobalMaxPool1D()(x)\r\nx = Dropout(0.5)(x)\r\nx = Dense(K,activation = 'sigmoid')(x) # sigmoid output pairs with binary_crossentropy\r\nmodel = Model(i,x)\r\n#model.compile( loss = 'mse', metrics = ['accuracy'], optimizer = Adam(lr = 0.001),)\r\nmodel.compile(loss = 'binary_crossentropy', optimizer = Adam(lr=0.001), metrics = ['accuracy'],)\r\n\r\n\r\n# split the dataset array to X and y\r\ndef split_dataset_array(dataset_array, time_step):\r\n    X, y = list(), list()\r\n    for dataset in dataset_array:\r\n        dataset = np.array(dataset)\r\n        len_ = len(dataset)\r\n        x_index = 0\r\n        y_index = T\r\n        while y_index < len_:\r\n            x_input = dataset[x_index:(x_index+time_step), :]\r\n            y_input = dataset[y_index,:][64]\r\n            
X.append(x_input)\r\n            y.append(y_input)\r\n            x_index +=1\r\n            y_index +=1\r\n    return np.array(X), np.array(y)\r\n\r\n\r\n\r\nX, y = split_dataset_array(vectors_array, T)\r\n#print(X.shape)\r\n#print(y.shape)\r\n#print(y)\r\nr = model.fit(X, y, epochs = 200, validation_split = 0.4)\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nf1 = plt.figure(1)\r\nplt.title('Loss')\r\nplt.plot(r.history['loss'], label = 'train')\r\nplt.plot(r.history['val_loss'], label = 'test')\r\nplt.legend()\r\nf1.show()\r\n\r\nf2 = plt.figure(2)\r\nplt.title('Accuracy')\r\nplt.plot(r.history['accuracy'], label = 'train')\r\nplt.plot(r.history['val_accuracy'], label = 'test')\r\nplt.legend()\r\nf2.show()\r\n\r\n\r\n#prediction test\r\n\r\ntest = ['filtered_traces/com.linkedin.android/trace_0/gestures.json']\r\nres = gestures_array_to_vector(test)\r\n#testing for a ui\r\n#the single sigmoid output is thresholded at 0.5 (argmax of a length-1 vector would always print 0)\r\nfor i in range(len(res[0][0])):\r\n    x = res[0][0][i].reshape(1,1,67)\r\n    yhat = model.predict(x)\r\n    print(int(yhat[0, 0] > 0.5))\r\n", "sub_path": "encoding_modeling.py", "file_name": "encoding_modeling.py", "file_ext": "py", "file_size_in_byte": 6613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "json.load", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 24, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 80, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 90, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 93, "usage_type": "call"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.SimpleRNN", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.GlobalMaxPool1D", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 185, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}]} +{"seq_id": "409642494", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 03 09:40:19 2014\n\n@author: Kyle Ellefsen\n\"\"\"\nfrom __future__ import (absolute_import, division,print_function, unicode_literals)\nfrom future.builtins import (bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\n\nimport numpy as np\nimport scipy\nimport global_vars as g\nimport scipy.ndimage \nfrom skimage import feature\nfrom skimage.filters import threshold_adaptive\nfrom process.BaseProcess import BaseProcess, SliderLabel, WindowSelector, MissingWindowError\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import * \n\n__all__ = ['threshold','remove_small_blobs','adaptive_threshold','logically_combine','binary_dilation','binary_erosion']\n \n \ndef convert2uint8(tif):\n oldmin=np.min(tif)\n oldmax=np.max(tif)\n newmax=2**8-1\n tif=((tif-oldmin)*newmax)/(oldmax-oldmin)\n tif=tif.astype(np.uint8)\n return tif\n \nclass Threshold(BaseProcess):\n \"\"\"threshold(value, darkBackground=False, keepSourceWindow=False)\n Creates a boolean matrix by applying a threshold\n \n Parameters:\n | value (float) -- The threshold to be applied\n | darkBackground (bool) -- If this is True, pixels below the threshold will be True\n Returns:\n newWindow\n \"\"\"\n def __init__(self):\n super().__init__()\n def gui(self):\n self.gui_reset()\n valueSlider=SliderLabel(2)\n if g.m.currentWindow is not None:\n image=g.m.currentWindow.image\n valueSlider.setRange(np.min(image),np.max(image))\n valueSlider.setValue(np.mean(image))\n preview=QCheckBox()\n preview.setChecked(True)\n self.items.append({'name':'value','string':'Value','object':valueSlider})\n self.items.append({'name':'darkBackground','string':'Dark Background','object':QCheckBox()})\n self.items.append({'name':'preview','string':'Preview','object':preview})\n super().gui()\n def __call__(self,value,darkBackground=False, keepSourceWindow=False):\n self.start(keepSourceWindow)\n if darkBackground:\n newtif=self.tifvalue\n self.newtif=newtif.astype(np.uint8)\n self.newname=self.oldname+' - Thresholded '+str(value)\n return self.end()\n def preview(self):\n value=self.getValue('value')\n preview=self.getValue('preview')\n darkBackground=self.getValue('darkBackground')\n nDim=len(g.m.currentWindow.image.shape)\n if preview:\n if nDim==3: # if the image is 3d\n testimage=np.copy(g.m.currentWindow.image[g.m.currentWindow.currentIndex])\n elif nDim==2:\n testimage=np.copy(g.m.currentWindow.image)\n if darkBackground:\n testimage=testimagevalue\n g.m.currentWindow.imageview.setImage(testimage,autoLevels=False)\n 
g.m.currentWindow.imageview.setLevels(-.1,1.1)\n        else:\n            g.m.currentWindow.reset()\n        if nDim==3:\n            image=g.m.currentWindow.image[g.m.currentWindow.currentIndex]\n        else:\n            image=g.m.currentWindow.image\n            g.m.currentWindow.imageview.setLevels(np.min(image),np.max(image))\nthreshold=Threshold()\n\nclass BlocksizeSlider(SliderLabel):\n    def __init__(self,decimals=0):\n        SliderLabel.__init__(self,decimals)\n    def updateSlider(self,value):\n        if value%2==0:\n            if value"\n\n\n@training_app.route(\"/set_trainer\")\ndef set_trainer() -> str:\n    training_url = url_for(\"training.training\")\n    return f\"\"\"\n
    <form action=\"{training_url}\">\n    <input type=\"text\" name=\"external_worker_id\">\n    <input type=\"submit\"></form>\n
\n \"\"\"\n\n\n@training_app.route(\"/done\")\ndef done():\n return \"Congratulations! You are now qualified for the HIT!\"\n\n\n@training_app.route(\"/\")\ndef training():\n external_worker_id = request.args.get(\"external_worker_id\")\n if not external_worker_id:\n return redirect(url_for(\"training.set_trainer\"))\n\n training = training_controller.get_next_training_for_worker(external_worker_id)\n\n if training == \"__DONE__\":\n return redirect(url_for(\"training.done\"))\n\n if not training or training[\"id\"] == 0:\n return render_template(\n \"video.html\", task_id=0, external_worker_id=external_worker_id\n )\n else:\n return render_template(\n \"training.html\",\n history=training[\"history\"],\n replies=training[\"replies\"],\n description=training[\"description\"],\n task_id=training[\"id\"],\n external_worker_id=external_worker_id,\n submit_url=url_for(\"training.training_submit\"),\n with_audio=False,\n used_text_input=True,\n )\n\n\n@training_app.route(\"/training_submit\", methods=[\"POST\"])\ndef training_submit():\n external_worker_id = request.form[\"external_worker_id\"]\n task_id = int(request.form[\"task_id\"])\n if task_id == 0:\n the_time = float(request.form.get(\"task_identifier\", 0.0)) - 3315.2\n if the_time < 0.9 or the_time > 1.0:\n return \"Please watch the whole video and try again.\"\n\n try:\n done_training = training_controller.submit(external_worker_id, task_id)\n except KeyError:\n return redirect(\n url_for(\"training.training\", external_worker_id=external_worker_id)\n )\n except Exception:\n print(traceback.format_exc())\n return \"Sorry! Something went wrong. Please email {} and provide your worker id to fix this issue.\".format(\n CONTACT_EMAIL\n )\n\n if done_training:\n try:\n mturk.qualify_worker(external_worker_id)\n return redirect(url_for(\"training.done\"))\n except Exception:\n print(traceback.format_exc())\n return \"You are done, but something went wrong with your qualification. 
Please email {} and provide your worker id to fix this issue.\".format(\n CONTACT_EMAIL\n )\n else:\n return redirect(\n url_for(\"training.training\", external_worker_id=external_worker_id)\n )\n", "sub_path": "crowd_sourcing/pages/training.py", "file_name": "training.py", "file_ext": "py", "file_size_in_byte": 3018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Blueprint", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 30, "usage_type": "call"}, {"api_name": "controllers.training_controller.get_next_training_for_worker", "line_number": 32, "usage_type": "call"}, {"api_name": "controllers.training_controller", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "controllers.training_controller.submit", "line_number": 65, "usage_type": "call"}, {"api_name": "controllers.training_controller", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 68, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 71, "usage_type": "call"}, {"api_name": "fantom_util.mturk.qualify_worker", "line_number": 78, "usage_type": "call"}, {"api_name": "fantom_util.mturk", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 79, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "286640403", "text": "\"\"\"\nAuthor:\n\n oliver\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport datetime\n\nimport time\nimport subprocess\n\nimport matplotlib\n# matplotlib.use(\"Agg\") # This suppresses the annoying Python Rocket GUI indicator in the dock.\nmatplotlib.use('TkAgg')\nimport pylab\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport matplotlib.dates as mdates\n\ndt = datetime.datetime\n\nif False:\n\n d_gov = 
pd.read_excel(\"key_rates.xlsx\", \"10y_gv\", 0, range(5))\n # d_gov.head()\n d_gov.columns = [\"date\", \"px\", \"yield\"]\n # d_gov.date[0]\n ts_gov = d_gov[(d_gov.date >= dt(2010,1,1)) & (d_gov.date <= dt(2015,9,1))]\n # ts_gov = d_gov[(d_gov.date >= dt(2003,1,1)) & (d_gov.date <= dt(2015,9,1))]\n # d_gov.shape\n # ts_gov.shape\n\n # loading into the console the data from cprcdralpha.py\n # d is the cprcdralphda data\n ts = d[(d.coupon > 5.0) & (d.origination >= datetime.date(2003,1,1)) & (d.origination <= datetime.date(2004,1,1))]\n ts = ts[ts.cnloans > 500]\n ts = ts[ts.cdr1 >= 0.0] # Because I want to use a log scale and these aren't that helpful\n ts = ts[ts.crr1 >= 0.0]\n ts2 = ts[ts.date >= datetime.datetime(2003,1,1)]\n\n # plt.clf()\n # plt.gca().axes.twinx().set_ylabel(\"fkjdhs\")\n # plt.rc('text', usetex=True)\n # plt.rcParams[\"figure.figsize\"] = (8, 5)\n # plt.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], 'size': 16})\n # plt.plot(ts_gov.date, 10.0*ts_gov.px, 'k-', linewidth=2)\n # plt.plot(ts2.date, ts2.crr1, 'o', markersize=5, mfc=\"grey\", alpha=0.5)\n # plt.title(\"Prepayment of FNMA and FHLMC Pass-Throughs\")\n # plt.gca().axes.twinx().set_ylabel(\"dsfsdf\")\n # plt.ylabel(\"Prepayment Rate\")\n # plt.xlabel(\"Date\")\n # plt.yticks(plt.yticks()[0], [str(int(i)) + \"\\\\%\" for i in plt.yticks()[0]])\n # plt.tick_params(pad=10)\n\n ts_fed = d_fed[d_fed.date >= dt(2003,1,1)]\n\n plt.clf()\n plt.tick_params(pad=10)\n # plt.tick_params(pad=10, length=5, width=1.3, which='major')\n plt.title(\"Prepayment of FNMA and FHLMC Pass-Throughs\")\n plt.rc('text', usetex=True)\n plt.rcParams[\"figure.figsize\"] = (8, 5)\n plt.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], 'size': 16})\n fig = plt.gcf()\n # fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.plot(ts2.date, ts2.crr1, 'o', markersize=5, mfc=\"grey\", alpha=0.5)\n ax1.set_xlabel('Date')\n ax1.set_ylabel(\"Prepayment Rate\")\n # ax1.get_yticks()\n ax1.set_yticklabels([str(int(i)) + \"\\\\%\" for i in ax1.get_yticks()])\n ax2 = ax1.twinx()\n # ax2.plot(ts_gov.date, ts_gov.px, 'w-', linewidth=6)\n # ax2.plot(ts_gov.date, ts_gov.px, 'k-', linewidth=1)\n ax2.set_ylabel(\"US Treasury 10-Year Bond Yield\")\n ax2.plot(ts_fed.date, ts_fed.funds, 'w-', linewidth=5)\n ax2.plot(ts_fed.date, ts_fed.funds, 'k-', linewidth=1)\n ax2.set_ylabel(\"US Effective Federal Funds Rate\")\n ax2.set_yticklabels([str(i) + \"\\\\%\" for i in ax2.get_yticks()])\n ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n file_name_1 = \"{}{}{}\".format(\"Plots/\", ts, \".pdf\")\n file_name_2 = \"{}{}{}{}\".format(\"Plots/\", ts, \"_dated\", \".pdf\")\n plt.savefig(file_name_1, format=\"pdf\", bbox_inches='tight')\n ax1.set_xlabel(\"Date \\n File: {}.pdf\".format(ts.replace(\"_\", \"\\_\")), labelpad=5)\n plt.savefig(file_name_2, format=\"pdf\", bbox_inches='tight')\n subprocess.call([\"open\", file_name_2])\n", "sub_path": "Python/other_rates.py", "file_name": "other_rates.py", "file_ext": "py", "file_size_in_byte": 3441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.use", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 
43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "397983169", "text": "#!/usr/bin/env python\n# coding:utf-8\n\"\"\"\nAuthor:\n LiTeng 1471356861@qq.com\n\nImplement TextRNN, contains LSTM,GRU,RNN\nReference: \"Effective LSTMs for Target-Dependent Sentiment Classification\"\n \"Bidirectional LSTM-CRF Models for Sequence Tagging\"\n \"Generative and discriminative text classification\n with recurrent neural networks\"\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom model.layers.embeddings import EmbeddingsLayer\nfrom utils.logger import Type\n\n\nclass RNNType(Type):\n RNN = 'RNN'\n LSTM = 'LSTM'\n GRU = 'GRU'\n\n @classmethod\n def str(cls):\n return \",\".join([cls.RNN, cls.LSTM, cls.GRU])\n\n\nclass Model(tf.keras.Model):\n \"\"\"\n One layer rnn.\n \"\"\"\n def __init__(self, config):\n super(Model, self).__init__()\n self.config = config\n if self.config.embedding.use_embedding:\n self.embedding = EmbeddingsLayer(config.embedding)\n else:\n self.reshape = keras.layers.Reshape((config.TextRNN.input_length, config.TextRNN.embedding_dimension))\n\n if self.config.TextRNN.rnn_type == RNNType.LSTM:\n layer_cell = keras.layers.LSTM\n elif self.config.TextRNN.rnn_type == RNNType.GRU:\n layer_cell = keras.layers.GRU\n else:\n layer_cell = keras.layers.SimpleRNN\n\n self.rnn_type = config.TextRNN.rnn_type\n self.num_layers = config.TextRNN.num_layers\n self.bidirectional = config.TextRNN.bidirectional\n\n self.layer_cells = []\n for i in range(config.TextRNN.num_layers):\n if config.TextRNN.bidirectional:\n self.layer_cells.append(keras.layers.Bidirectional(\n layer_cell(config.TextRNN.hidden_dimension,\n use_bias=config.TextRNN.use_bias,\n activation=config.TextRNN.activation,\n kernel_regularizer=keras.regularizers.l2(self.config.TextRNN.l2 * 0.1),\n 
recurrent_regularizer=keras.regularizers.l2(self.config.TextRNN.l2), return_sequences=(i + 1 < config.TextRNN.num_layers))))  # all but the last stacked layer must return sequences\n            else:\n                self.layer_cells.append(layer_cell(config.TextRNN.hidden_dimension,\n                                                   use_bias=config.TextRNN.use_bias,\n                                                   activation=config.TextRNN.activation,\n                                                   kernel_regularizer=keras.regularizers.l2(self.config.TextRNN.l2 * 0.1),\n                                                   recurrent_regularizer=keras.regularizers.l2(self.config.TextRNN.l2), return_sequences=(i + 1 < config.TextRNN.num_layers)))\n\n        self.fc = keras.layers.Dense(config.TextRNN.num_classes)\n\n    def call(self, inputs, training=None, mask=None):\n\n        print(\"inputs\", inputs)\n        x = inputs\n        if self.config.embedding.use_embedding:\n            # [b, sentence len] => [b, sentence len, word embedding]\n            x = self.embedding(x)\n            print(\"embedding\", x)\n        else:\n            x = self.reshape(x)\n\n        for layer_cell in self.layer_cells:\n            x = layer_cell(x)\n            print('rnn', x)\n\n        x = self.fc(x)\n        print(x.shape)\n\n        if self.config.logits_type == \"softmax\":\n            x = tf.nn.softmax(x)\n        elif self.config.logits_type == \"sigmoid\":\n            x = tf.nn.sigmoid(x)\n\n        return x\n\n\n", "sub_path": "model/classification/textrnn.py", "file_name": "textrnn.py", "file_ext": "py", "file_size_in_byte": 3277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "utils.logger.Type", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.keras", "line_number": 31, "usage_type": "attribute"}, {"api_name": "model.layers.embeddings.EmbeddingsLayer", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 44, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 48, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Bidirectional", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 57, "usage_type": "name"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 62, "usage_type": "name"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 67, "usage_type": "name"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 68, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", 
"line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 70, "usage_type": "name"}, {"api_name": "tensorflow.nn.softmax", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 93, "usage_type": "attribute"}]} +{"seq_id": "522554903", "text": "\"\"\"Common features for bignum in test generation framework.\"\"\"\n# Copyright The Mbed TLS Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport typing\n\nfrom abc import abstractmethod\nfrom typing import Iterator, List, Tuple, TypeVar\n\nT = TypeVar('T') #pylint: disable=invalid-name\n\ndef invmod(a: int, n: int) -> int:\n \"\"\"Return inverse of a to modulo n.\n\n Equivalent to pow(a, -1, n) in Python 3.8+. Implementation is equivalent\n to long_invmod() in CPython.\n \"\"\"\n b, c = 1, 0\n while n:\n q, r = divmod(a, n)\n a, b, c, n = n, c, b - q*c, r\n # at this point a is the gcd of the original inputs\n if a == 1:\n return b\n raise ValueError(\"Not invertible\")\n\ndef hex_to_int(val: str) -> int:\n return int(val, 16) if val else 0\n\ndef quote_str(val) -> str:\n return \"\\\"{}\\\"\".format(val)\n\ndef bound_mpi(val: int, bits_in_limb: int) -> int:\n \"\"\"First number exceeding number of limbs needed for given input value.\"\"\"\n return bound_mpi_limbs(limbs_mpi(val, bits_in_limb), bits_in_limb)\n\ndef bound_mpi_limbs(limbs: int, bits_in_limb: int) -> int:\n \"\"\"First number exceeding maximum of given number of limbs.\"\"\"\n bits = bits_in_limb * limbs\n return 1 << bits\n\ndef limbs_mpi(val: int, bits_in_limb: int) -> int:\n \"\"\"Return the number of limbs required to store value.\"\"\"\n return (val.bit_length() + bits_in_limb - 1) // bits_in_limb\n\ndef combination_pairs(values: List[T]) -> List[Tuple[T, T]]:\n \"\"\"Return all pair combinations from input values.\n\n The return value is cast, as older versions of mypy are unable to derive\n the specific type returned by itertools.combinations_with_replacement.\n \"\"\"\n return typing.cast(\n List[Tuple[T, T]],\n list(itertools.combinations_with_replacement(values, 2))\n )\n\n\nclass OperationCommon:\n \"\"\"Common features for bignum binary operations.\n\n This adds functionality common in binary operation tests.\n\n Attributes:\n symbol: Symbol to use for the operation in case description.\n input_values: List of values to use as test case inputs. These are\n combined to produce pairs of values.\n input_cases: List of tuples containing pairs of test case inputs. This\n can be used to implement specific pairs of inputs.\n unique_combinations_only: Boolean to select if test case combinations\n must be unique. If True, only A,B or B,A would be included as a test\n case. 
If False, both A,B and B,A would be included.\n \"\"\"\n symbol = \"\"\n input_values = [] # type: List[str]\n input_cases = [] # type: List[Tuple[str, str]]\n unique_combinations_only = True\n\n def __init__(self, val_a: str, val_b: str) -> None:\n self.arg_a = val_a\n self.arg_b = val_b\n self.int_a = hex_to_int(val_a)\n self.int_b = hex_to_int(val_b)\n\n def arguments(self) -> List[str]:\n return [\n quote_str(self.arg_a), quote_str(self.arg_b)\n ] + self.result()\n\n @abstractmethod\n def result(self) -> List[str]:\n \"\"\"Get the result of the operation.\n\n This could be calculated during initialization and stored as `_result`\n and then returned, or calculated when the method is called.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_value_pairs(cls) -> Iterator[Tuple[str, str]]:\n \"\"\"Generator to yield pairs of inputs.\n\n Combinations are first generated from all input values, and then\n specific cases provided.\n \"\"\"\n if cls.unique_combinations_only:\n yield from combination_pairs(cls.input_values)\n else:\n yield from (\n (a, b)\n for a in cls.input_values\n for b in cls.input_values\n )\n yield from cls.input_cases\n\n# BEGIN MERGE SLOT 1\n\n# END MERGE SLOT 1\n\n# BEGIN MERGE SLOT 2\n\n# END MERGE SLOT 2\n\n# BEGIN MERGE SLOT 3\n\n# END MERGE SLOT 3\n\n# BEGIN MERGE SLOT 4\n\n# END MERGE SLOT 4\n\n# BEGIN MERGE SLOT 5\n\n# END MERGE SLOT 5\n\n# BEGIN MERGE SLOT 6\n\n# END MERGE SLOT 6\n\n# BEGIN MERGE SLOT 7\n\n# END MERGE SLOT 7\n\n# BEGIN MERGE SLOT 8\n\n# END MERGE SLOT 8\n\n# BEGIN MERGE SLOT 9\n\n# END MERGE SLOT 9\n\n# BEGIN MERGE SLOT 10\n\n# END MERGE SLOT 10\n", "sub_path": "scripts/mbedtls_dev/bignum_common.py", "file_name": "bignum_common.py", "file_ext": "py", "file_size_in_byte": 4802, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "typing.TypeVar", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 65, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 66, "usage_type": "name"}, {"api_name": "itertools.combinations_with_replacement", "line_number": 67, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 102, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 112, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "3765698", "text": "from django.urls import path\nfrom .views import CategoryView, ProductListView, OrderCreateView\nfrom rest_framework import routers\n\napp_name = 'products'\n\nrouter = routers.DefaultRouter()\nrouter.register('products', ProductListView, basename='product')\n\nurlpatterns = [\n path('categories/', CategoryView.as_view()),\n path('purchase/', OrderCreateView.as_view()),\n] + router.urls\n", "sub_path": "products/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 384, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 7, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 7, 
"usage_type": "name"}, {"api_name": "views.ProductListView", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.CategoryView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.CategoryView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.OrderCreateView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.OrderCreateView", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "300483046", "text": "import time\nimport argparse\nimport pynvml\n\n\nclass Device(object):\n class Status:\n INIT = \"INIT\"\n DETECTING = \"DETECTING\"\n STOP = \"STOP\"\n\n start_detecting_mem_threshold = 32 * 1024 * 1024\n\n def __init__(self, handle):\n self.handle = handle\n self.status = self.Status.INIT\n self.max_mem_usage = 0\n\n def update(self):\n info = pynvml.nvmlDeviceGetMemoryInfo(self.handle)\n if self.status == self.Status.INIT:\n if info.used > self.start_detecting_mem_threshold:\n self.status = self.Status.DETECTING\n elif self.status == self.Status.DETECTING:\n if info.used < self.start_detecting_mem_threshold:\n self.status = self.Status.STOP\n return False\n else:\n self.max_mem_usage = max(self.max_mem_usage, info.used)\n elif self.status == self.Status.STOP:\n raise ValueError(\"detecting is stop\")\n else:\n raise ValueError(\"invalid status\")\n\n return True\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"collect GPU device memory usage\")\n parser.add_argument(\"-g\", type=int, default=1, help=\"number of gpu devices\")\n parser.add_argument(\"-n\", type=float, default=1, help=\"metrics rate\")\n args = parser.parse_args()\n\n pynvml.nvmlInit()\n n_gpus = args.g\n devices = [Device(pynvml.nvmlDeviceGetHandleByIndex(i)) for i in range(n_gpus)]\n\n running = True\n while running:\n time.sleep(args.n)\n running = False\n for device in devices:\n running |= device.update()\n\n pynvml.nvmlShutdown()\n for i, device in enumerate(devices):\n max_mem_usage_mbytes = device.max_mem_usage / 1024 / 1024\n print(f\"gpt{i} max memory usage: {max_mem_usage_mbytes:.2f}M\")\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "LanguageModeling/gpt-2/tools/gpu_memory_usage.py", "file_name": "gpu_memory_usage.py", "file_ext": "py", "file_size_in_byte": 1861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pynvml.nvmlDeviceGetMemoryInfo", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "pynvml.nvmlInit", "line_number": 44, "usage_type": "call"}, {"api_name": "pynvml.nvmlDeviceGetHandleByIndex", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "pynvml.nvmlShutdown", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "52024111", "text": "import os\nimport json\nimport base64\n\nfrom typing import (Dict, Optional, Any, List)\nfrom collections import OrderedDict\n\nfrom gtmcore.labbook.labbook import LabBook\nfrom gtmcore.activity import ActivityStore, ActivityType, ActivityRecord, ActivityDetailType, ActivityDetailRecord, \\\n ActivityAction\nfrom gtmcore.activity.utils import ImmutableList, DetailRecordList, TextData\n\n\nclass BundledAppManager:\n \"\"\"Class to manage bundled apps within a labbook instance\"\"\"\n def 
__init__(self, labbook: LabBook) -> None:\n # LabBook Environment\n self.labbook = labbook\n\n @property\n def bundled_app_file(self):\n return os.path.join(self.labbook.root_dir, '.gigantum', 'apps.json')\n\n @property\n def reserved_names(self) -> list:\n \"\"\"A property for all reserved application names. These are names that are currently used in Gigantum bases\n\n Returns:\n list\n \"\"\"\n return ['jupyter', 'notebook', 'jupyterlab', 'rstudio']\n\n @property\n def reserved_ports(self) -> list:\n \"\"\"A property for all reserved application ports. The following ports are currently reserved:\n\n 8888 - jupyter\n 8787 - rstudio\n 8686 - reserved for future expansion\n 8585 - reserved for future expansion\n 8484 - reserved for future expansion\n 8383 - reserved for future expansion\n\n Returns:\n list\n \"\"\"\n return [8888, 8787, 8686, 8585, 8484, 8383]\n\n def add_bundled_app(self, port: int, name: str, description: str, command: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"Add a \"bundled app\" configuration to this labbook\n\n Args:\n port(int): port number to expose from the container (will be routed to the browser)\n name(str): name of the bundled app\n description(str): description of the bundled app\n command(str): command to run in the container if needed to start the app\n\n Returns:\n dict\n \"\"\"\n # Check if a reserved application name, currently:\n if name.lower() in self.reserved_names:\n raise ValueError(f\"{name} is a reserved application name. Try again.\")\n\n if len(name) > 10 or len(name) < 1:\n raise ValueError(f\"{name} must be 10 characters or less.\")\n\n if len(description) > 240:\n raise ValueError(f\"{description} must be 240 characters or less.\")\n\n if command:\n if len(command) > 1024:\n raise ValueError(f\"{command} must be 1024 characters or less.\")\n\n # Base64 encode the command to avoid escaping issues when persisting to json file\n command = base64.b64encode(command.encode()).decode()\n\n # Check if a reserved port currently\n if port in self.reserved_ports:\n raise ValueError(f\"Port {port} is a in reserved port. Try a different port.\")\n\n data = self._load_bundled_app_data()\n\n # Check for port already in use\n for app in data:\n if data[app].get('port') == port:\n raise ValueError(f\"Port {port} is already in use. Try again.\")\n\n data[name] = {'port': port,\n 'description': description,\n 'command': command}\n\n with open(self.bundled_app_file, 'wt') as bf:\n json.dump(data, bf)\n\n # Commit the changes\n self.labbook.git.add(self.bundled_app_file)\n commit = self.labbook.git.commit(f\"Committing bundled app\")\n\n adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT,\n show=False,\n action=ActivityAction.CREATE,\n data=TextData('plain', f\"App configuration: {json.dumps(data[name])}\"))\n\n ar = ActivityRecord(ActivityType.ENVIRONMENT,\n message=f\"Added app '{name}'\",\n show=True,\n linked_commit=commit.hexsha,\n detail_objects=DetailRecordList([adr]),\n tags=ImmutableList([\"environment\", \"docker\", \"bundled_app\"]))\n\n ars = ActivityStore(self.labbook)\n ars.create_activity_record(ar)\n\n return data\n\n def remove_bundled_app(self, name: str) -> None:\n \"\"\"Remove a bundled app from this labbook\n\n Args:\n name(str): name of the bundled app\n\n Returns:\n None\n \"\"\"\n data = self._load_bundled_app_data()\n if name not in data:\n raise ValueError(f\"App {name} does not exist. 
Cannot remove.\")\n\n del data[name]\n\n with open(self.bundled_app_file, 'wt') as baf:\n json.dump(data, baf)\n\n # Commit the changes\n self.labbook.git.add(self.bundled_app_file)\n commit = self.labbook.git.commit(f\"Committing bundled app\")\n\n adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT,\n show=False,\n action=ActivityAction.CREATE,\n data=TextData('plain', f\"Removed bundled application: {name}\"))\n\n ar = ActivityRecord(ActivityType.ENVIRONMENT,\n message=f\"Removed bundled application: {name}\",\n show=True,\n linked_commit=commit.hexsha,\n detail_objects=DetailRecordList([adr]),\n tags=ImmutableList([\"environment\", \"docker\", \"bundled_app\"]))\n\n ars = ActivityStore(self.labbook)\n ars.create_activity_record(ar)\n\n def _load_bundled_app_data(self) -> OrderedDict:\n \"\"\"Load data file or return an empty OrderedDict\n\n Returns:\n OrderedDict\n \"\"\"\n if os.path.isfile(self.bundled_app_file):\n with open(self.bundled_app_file, 'rt') as baf:\n data = json.load(baf, object_pairs_hook=OrderedDict)\n else:\n data = OrderedDict()\n\n return data\n\n def get_bundled_apps(self) -> OrderedDict:\n \"\"\"Get collection of bundled apps in this labbook\n\n Returns:\n None\n \"\"\"\n data = self._load_bundled_app_data()\n\n # b64 decode the commands\n for app in data:\n if data[app]['command']:\n data[app]['command'] = base64.b64decode(data[app]['command']).decode()\n\n return data\n\n def get_docker_lines(self) -> List[str]:\n \"\"\"Method to get lines to add to the dockerfile\n\n Returns:\n list\n \"\"\"\n lines = list()\n data = self.get_bundled_apps()\n\n # Check for port already in use\n for app in data:\n lines.append(f\"EXPOSE {data[app].get('port')}\")\n\n return lines\n", "sub_path": "packages/gtmcore/gtmcore/environment/bundledapp.py", "file_name": "bundledapp.py", "file_ext": "py", "file_size_in_byte": 6800, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "gtmcore.labbook.labbook.LabBook", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 76, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 94, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityDetailRecord", "line_number": 100, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityDetailType.ENVIRONMENT", "line_number": 100, "usage_type": "attribute"}, {"api_name": "gtmcore.activity.ActivityDetailType", "line_number": 100, "usage_type": "name"}, {"api_name": "gtmcore.activity.ActivityAction.CREATE", "line_number": 102, "usage_type": "attribute"}, {"api_name": "gtmcore.activity.ActivityAction", "line_number": 102, "usage_type": "name"}, {"api_name": "gtmcore.activity.utils.TextData", "line_number": 103, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityRecord", "line_number": 105, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityType.ENVIRONMENT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "gtmcore.activity.ActivityType", "line_number": 105, "usage_type": "name"}, {"api_name": "gtmcore.activity.utils.DetailRecordList", "line_number": 109, "usage_type": "call"}, {"api_name": "gtmcore.activity.utils.ImmutableList", "line_number": 110, 
"usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityStore", "line_number": 112, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 133, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityDetailRecord", "line_number": 139, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityDetailType.ENVIRONMENT", "line_number": 139, "usage_type": "attribute"}, {"api_name": "gtmcore.activity.ActivityDetailType", "line_number": 139, "usage_type": "name"}, {"api_name": "gtmcore.activity.ActivityAction.CREATE", "line_number": 141, "usage_type": "attribute"}, {"api_name": "gtmcore.activity.ActivityAction", "line_number": 141, "usage_type": "name"}, {"api_name": "gtmcore.activity.utils.TextData", "line_number": 142, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityRecord", "line_number": 144, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityType.ENVIRONMENT", "line_number": 144, "usage_type": "attribute"}, {"api_name": "gtmcore.activity.ActivityType", "line_number": 144, "usage_type": "name"}, {"api_name": "gtmcore.activity.utils.DetailRecordList", "line_number": 148, "usage_type": "call"}, {"api_name": "gtmcore.activity.utils.ImmutableList", "line_number": 149, "usage_type": "call"}, {"api_name": "gtmcore.activity.ActivityStore", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 162, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 162, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 164, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 154, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 179, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 168, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "541401871", "text": "from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth.views import logout\n\napp_name = 'todo'\n\nurlpatterns = [\n    path('', views.home_view, name='index'),\n    path('logout/', logout, {'next_page': 'todo:index'}, name='logout'),\n    path('add', views.TodoCreateView.as_view(), name='add_todo'),\n    path('<int:pk>/delete', views.TodoDeleteView.as_view(), name='delete_todo'),\n    path('<int:pk>/edit', views.TodoEditView.as_view(), name='edit_todo'),\n    path('<int:pk>/done', views.done_view, name='done_todo'),\n\n]", "sub_path": "todo/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.logout", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "307158340", "text": "#Jose Luis Mata Lomelí\r\n#Create spirograph-style drawings\r\n\r\nimport pygame\r\nimport math\r\nimport random\r\n\r\n#Dimensions\r\nAncho = 800\r\nAlto = 800\r\n\r\n#Colors\r\nBLANCO = (255, 255, 255)\r\n\r\ndef dibujar(r, R, l):\r\n    # Initialize the pygame engine\r\n    pygame.init()\r\n    # Create an Ancho * Alto window\r\n    ventana = pygame.display.set_mode((Ancho, Alto)) # Create the window to draw on\r\n    reloj = pygame.time.Clock() # To cap the fps\r\n    termina = False # Flag for whether execution should end; start assuming it should not\r\n\r\n    while not termina: # Main loop: WHILE the variable termina is False, the loop repeats automatically\r\n        # Process the events received\r\n        for evento in pygame.event.get():\r\n            if evento.type == pygame.QUIT: # The user clicked the close button\r\n                termina = True # We want to end the loop\r\n\r\n        # Clear the screen\r\n        ventana.fill(BLANCO)\r\n\r\n\r\n        k = r/R\r\n        periodo = r//math.gcd(r, R)\r\n\r\n        #color\r\n        colorrandom1 = (random.randrange(255), random.randrange(255), random.randrange(255))\r\n        colorrandom2 = (random.randrange(255), random.randrange(255), random.randrange(255))\r\n        colorrandom3 = (random.randrange(255), random.randrange(255), random.randrange(255))\r\n        colorrandom4 = (random.randrange(255), random.randrange(255), random.randrange(255))\r\n\r\n        #circle 1\r\n        u = r * 2\r\n        U = R * 2\r\n        o = l * 2\r\n        p = u / U\r\n\r\n        #circle 2\r\n        f = r * 3\r\n        F = R * 3\r\n        h = l * 3\r\n        j = f / F\r\n\r\n        #circle 3\r\n        z = r * 4\r\n        Z = R * 4\r\n        v = l * 4\r\n        b = z / Z\r\n\r\n\r\n#First Circle\r\n\r\n        for angulo in range(0, 360 * periodo, 1):\r\n            a = math.radians(angulo)\r\n            x = int(R * ((1-k) * math.cos(a) + (l * k * math.cos(((1-k)/k)*a))))\r\n            y = int(R * ((1-k) * math.sin(a) - (l * k * math.sin(((1-k)/k)*a))))\r\n            pygame.draw.circle(ventana, colorrandom1, (x + Ancho//2, Alto//2 - y), 1, 1)\r\n\r\n#Second Circle\r\n\r\n        for angulo in range(0, 360 * periodo, 1):\r\n            a = math.radians(angulo)\r\n            x = int(R * ((1-p) * math.cos(a) + (o * p * math.cos(((1-p)/p)*a))))\r\n            y = int(R * ((1-p) * math.sin(a) - (o * p * math.sin(((1-p)/p)*a))))\r\n            pygame.draw.circle(ventana, colorrandom2, (x + Ancho//2, Alto//2 - y), 1, 1)\r\n\r\n#Third Circle\r\n\r\n        for angulo in 
range(0, 360 * periodo, 1):\r\n            a = math.radians(angulo)\r\n            x = int(R * ((1-j) * math.cos(a) + (h * j * math.cos(((1-j)/j)*a))))\r\n            y = int(R * ((1-j) * math.sin(a) - (h * j * math.sin(((1-j)/j)*a))))\r\n            pygame.draw.circle(ventana, colorrandom3, (x + Ancho//2, Alto//2 - y), 1, 1)\r\n\r\n#Fourth Circle\r\n\r\n        for angulo in range(0, 360 * periodo, 1):\r\n            a = math.radians(angulo)\r\n            x = int(R * ((1-b) * math.cos(a) + (v * b * math.cos(((1-b)/b)*a))))\r\n            y = int(R * ((1-b) * math.sin(a) - (v * b * math.sin(((1-b)/b)*a))))\r\n            pygame.draw.circle(ventana, colorrandom4, (x + Ancho//2, Alto//2 - y), 1, 1)\r\n\r\n        pygame.display.flip() # Update the drawing (If you do not call this function, nothing is drawn)\r\n        reloj.tick(1)\r\n\r\n    pygame.quit() # quit pygame\r\n\r\n\r\n# Main function\r\ndef main():\r\n\r\n    r = int(input(\"Value of r: \"))\r\n    R = int(input(\"Value of R: \"))\r\n    l = float(input(\"Value of l: \"))\r\n    dibujar(r, R, l) # Apply the function and draw\r\n\r\nmain()\r\n", "sub_path": "misionImposible-mision6.py", "file_name": "misionImposible-mision6.py", "file_ext": "py", "file_size_in_byte": 3559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "math.gcd", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 38, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 40, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 64, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 65, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 67, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 72, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 73, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 75, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 80, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 81, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 83, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 88, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 89, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 91, 
"usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "456678305", "text": "from readMetrics import readMetrics\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.pyplot import cm\nfrom scipy.interpolate import splrep, splev, interp1d\nfrom math import log10, floor\n\nMAXRAD = ['90', '92', '94', '96']\nMAXRAD = ['90', '99']\n#MAXRAD = ['80','82','84','86','88','90', '92', '94', '96', '98']\nMAXRAD = ['90','91','92','93','94','95','96','97','98','99']\n#MAXRAD = ['99']\n\ncolor = iter(cm.brg(np.linspace(0.0,0.8, len(MAXRAD))))\nfs = 24\nplt.figure(figsize=(14,12))\nfor maxrad in MAXRAD :\n\tc = next(color)\n\t(STR, CA, EPS, REDVOL, LAMBDA, AREA, VLME, LENGTH, DP, ETA) = readMetrics(maxrad)\n\n\tv = round(REDVOL[0],2)\n\tlam = LAMBDA[0]\n\n\t# calculate lamc from the cubic equuation: 2 v x^3 - 3 x + 1 = 0\n\tif (abs(v - 0.95) < 1e-12) :\n\t\tlamc = 1.2324\n\tif (abs(v - 0.90) < 1e-12) :\n\t\tlamc = 1.3712\n\tif (abs(v - 0.85) < 1e-12) :\n\t\tlamc = 1.5050\n\tif (abs(v - 0.80) < 1e-12) :\n\t\tlamc = 1.6437\n\tif (abs(v - 0.75) < 1e-12) :\n\t\tlamc = 1.7925\n\tif (abs(v - 0.70) < 1e-12) :\n\t\tlamc = 1.9562\n\tif (abs(v - 0.65) <= 0.01) :\n\t\tlamc = 2.1397\n\t\n\tlam = lam/lamc\n\tdelta = 1 - lam\n\n#\tfc = 7.5112\n#\tn = 0.8776\n#\tn = 0.86\n#\t#fc = 19.041\n#\t#n = 0.9931\n#\tdelta = pow(delta,n)\n#\tdelta = 1\n#\tfc = 1\n\t\n\tlam = round(lam,2)\n\tv = round(v,2)\n\n#\tCE = CA\n#\tDPE = DP\n#\tfor i in range(len(CE)) :\n#\t\tCE[i] = CA[i]/(EPS[i]*EPS[i])\n#\t\tDPE[i] = DP[i]*EPS[i]\n#\txL = CE\n#\tyL = DPE\n\n\txL = CA/(delta*delta)\n\tif (STR == 'DP') :\n\t\tyL = DP*delta\n\t#\tyL = DP\n\t#\tfor i in range(len(yL)) :\n\t#\t\tyL[i] = yL[i]/(8.0*(1.0 - EPS[i])*LENGTH[i])\n\tif (STR == 'EPS') :\n\t\tyL = (EPS/delta)\n\t\n\t\n\t## interpolate to xi using splines\n\t#tck = interpolate.splrep(x, y, s=0)\n\t#yi = interpolate.splev(xi, tck, der=0)\n\n\txi = np.logspace(floor(log10(min(xL))), floor(log10(max(xL))), 10000)\n\ttck = splrep(xL, yL, s=0)\n\tyi = splev (xi, tck, der=0)\n\n#\tf = interp1d(xL, yL)\n#\tyi = f(xi)\n\n\tplt.plot(xi,yi,'-',c=c)\n\tplt.plot(xL,yL,'.',c=c)\n\t\n\tif (maxrad == MAXRAD[0] or maxrad == MAXRAD[-1]) :\n\t\tplt.text(1000, yi[-1], str(lam),fontsize = fs-8)\n\t\n\tif (maxrad == MAXRAD[0]) :\n\t\tlam0 = lam\n\t\n\tif (maxrad == MAXRAD[-1]) :\n\t\tlam1 = lam\n\n# set plot preferences\nplt.title('$v$ = 0.' 
+ str(int(v*100)) + ', $\\lambda/\\lambda_c$ = ' + str(lam0) + ' to ' + str(lam1),fontsize=fs)\nplt.xscale('log')\n#plt.yscale('symlog')\nplt.xticks(fontsize=fs-4)\nplt.yticks(fontsize=fs-4)\nplt.xlim((0.01,1000))\n\nplt.xlabel('$\\mathrm{Ca} = \\mu U R^2 / (\\epsilon^2 \\kappa)$',fontsize=fs)\nif (STR == 'DP') :\n\tplt.ylabel('$\\epsilon \\Delta p R/ (\\mu U)$',fontsize=fs)\nif (STR == 'EPS') :\n\tplt.ylabel('$Q/(\\pi \\epsilon R^2 U) = 1 - V/U$',fontsize=fs)\nif (STR == 'AREA') :\n\tplt.ylabel('$A/R^2$',fontsize=fs)\nif (STR == 'VLME') :\n\tplt.ylabel('$\\Omega/R^3$',fontsize=fs)\nif (STR == 'REDVOL') :\n\tplt.ylabel('$v$',fontsize=fs)\nif (STR == 'GAMMAX') :\n\tplt.ylabel('$\\gamma_{\\mathrm{max}}/ (\\mu U)$',fontsize=fs)\nif (STR == 'TAUMAX') :\n\tplt.ylabel(r'$\\tau_{\\mathrm{max}}R/ (\\mu U)$',fontsize=fs)\n\n\nplt.show()\n", "sub_path": "C++/draft01/ALT/highCa/postproc/metrics/master/metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 2928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.cm.brg", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "readMetrics.readMetrics", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 77, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 77, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 77, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 78, "usage_type": "call"}, {"api_name": "scipy.interpolate.splev", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "477060780", "text": "from echopype.echodata import EchoData\n\nimport xarray as xr\nimport numpy as np\n\n\ndef test_harmonize_env_param_time():\n # Scalar\n p = 10.05\n assert EchoData._harmonize_env_param_time(p=p) == 10.05\n\n # time1 length=1, should return length=1 numpy array\n p = xr.DataArray(\n data=[1],\n coords={\n \"time1\": np.array([\"2017-06-20T01:00:00\"], dtype=\"datetime64[ns]\")\n },\n dims=[\"time1\"]\n )\n assert EchoData._harmonize_env_param_time(p=p) == 1\n\n # time1 length>1, interpolate to tareget ping_time\n p = xr.DataArray(\n data=np.array([0, 1]),\n coords={\n \"time1\": np.arange(\"2017-06-20T01:00:00\", \"2017-06-20T01:00:31\", np.timedelta64(30, \"s\"), dtype=\"datetime64[ns]\")\n },\n dims=[\"time1\"]\n )\n # ping_time target is identical to time1\n ping_time_target = p[\"time1\"].rename({\"time1\": \"ping_time\"})\n p_new = EchoData._harmonize_env_param_time(p=p, ping_time=ping_time_target)\n assert (p_new[\"ping_time\"] == ping_time_target).all()\n assert (p_new.data == p.data).all()\n # ping_time target requires actual interpolation\n ping_time_target = xr.DataArray(\n data=[1],\n coords={\n \"ping_time\": np.array([\"2017-06-20T01:00:15\"], dtype=\"datetime64[ns]\")\n },\n dims=[\"ping_time\"]\n )\n p_new = EchoData._harmonize_env_param_time(p=p, ping_time=ping_time_target[\"ping_time\"])\n assert p_new[\"ping_time\"] == ping_time_target[\"ping_time\"]\n assert p_new.data == 0.5\n\n\n\n ", "sub_path": "echopype/tests/echodata/test_echodata_misc.py", "file_name": "test_echodata_misc.py", "file_ext": "py", "file_size_in_byte": 1519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "echopype.echodata.EchoData._harmonize_env_param_time", "line_number": 10, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData", "line_number": 10, "usage_type": "name"}, {"api_name": "xarray.DataArray", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData._harmonize_env_param_time", "line_number": 20, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData", "line_number": 20, "usage_type": "name"}, {"api_name": "xarray.DataArray", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 26, "usage_type": "call"}, {"api_name": "numpy.timedelta64", "line_number": 26, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData._harmonize_env_param_time", "line_number": 32, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData", "line_number": 32, "usage_type": "name"}, {"api_name": "xarray.DataArray", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData._harmonize_env_param_time", "line_number": 43, "usage_type": "call"}, {"api_name": "echopype.echodata.EchoData", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "28841343", "text": "from typing import Dict\n\nimport numpy\nfrom overrides import overrides\n\nfrom allennlp.data.fields.field import Field\n\n\nclass ArrayField(Field[numpy.ndarray]):\n \"\"\"\n A class representing an array, which could have arbitrary dimensions.\n A batch of these arrays are padded to the max dimension length in the batch\n for each dimension.\n \"\"\"\n def __init__(self, array: numpy.ndarray, padding_value: int = 0) -> None:\n self.array = array\n self.padding_value = padding_value\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n return {\"dimension_\" + str(i): shape\n for i, shape in enumerate(self.array.shape)}\n\n @overrides\n def as_array(self, padding_lengths: Dict[str, int]) -> numpy.ndarray:\n max_shape = [padding_lengths[\"dimension_{}\".format(i)]\n for i in range(len(padding_lengths))]\n\n return_array = numpy.ones(max_shape, \"float32\") * self.padding_value\n\n # If the array has a different shape from the largest\n # array, pad dimensions with zeros to form the right\n # shaped list of slices for insertion into the final array.\n slicing_shape = list(self.array.shape)\n if len(self.array.shape) < len(max_shape):\n slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]\n slices = [slice(0, x) for x in slicing_shape]\n return_array[slices] = self.array\n return return_array\n\n @overrides\n def empty_field(self): # pylint: disable=no-self-use\n return ArrayField(numpy.array([], dtype=\"float32\"))\n", "sub_path": "src/allennlp/data/fields/array_field.py", "file_name": "array_field.py", "file_ext": "py", "file_size_in_byte": 1616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "allennlp.data.fields.field.Field", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 15, "usage_type": "attribute"}, {"api_name": "overrides.overrides", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "overrides.overrides", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "overrides.overrides", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "575911156", "text": "from django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom .forms import MicrotasksForm\nfrom .models import Microtasks,MAL_Requirements\n# Create your views here.\n\ndef microtask(request):\n if request.method == 'POST':\n form = 
MicrotasksForm(request.POST, request.FILES)\n if form.is_valid():\n\n job_name = form.cleaned_data['job_name']\n\n form.save()\n messages.success(request, f'Account created for {job_name}! You have to login')\n return redirect('/')\n else:\n form = MicrotasksForm()\n\n return render(request, 'microtask.html', {'form': form})\n\n\ndef index(request):\n microtask = Microtasks.objects.all()\n # category = MAL_Requirements.objects.get(microtask.Category_of_the_microtask)\n\n context = {'microtask':microtask}\n return render(request, 'MalForm.html', context)\n\ndef handleSubmit(request):\n if request.method == 'POST':\n MAL_Job_Identification_Number = request.POST['malno']\n Assembly_line_ID = request.POST['asi']\n Name_of_the_Assembly_line = request.POST['nameassembly']\n Name_of_the_person_incharge_of_the_MAL = request.POST['personname']\n Link_of_the_output_folder = request.POST['link1']\n Name_of_the_micro_task = request.POST['microtask']\n Category_of_the_Microtask = request.POST['category']\n Target_date = request.POST['td']\n Total_budget_allocated_for_the_job = request.POST['budget']\n Job_description = request.POST['jd']\n Upload_job_sample = request.POST['jobsample']\n Upload_Job_instructions = request.POST['instruction']\n Quantity_of_the_Job = request.POST['quantity']\n Link_of_the_Input_folder = request.POST['link2']\n data = MAL_Requirements(\n MAL_Job_Identification_Number=MAL_Job_Identification_Number, \n Assembly_line_ID=Assembly_line_ID,\n Name_of_the_Assembly_line=Name_of_the_Assembly_line, \n Name_of_the_person_incharge_of_the_MAL=Name_of_the_person_incharge_of_the_MAL, \n Link_of_the_output_folder=Link_of_the_output_folder,\n microtask=Name_of_the_micro_task, \n microtask_category=Category_of_the_Microtask, \n Target_date=Target_date, \n Total_budget_allocated_for_the_job=Total_budget_allocated_for_the_job,\n Job_description=Job_description,\n Uploadjob_sample=Upload_job_sample, \n UploadJob_instructions=Upload_Job_instructions, \n Quantity_of_the_Job=Quantity_of_the_Job, \n Link_of_the_Input_folder=Link_of_the_Input_folder\n )\n data.save()\n return redirect('index')\n \ndef posting_page(request,pk=None):\n if request.user.is_active: \n if pk is not None:\n try:\n data = Microtasks.objects.get(id=pk)\n except:\n data = \"NA\"\n return render(request,'JobPosting_Page.html', {'datas': data})\n return render(request,'JobPosting_Page.html') ", "sub_path": "jobs/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "forms.MicrotasksForm", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.MicrotasksForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Microtasks.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Microtasks.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Microtasks", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "models.MAL_Requirements", "line_number": 46, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Microtasks.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Microtasks.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Microtasks", "line_number": 69, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "238164153", "text": "'''\nRequirments:\n python &\n > pip install pillow\n > pip install matplotlib\n > pip install numpy\nRun:\n > python task1.py path/to/image\n'''\n\nimport PIL\nfrom PIL import Image\nimport numpy as np\nimport itertools\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport mylib\nfrom mylib import BOX\n\n\n# open image\nimage = Image.open(sys.argv[1]) # you have to pass the input image path as input arg\nimage = image.convert(\"L\") # convert to signle channeled image\n\n\noutDir = sys.argv[2]\n\nif not os.path.exists(outDir):\n os.makedirs(outDir)\nwidth, height = image.size\n\n\ntotalPixels = width* height\n\nfreq = [0] * 256 # fill\ncProbability = [0] * 256 # fill zeros\n\n\n\n# save original image histogram\nfreq = image.histogram()\na = np.array(image)\nplt.hist(a.ravel(), bins=256)\nplt.ylabel('Probability')\nplt.xlabel('Gray Level')\n\nimage.save(outDir+'/input.jpg')\nplt.savefig(outDir+'/inputhist.svg')\nplt.show()\n\n\n\ncenterX,centerY = (int(width/2),int(height/2))\n\n\n\n# HISTOGRAM EQUALIZATION \neditableImage = image.load()\nimage = mylib.equalizeHistogram(image,editableImage,BOX(0,0,centerX,centerY))\nimage = mylib.equalizeHistogram(image,editableImage,BOX(centerX,0,width,centerY))\nimage = mylib.equalizeHistogram(image,editableImage,BOX(0,centerY,centerX,height))\niamge = mylib.equalizeHistogram(image,editableImage,BOX(centerX,centerY,width,height))\n\n\n\n\n\n\n# save resultant image and histogram\nimage.save(outDir+'/output.jpg')\na = np.array(image)\nplt.hist(a.ravel(), bins=256)\nplt.savefig(outDir+'/outputhist.svg')\nplt.show()", "sub_path": "DIP/LABS/lab08/solution/task2.py", "file_name": "task2.py", "file_ext": "py", "file_size_in_byte": 1522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PIL.Image.open", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 23, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "mylib.equalizeHistogram", "line_number": 60, "usage_type": "call"}, {"api_name": "mylib.BOX", "line_number": 60, "usage_type": "call"}, {"api_name": "mylib.equalizeHistogram", "line_number": 61, "usage_type": "call"}, {"api_name": "mylib.BOX", "line_number": 61, "usage_type": "call"}, {"api_name": "mylib.equalizeHistogram", "line_number": 62, "usage_type": "call"}, {"api_name": "mylib.BOX", "line_number": 62, "usage_type": "call"}, {"api_name": "mylib.equalizeHistogram", "line_number": 63, "usage_type": "call"}, {"api_name": "mylib.BOX", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "445572431", "text": "\"\"\"\nmapsAPI.py\nSource Code for Google Maps API or interaction between genetic algorithm and google maps\nAuthor(s): Niklaas Cotta\nCIS422 FA21\nTeam NASAK\nCreation Date: 10/9/21\nSources:\n List of APIs: https://developers.google.com/maps/documentation\n Distance Matrix Documentation: https://developers.google.com/maps/documentation/distance-matrix/start\nDescription:\n This file contains two functions. The first function, get_distance() contains the distance API call to google maps.\n The second function, genMatrix(), creates a complete graph containing n addresses as vertices. This graph is in matrix\n form. For each vertex (address) pair, calculates the distance between the two.\n\"\"\"\nimport requests # this library is how python makes requests to APIs\nimport regex # this library is for parsing strings with regular expressions\n\n\ndef get_distance(origin, destination):\n \"\"\"\n This function takes a source string and a destination string in the form of an address.\n Address may be in following forms:\n 1710 E 15th Ave, OR\n 6513 Flag Way Dr, Boise, Idaho\n Seattle, Washington\n San Francisco, CA\n Function then requests distance between source and destination from google maps API.\n If successful request, gets the distance from the json, and converts the distance from a string to integer\n On failure returns -1. 
On success returns distance between the two places.\n :param origin: str\n :param destination: str\n :return: float (distance between the two)\n \"\"\"\n\n # get API key\n fp = open(\"api-key.txt\", \"r\") # open file containing api key\n API_KEY = fp.read()\n fp.close()\n\n # base url, used later in request to google\n url = f\"https://maps.googleapis.com/maps/api/distancematrix/json?\" \\\n f\"origins={origin}&destinations={destination}\" \\\n f\"&units=imperial&key={API_KEY}\"\n\n response = requests.request(\"GET\", url, headers={}, data={}) # query response from maps, this is the API call\n if response.status_code != 200: # 200 means OK\n print(\"Could not get distance from API\")\n return -1\n\n response = response.json() # convert response into json format\n distancestr = response[\"rows\"][0][\"elements\"][0][\"distance\"][\"text\"] # isolate .json element\n\n # this just turns the string into a float number\n found = regex.search(r\"\\d*[,.]*\\d*\", distancestr)\n distance = float(found.group().replace(\",\", \"\"))\n\n return distance # float\n\n########################################################################################################################\n\n\ndef genMatrix(addressList):\n \"\"\"\n This function takes a list of addresses (strings) and generates a complete graph of distances between addresses.\n This graph is in the form of a matrix where each index corresponds to an address, in the order of addressList.\n After initially populating the matrix with 0s, the graph then calls get_distance() between each pair of addresses.\n The graph is undirected, so the matrix will have symmetry.\n :param addressList: list of str\n :return: tuple containing list of list of distances (matrix) and list of addresses (strings)\n \"\"\"\n matrix = [] # empty var to be filled\n n = len(addressList) # get length\n\n # populate initial matrix with 0's (n x n matrix)\n for j in range(n):\n matrix.append([]) # add a \"row\"\n for _ in range(n):\n matrix[j].append(0)\n\n # update matrix with actual distances\n for j in range(n):\n for i in range(n):\n if i != j: # distance from x to x is 0\n distance = get_distance(addressList[i], addressList[j]) # api call\n matrix[j][i] = distance # insert distance into correct coordinate\n\n if any(-1 in row for row in matrix): # make sure there are no invalid distances\n print(\"WARNING: Distance matrix contains invalid distance (-1). API function could not grab distance. 
Program will continue\")\n\n    return matrix, addressList # returns tuple containing address list and corresponding matrix\n\n########################################################################################################################\n\n\nif __name__ == '__main__':\n    # Example\n    newAddresses = [\"NYC, NY\", \"1710 E 15th Ave, Eugene,OR\", \"Cocoa Beach,FL\", \"Seattle, Washington\"]\n    newMatrix, addresses = genMatrix(newAddresses)\n\n    for address in newMatrix:\n        print(address)\n", "sub_path": "webapp/api/mapsAPI.py", "file_name": "mapsAPI.py", "file_ext": "py", "file_size_in_byte": 4441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.request", "line_number": 46, "usage_type": "call"}, {"api_name": "regex.search", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "523380136", "text": "import numpy as np\nimport operator # used by the k-NN algorithm to perform sorting\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\n\ndef createDataSet():\n    group = np.array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])# sample points\n    labels = ['A', 'A', 'B', 'B'] # label info for the data points; the element count equals the number of rows in the group matrix\n    return group, labels\n\n\n# Parameters: inX: the input vector to classify;\n# dataSet: the input training sample set;\n# labels: the label vector;\n# k: the number of nearest neighbors to select\n\ndef classify0(inX, dataSet, labels, k):\n    # distance calculation\n    dataSetSize = dataSet.shape[0]\n    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet\n    sqDiffMat = diffMat ** 2\n    sqDistances = sqDiffMat.sum(axis=1)\n    distances = sqDistances ** 0.5\n\n    # sort in order of increasing distance\n    sortedDistIndicies = distances.argsort()\n    classCount = {}\n\n    # select the k points closest to the current point\n    for i in range(k):\n        voteIlabel = labels[sortedDistIndicies[i]]\n        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n\n    # count how often each class occurs among the top k points; uses the operator module's itemgetter method, sorted here from largest to smallest\n    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\n\n    # return the most frequent class among the top k points as the predicted class of the current point.\n    return sortedClassCount[0][0]\n\n\n# sample file: datingTestSet.txt\n# sample features: frequent flyer miles earned per year; percentage of time spent playing video games; liters of ice cream consumed per week\n# program function: a parser that converts text records into NumPy structures\ndef file2matrix(filename):\n    # get the number of lines in the file\n    fr = open(filename)\n    arrayOLines = fr.readlines()\n    numberOfLines = len(arrayOLines)\n\n    # create the NumPy matrix to return\n    returnMat = np.zeros((numberOfLines, 3)) # the second dimension is set to 3 here\n    classLabelVector = []\n    index = 0\n\n    # parse the file data into the list\n    for line in arrayOLines:\n        line = line.strip()\n        listFromLine = line.split('\\t')\n        returnMat[index, :] = listFromLine[0:3]\n        classLabelVector.append(int(listFromLine[-1]))\n        index += 1\n\n    # return the training sample matrix and the class label vector\n    return returnMat, classLabelVector\n\n# program function: automatically rescale numeric feature values into the 0-1 range\n# program input: the sample feature dataset\n# program returns: matrix; range: value range; minValue: minimum value\ndef autoNorm(dataSet):\n    minValues = dataSet.min(0) # the argument 0 makes the function take the minimum per column rather than per row\n    maxValues = dataSet.max(0)\n    ranges = maxValues - minValues\n    normDataSet = np.zeros(np.shape(dataSet))\n    m = dataSet.shape[0]\n    normDataSet = dataSet - np.tile(minValues, (m, 1))\n\n    # element-wise division of feature values, not matrix division; matrix division would need np.linalg.solve(matA,matB)\n    normDataSet = normDataSet / np.tile(ranges, (m, 1))\n    return normDataSet, ranges, minValues\n\n\n# program function: test the classifier against the dating-website data\n# program output: the classifier's error rate\ndef datingClassTest():\n    hoRatio = 0.10\n\n    # read data from the file and convert it into normalized feature values\n    datingDataMat, datingLabels = file2matrix('./files/datingTestSet2.txt')\n    normMat, ranges, minValues = autoNorm(datingDataMat)\n\n    # compute the number of test vectors, which decides which rows of normMat are used for testing and which are used as training samples for the classifier;\n    m = normMat.shape[0]\n    numTestVecs = int(m * hoRatio)\n\n    errorCount = 0\n    for i in range(numTestVecs):\n        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], 
datingLabels[numTestVecs:m], 3)\n        print('predicted: %d , actual: %d' % (classifierResult, datingLabels[i]))\n        if (classifierResult != datingLabels[i]):\n            errorCount += 1.0\n    print('final error rate: %f' % (errorCount / float(numTestVecs)))\n\n\n# program function: dating-website match prediction\ndef classifyPerson():\n    resultList = ['not much of a match', 'could be friends', 'could date']\n    percentTats = float(input(\"What percentage of your time each day goes to watching videos and playing games?\\n\"))\n    ffMiles = float(input(\"How many frequent flyer miles do you earn per year?\\n\"))\n    iceCream = float(input(\"How many liters of ice cream do you eat per week?\"))\n    datingDataMat, datingLabels = file2matrix('./files/datingTestSet2.txt')\n    normMat, ranges, minValues = autoNorm(datingDataMat)\n    inArr = np.array([ffMiles, percentTats, iceCream])\n    classifierResult = classify0((inArr-minValues)/ranges, normMat, datingLabels, 3)\n    print(\"Helen might say to you: %s\"%resultList[classifierResult-1])\n\n\n# program function: convert an image into a vector\n# program input: file path\n# program returns: a NumPy array\ndef img2vector(filename):\n    returnVect = np.zeros((1, 1024))\n    fr = open(filename)\n    for i in range(32):\n        lineStr = fr.readline()\n        for j in range(32):\n            returnVect[0, 32*i+j] = int(lineStr[j])\n    return returnVect\n\n\ndef handwritingClassTest():\n    hwLabels = []\n    trainingFileList = os.listdir('./files/trainingDigits')\n    m = len(trainingFileList)\n    trainingMat = np.zeros((m, 1024))\n\n    # convert the training sample dataset into a matrix\n    for i in range(m):\n        fileNameStr = trainingFileList[i]\n        fileStr = fileNameStr.split('.')[0]\n        classNumStr = int(fileStr.split('_')[0])\n        hwLabels.append(classNumStr)\n        trainingMat[i, :] = img2vector('./files/trainingDigits/%s' % fileNameStr)\n\n    # convert the test data into NumPy arrays as well\n    testFileList = os.listdir('./files/testDigits')\n    errorCount = 0.0\n    mTest = len(testFileList)\n    for i in range(mTest):\n        fileNameStr = testFileList[i]\n        fileStr = fileNameStr.split('.')[0]\n        classNumStr = int(fileStr.split('_')[0])\n        vectorUnderTest = img2vector('./files/testDigits/%s' % fileNameStr)\n\n        # classify using kNN\n        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)\n\n        # print the test results\n        print(\"predicted: %d; actual: %d\" % (classifierResult, classNumStr))\n        if (classifierResult != classNumStr):\n            errorCount += 1\n            print(\"predicted: %d; actual: %d; error: %s\" % (classifierResult, classNumStr, fileNameStr))\n    print(\"final error count: %d; error rate: %f;\" % (errorCount, errorCount / float(mTest)))\n\n\ndef main(args):\n    if args.example:\n        if args.example=='dating':\n            datingClassTest()\n            return\n        elif args.example=='handwriting':\n            handwritingClassTest()\n            return\n    dataSet,labels=file2matrix(args.path)\n    type=classify0(args.feature,dataSet,labels,2)\n    print(type)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-path\",help='File Path, Just Like: -path ./files/kNN_test.txt',type=str)\n    parser.add_argument('-feature', nargs='+', help='Feature List, Just Like: -feature 0.2 0.1',type=float)\n    parser.add_argument('-example',help=\"kNN Example, You Can Choose From: dating, handwriting. 
If you set the example argument, your needn't set other argument\")\n args = parser.parse_args()\n main(args)\n\n # result = classify0([0, 0], datingDataMat, datingLabels, 3)\n # # print(result)\n", "sub_path": "MLzhoupengwu/kNN.py", "file_name": "kNN.py", "file_ext": "py", "file_size_in_byte": 7339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 22, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 147, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "179960298", "text": "\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime,date\nimport requests\nimport os\nimport sys\nimport re\nimport time\nfrom subprocess import call,Popen,check_output,PIPE\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'includes'))\nfrom customSettings import repoDir,djangoDir,djangoSettings,startFinYear,panchayatCrawlThreshold,panchayatRetryThreshold,telanganaStateCode,panchayatAttemptRetryThreshold,apStateCode,crawlRetryThreshold,crawlProcessTimeThreshold,logDir\n#from crawlFunctions import crawlPanchayat,crawlPanchayatTelangana,libtechCrawler\nfrom libtechCrawler import libtechCrawler\nsys.path.insert(0, repoDir)\nfileDir=os.path.dirname(os.path.abspath(__file__))\nsys.path.append(djangoDir)\n\nfrom libtechInit import libtechLoggerFetch\nfrom nregaFunctions import stripTableAttributes,htmlWrapperLocal,getCurrentFinYear,savePanchayatReport,table2csv,getFullFinYear\nfrom wrappers.logger import loggerFetch\n\nimport django\nfrom django.core.wsgi import get_wsgi_application\nfrom django.core.files.base import ContentFile\nfrom django.utils import timezone\nfrom django.db.models import F,Q,Sum,Count\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", djangoSettings)\ndjango.setup()\n\nfrom nrega.models import State,District,Block,Panchayat,Muster,LibtechTag,CrawlQueue,CrawlState\n\ndef argsFetch():\n '''\n Paser for the argument list that returns the args list\n '''\n import argparse\n\n parser = argparse.ArgumentParser(description='These scripts will initialize the Database for the district and populate relevant details')\n parser.add_argument('-l', '--log-level', help='Log level defining verbosity', required=False)\n parser.add_argument('-limit', '--limit', help='Limit on the number of results', required=False)\n parser.add_argument('-downloadLimit', '--downloadLimit', help='Limit on the number of results', required=False)\n parser.add_argument('-s', '--stateCode', help='State for which the delayed payment report needs to be crawld', required=False)\n parser.add_argument('-sf', '--startFinYear', help='From which financial year data needs to be 
crawled; default is 2015-2016', required=False)\n    parser.add_argument('-step', '--step', help='Step for which the script needs to run', required=False)\n    parser.add_argument('-pc', '--panchayatCode', help='Panchayat for which the delayed payment report needs to be crawled', required=False)\n    parser.add_argument('-qid', '--qid', help='Queue Id for which this needs to be run', required=False)\n    parser.add_argument('-bc', '--blockCode', help='Panchayat for which the delayed payment report needs to be crawled', required=False)\n    parser.add_argument('-m', '--manage', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)\n    parser.add_argument('-e', '--execute', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)\n    parser.add_argument('-p', '--populate', help='Populate CrawlQueue', required=False,action='store_const', const=1)\n    parser.add_argument('-f', '--force', help='Force Run a step', required=False,action='store_const', const=1)\n    parser.add_argument('-se', '--singleExecute', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)\n    parser.add_argument('-i', '--initiateCrawl', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)\n    parser.add_argument('-d', '--debug', help='Debug Panchayat Crawl Queue', required=False,action='store_const', const=1)\n    parser.add_argument('-t', '--test', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)\n\n    args = vars(parser.parse_args())\n    return args\n    \ndef main():\n    args = argsFetch()\n    logger = loggerFetch(args.get('log_level'))\n    if args['initiateCrawl']:\n        logger.debug(\"This script is going to initiate crawl\")\n        if args['step']:\n            crawlStates=CrawlState.objects.filter(name=args['step'])\n        else:\n            #crawlStates=CrawlState.objects.all()\n            crawlStates=CrawlState.objects.filter(isActive=True)\n        for eachState in crawlStates:\n            curStateName=eachState.name\n            logger.debug(\"Current state name is %s \" % curStateName)\n            curhour=datetime.now().hour\n            nicTimeBand=False\n            if (curhour >=6) and (curhour < 20):\n                nicTimeBand=True\n            scriptDir='%s/custom/crawlScripts/' % djangoDir\n#            scriptName='%s/custom/crawlScripts/b.sh %s ' % (djangoDir,str(eachState.id))\n            \n            logfile=\"/tmp/cq%s.log\" % (curStateName)\n            debuglogfile=\"/tmp/debug%s_%s.log\" % (curStateName,str(int(time.time())))\n            if ((eachState.nicHourRestriction==False) or ((eachState.nicHourRestriction==True) and (nicTimeBand==True))):\n                cmd=\"python %s/crawlMain.py -e -l debug -step %s \" % (scriptDir,curStateName)\n#                cmd=scriptName\n                p1 = Popen(['pgrep', '-f', cmd], stdout=PIPE)\n                mypid = p1.communicate()[0].decode(\"utf-8\").lstrip().rstrip()\n                logger.debug(\"Existing PID for this command %s is %s \" % (curStateName,str(mypid)))\n                if (mypid == \"\"):\n                    logger.debug(\"We are going to launch this program %s\" % cmd)\n                    with open(logfile,\"wb\") as f:\n                        proc = Popen([cmd], shell=True,stdin=None, stdout=f, stderr=None, close_fds=True)\n                else:\n                    mycmd='ps -o etimes= -p %s ' % mypid\n                    p1 = Popen([mycmd], stdout=PIPE,shell=True)\n                    output = p1.communicate()[0].decode(\"utf-8\").lstrip().rstrip()\n                    logger.debug(output)\n                    if int(output) > crawlProcessTimeThreshold:\n                        #Before Killing we will copy the log file to check for errors\n                        mycmd=\"cp %s %s \" % (logfile,debuglogfile)\n                        p1 = Popen([mycmd], stdout=PIPE,shell=True)\n                        mycmd=\"kill -9 %s \" % mypid\n                        p1 = Popen([mycmd], stdout=PIPE,shell=True)\n                        output = p1.communicate()[0].decode(\"utf-8\").lstrip().rstrip()\n                        
logger.debug(output)\n\n if args['populate']:\n panchayatCode=args['panchayatCode']\n blockCode=args['blockCode']\n if panchayatCode is not None:\n eachPanchayat=Panchayat.objects.filter(code=panchayatCode).first()\n CrawlQueue.objects.create(panchayat=eachPanchayat)\n elif blockCode is not None:\n eachBlock=Block.objects.filter(code=blockCode).first()\n myPanchayats=Panchayat.objects.filter(block=eachBlock)\n for eachPanchayat in myPanchayats:\n CrawlQueue.objects.create(panchayat=eachPanchayat)\n # CrawlQueue.objects.create(block=eachBlock,priority=500)\n\n if args['execute']:\n state=args['step']\n crawlState=CrawlState.objects.filter(name=state).first()\n logFileName=\"%s.log\" % (crawlState.name)\n logger1 = libtechLoggerFetch('debug',filename=logFileName)\n if crawlState is not None:\n limit=args['limit']\n if args['downloadLimit']:\n downloadLimit=int(args['downloadLimit'])\n else:\n downloadLimit=None\n qid=args['qid']\n forceRun=args['force']\n try:\n libtechCrawler(logger1,crawlState,qid=qid,forceRun=forceRun,downloadLimit=downloadLimit)\n except:\n logger1.exception('Got exception on main handler')\n raise\n else:\n logger.info(\"Not a Valid Crawl Step\")\n\n logger.info(\"...END PROCESSING\") \n exit(0)\nif __name__ == '__main__':\n main()\n", "sub_path": "django/n.libtech.info/src/custom/crawlScripts/crawlMain.py", "file_name": "crawlMain.py", "file_ext": "py", "file_size_in_byte": 7211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 14, "usage_type": "call"}, {"api_name": "customSettings.repoDir", "line_number": 14, "usage_type": "argument"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "customSettings.djangoDir", "line_number": 16, "usage_type": "argument"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 27, "usage_type": "call"}, {"api_name": "customSettings.djangoSettings", "line_number": 27, "usage_type": "argument"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 28, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 38, "usage_type": "call"}, {"api_name": "wrappers.logger.loggerFetch", "line_number": 62, "usage_type": "call"}, {"api_name": "nrega.models.CrawlState.objects.filter", "line_number": 66, "usage_type": "call"}, {"api_name": "nrega.models.CrawlState.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "nrega.models.CrawlState", "line_number": 66, "usage_type": "name"}, {"api_name": "nrega.models.CrawlState.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "nrega.models.CrawlState.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": 
"nrega.models.CrawlState", "line_number": 69, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "name"}, {"api_name": "customSettings.djangoDir", "line_number": 77, "usage_type": "name"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 85, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 85, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 91, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 94, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 94, "usage_type": "name"}, {"api_name": "customSettings.crawlProcessTimeThreshold", "line_number": 97, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 100, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 100, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 102, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 102, "usage_type": "name"}, {"api_name": "nrega.models.Panchayat.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "nrega.models.Panchayat.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "nrega.models.Panchayat", "line_number": 110, "usage_type": "name"}, {"api_name": "nrega.models.CrawlQueue.objects.create", "line_number": 111, "usage_type": "call"}, {"api_name": "nrega.models.CrawlQueue.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "nrega.models.CrawlQueue", "line_number": 111, "usage_type": "name"}, {"api_name": "nrega.models.Block.objects.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "nrega.models.Block.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "nrega.models.Block", "line_number": 113, "usage_type": "name"}, {"api_name": "nrega.models.Panchayat.objects.filter", "line_number": 114, "usage_type": "call"}, {"api_name": "nrega.models.Panchayat.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "nrega.models.Panchayat", "line_number": 114, "usage_type": "name"}, {"api_name": "nrega.models.CrawlQueue.objects.create", "line_number": 116, "usage_type": "call"}, {"api_name": "nrega.models.CrawlQueue.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "nrega.models.CrawlQueue", "line_number": 116, "usage_type": "name"}, {"api_name": "nrega.models.CrawlState.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "nrega.models.CrawlState.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "nrega.models.CrawlState", "line_number": 121, "usage_type": "name"}, {"api_name": "libtechInit.libtechLoggerFetch", "line_number": 123, "usage_type": "call"}, {"api_name": "libtechCrawler.libtechCrawler", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "519429431", "text": "# created: 2018-12-01\n# see 'parser' for expected command-line arguments\n\n# goal: be FAST!!!\n\n''' to test, run something like:\n\npython3 compare_inventories.py inventory-files_do-not-add-to-git/2018-12-15-imac-pro.jsonl inventory-files_do-not-add-to-git/2018-12-29-imac-pro.jsonl > /tmp/out\n\nTODOs:\n- display moved files better as a correspondence between first -> second\n- add a FAST MODE that doesn't try to detect directory/file moves\n- is sorted() really needed at some parts? 
does it really speed things up?\n- wait, if i can detect whether a directory was moved from A to B, but\n more stuff was added into B, then i should be able to detect whether\n something was moved from A to B, with stuff deleted from B, since it's\n SYMMETRIC - you just run the detection algorithm *backwards*, thinking of\n it as something moving from B to A, with more stuff added to A\n - get this test working:\n ./run-test.sh tests/simple-dir-move-plus-delete-files/\n'''\n\n# include slashes in dirnames to prevent spurious substring matches\nDEFAULT_IGNORE_DIRS = ['directory-tree-inventory/inventory-files_do-not-add-to-git',\n '/.git',\n '/node_modules',\n '/__pycache__',\n '.dropbox.cache'] # don't have a leading slash '/' so we can ignore dropbox cache at the top level too\n\nDEFAULT_IGNORE_FILENAMES = ['Thumbs.db', 'thumbs.db', '.DS_Store', 'Icon\\r'] # 'Icon\\r' doesn't print properly anyhow, #weird\n\nDEFAULT_IGNORE_DIREXTS = ['pgbovine,.htm', 'pgbovine,.html']\n\nDEFAULT_SUMMARY_THRESHOLD = 10\n\n\nimport argparse\nimport json\nimport os\nimport sys\nimport time\nimport datetime\nfrom collections import Counter, defaultdict\n\n# requires python >= 3.5 to get os.walk to use the MUCH faster os.scandir function\nassert float(sys.version[:3]) >= 3.5\n\n\nprev_time = time.time() # kick it off at the very beginning\ndef print_time_elapsed(label):\n global prev_time\n cur_time = time.time()\n #print(f'{label}: {int(cur_time - prev_time)}s') # silence for now\n prev_time = cur_time\n\n# parses an inventory file created by create_inventory() in create_inventory.py\n# and returns a dict\ndef parse_inventory_file(filename):\n ret = {}\n\n assert os.path.isfile(filename)\n metadata = None\n\n # index the records in a few ways\n records_by_path = {} # key: (dirname, filename) tuple\n\n # key: modtime, value: list of records with this modtime\n records_by_modtime = defaultdict(list)\n\n # key: file size, value: list of records with this file size\n records_by_filesize = defaultdict(list)\n\n # key: crc32 hash value, value: list of records with this hash\n records_by_crc32 = defaultdict(list)\n\n n_records = 0\n for line in open(filename):\n record = json.loads(line)\n # first line is metadata\n if not metadata:\n metadata = record\n continue # ok, next!\n\n n_records += 1\n\n dn = record['d']\n fn = record['f']\n ext = record['e']\n modtime = record['mt']\n filesize = record['sz']\n\n assert (dn, fn) not in records_by_path\n records_by_path[(dn, fn)] = record\n\n records_by_modtime[modtime].append(record)\n records_by_filesize[filesize].append(record)\n\n try:\n crc32_val = record['crc32']\n records_by_crc32[crc32_val].append(record)\n except KeyError:\n pass\n\n # clean up metadata\n metadata['dt'] = datetime.datetime.utcfromtimestamp(metadata['ts']).strftime('%Y-%m-%d %H:%M:%S UTC')\n del metadata['ts']\n if not metadata['ignore_dirs']:\n del metadata['ignore_dirs']\n\n ret['metadata'] = metadata\n ret['records_by_path'] = records_by_path\n ret['records_by_modtime'] = records_by_modtime\n ret['records_by_filesize'] = records_by_filesize\n if records_by_crc32:\n ret['records_by_crc32'] = records_by_crc32\n\n assert len(records_by_path) == n_records\n assert sum(len(e) for e in records_by_modtime.values()) == n_records\n assert sum(len(e) for e in records_by_filesize.values()) == n_records\n return ret\n\n\n# create a tree from a list of files, with each dt node entry containing:\n# full_dirpath: tuple of strings (so that it's hashable), each containing\n# one directory name on the path from 
root to the current dir\n# files: list of file entries in this directory\n# subdirs: dict of subdirectories - key: name, value: dt node\ndef create_dirtree(files_lst):\n rootdir = dict(files=[],\n subdirs={}, # key: name of one subdirectory level, value: its entry\n full_dirpath=tuple()) # init to empty tuple since there are NO path components\n for e in files_lst:\n path_entries = e['dirs']\n if len(path_entries) == 1 and path_entries[0] == '': # top-level root dir\n rootdir['files'].append(e)\n else:\n cur_entry = rootdir # always start at root\n\n # traverse down path_entries and create children to the tree\n # rooted at rootdir as necessary:\n for i, p in enumerate(path_entries):\n if p not in cur_entry['subdirs']:\n # ugh, full_dirpath is messy to construct; i basically\n # just want the prefix of the full path_entries list\n cur_entry['subdirs'][p] = dict(files=[], subdirs={},\n full_dirpath=tuple(path_entries[:i+1]))\n cur_entry = cur_entry['subdirs'][p]\n cur_entry['files'].append(e)\n\n augment_dirtree_with_metadata(rootdir) # augment with stats!\n # verify!\n assert len(files_lst) == rootdir['metadata']['total_num_files']\n return rootdir\n\n\n# after a directory tree has been fully constructed with create_dirtree,\n# run this to AUGMENT each entry with a metadata dict containing stats\n# such as: num_files, num_subdirs, total_num_files, total_num_subdirs\n# where the latter 2 *recursively* count the total number of files and\n# subdirectories within this one\n# (note that this is a different kind of metadata than the one mentioned\n# in parse_inventory_file)\ndef augment_dirtree_with_metadata(dt):\n def helper(cur_entry):\n md = {}\n cur_entry['metadata'] = md\n\n # for only this level in the directory tree:\n md['num_files'] = len(cur_entry['files'])\n md['num_subdirs'] = len(cur_entry['subdirs'])\n\n # we might increment these later as we recurse ...\n md['total_num_files'] = md['num_files']\n md['total_num_subdirs'] = md['num_subdirs']\n\n for k in cur_entry['subdirs']:\n (child_num_files, child_num_subdirs) = helper(cur_entry['subdirs'][k])\n md['total_num_files'] += child_num_files\n md['total_num_subdirs'] += child_num_subdirs\n\n # return to parent!\n return (md['total_num_files'], md['total_num_subdirs'])\n helper(dt)\n\n\n# metadata is a dict, so serialize it by sorting and stringifying its items\ndef serialize_metadata(md):\n return str(sorted(md.items()))\n\n\n# returns the entry within dt that's referred to by dirpath\n# dt: a directory tree created with create_dirtree\n# dirpath: list of ordered directory names\n# (will give an error if dirpath isn't in dt)\ndef get_directory_entry(dt, dirpath):\n #print('get_directory_entry:', dirpath)\n cur = dt\n for e in dirpath:\n cur = cur['subdirs'][e]\n #print('~', cur['full_dirpath'], tuple(cur['subdirs'].keys()))\n #print('~~~')\n return cur\n\n\n# generator that traverses a dirtree object in pre-order (I think?)\n# TODO: rewrite other functions using this generator\ndef gen_dirtree_entries(dt):\n def helper(cur_entry):\n yield cur_entry\n for k in cur_entry['subdirs']:\n yield from helper(cur_entry['subdirs'][k])\n yield from helper(dt)\n\n\n# iterate over dt and run func on each entry in pre-order.\n# if func returns True, then return early and don't recurse inward\n# into its subdirectories. 
otherwise keep recursing inward\ndef dirtree_foreach(dt, func):\n def helper(cur_entry):\n return_early = func(cur_entry)\n if return_early:\n return\n\n # recurse inward ...\n for k in cur_entry['subdirs']:\n helper(cur_entry['subdirs'][k])\n helper(dt)\n\n\ndef make_dirtuple(dirpath):\n return tuple(dirpath.split('/'))\n\ndef dirtuple_to_path(dt):\n return '/'.join(dt)\n\n# from a file entry\ndef get_path_from_file(e):\n return (e['dirs'], e['fn'])\n\nfrom enum import Enum\nclass DirtreeCompareState(Enum):\n UNKNOWN = 1\n A_SUPERSET_OF_B = 2\n B_SUPERSET_OF_A = 3\n EQUAL = 4\n\n# compare two directory trees to see if they match in terms of\n# constituent files (name, size, modtime matches) and sub-directories,\n# or whether dtA is a strict superset of dtB,\n# or whether dtB is a strict superset of dtA\n#\n# we use only filenames and sizes for comparisons, which might result\n# in some false positives since file contents might have changed but\n# sizes remain the same!\n#\n# TODO: maybe implement a heuristic of:\n# if dtA or dtB have ZERO FILES and zero subdirs in them at the top level,\n# then return UNKNOWN since we really don't have enough info to determine\n# ... also it's kinda weird to say that something is a superset of an\n# EMPTY directory, since it's technically true but doesn't tell us much\ndef dirtree_compare(dtA, dtB):\n # start assuming they're equal and then have our traversal disprove it\n status = DirtreeCompareState.EQUAL\n\n def helper(dtA_entry, dtB_entry):\n nonlocal status # argh\n\n # note we don't check full_dirpath because dtA and dtB can have\n # vastly different paths but still contain the same files and\n # sub-directory names\n\n # each element contains the following identifying metadata about\n # each file that we will compare: fn (filename), size (# bytes)\n # (we don't include 'dirs' since the exact directory names will\n # be, by definition, different, between two directory trees)\n # [TODO: incorporate crc32 if available]\n # [TODO: incorporate an approximate check for e['mt'] within a\n # minute if not ignore_modtimes]\n dtA_files = set((e['fn'], e['size']) for e in dtA_entry['files'])\n dtB_files = set((e['fn'], e['size']) for e in dtB_entry['files'])\n\n if dtA_files == dtB_files:\n pass # keep going\n elif dtA_files.issuperset(dtB_files):\n # contradiction\n if status == DirtreeCompareState.B_SUPERSET_OF_A:\n return False\n status = DirtreeCompareState.A_SUPERSET_OF_B\n elif dtB_files.issuperset(dtA_files):\n # contradiction\n if status == DirtreeCompareState.A_SUPERSET_OF_B:\n return False\n status = DirtreeCompareState.B_SUPERSET_OF_A\n else:\n return False # bad!\n\n\n # now compare subdir names\n dtA_subdir_names = set(dtA_entry['subdirs'])\n dtB_subdir_names = set(dtB_entry['subdirs'])\n\n if dtA_subdir_names == dtB_subdir_names:\n pass # carry on\n elif dtA_subdir_names.issuperset(dtB_subdir_names):\n # contradiction\n if status == DirtreeCompareState.B_SUPERSET_OF_A:\n return False\n status = DirtreeCompareState.A_SUPERSET_OF_B\n elif dtB_subdir_names.issuperset(dtA_subdir_names):\n # contradiction\n if status == DirtreeCompareState.A_SUPERSET_OF_B:\n return False\n status = DirtreeCompareState.B_SUPERSET_OF_A\n else:\n return False # bad!\n\n # recurse into the *intersection* of subdirs so that they exist\n # in both dtA and dtB:\n common_subdirs = dtA_subdir_names & dtB_subdir_names\n\n # recurse inward ...\n for k in common_subdirs:\n # a single False and we're DONE FOR!\n ret = helper(dtA_entry['subdirs'][k], dtB_entry['subdirs'][k])\n if 
not ret:\n return False\n\n return True # we made it all the way!\n\n ret = helper(dtA, dtB)\n if not ret:\n status = DirtreeCompareState.UNKNOWN\n\n return status\n\n\n# dt: created by create_dirtree\n# summary_threshold: don't recurse anymore if the current level\n# (recursively) has more than N files\n#\n# min_levels_to_recurse: force recursing into at least N levels, even\n# if there are more than summary_threshold files at some levels\n#\n# if you pass in None, then assume infinite threshold or levels\n#\n# TODO: can we re-write this using dirtree_foreach? that would seem cleaner :)\ndef pretty_print_dirtree(dt, summary_threshold, min_levels_to_recurse,\n aux_dict_repr, hide_empty_dirs=False):\n if summary_threshold is None: summary_threshold = float('inf')\n if min_levels_to_recurse is None: min_levels_to_recurse = float('inf')\n\n def print_helper(cur_entry, level):\n n_files_recursive = cur_entry['metadata']['total_num_files']\n if hide_empty_dirs and n_files_recursive == 0:\n return\n\n prefix = (' ' * level)\n prefix_plus_one = (' ' * (level+1))\n\n if cur_entry['full_dirpath']:\n dirname = cur_entry['full_dirpath'][-1] # get the last entry\n else:\n # no elements in full_dirpath means the root directory\n dirname = ''\n\n print(prefix + '/' + dirname)\n #print(prefix + '/' + dirname + ' : ' + str(cur_entry['metadata'])) # more verbose with metadata\n\n # base case - get out and don't recurse anymore\n # use min_levels_to_recurse to force it into at least N levels\n if level >= min_levels_to_recurse and n_files_recursive > summary_threshold:\n print(f'{prefix_plus_one}[{n_files_recursive} files and possible sub-directories]')\n # recursive case\n else:\n n_files_in_cur_level = cur_entry['metadata']['num_files']\n assert n_files_in_cur_level == len(cur_entry['files'])\n # if n_files_in_cur_level is above summary_threshold, then\n # summarize this level but still recurse\n if n_files_in_cur_level > summary_threshold:\n print(f'{prefix_plus_one}[{n_files_in_cur_level} files]')\n else:\n # otherwise list out all the files\n for f in cur_entry['files']:\n print(f'{prefix_plus_one}{f[\"fn\"]} {aux_dict_repr(f)}')\n\n # now recurse! 
sort to print in alphabetical order\n for k in sorted(cur_entry['subdirs']):\n print_helper(cur_entry['subdirs'][k], level+1)\n\n print_helper(dt, 0)\n\n\n# compare inventories produced by parse_inventory_file\n# you can pass in optional paths to ignore\ndef compare_inventories(first, second, summary_threshold, min_levels_to_recurse,\n ignore_modtimes=False,\n ignore_dirs=[],\n ignore_filenames=[],\n ignore_exts=[],\n ignore_direxts=[],\n quiet=False):\n # make sure they start as empty lists\n if not ignore_dirs: ignore_dirs = []\n if not ignore_filenames: ignore_filenames = []\n if not ignore_exts: ignore_exts = []\n if not ignore_direxts: ignore_direxts = []\n\n # append defaults:\n ignore_dirs += DEFAULT_IGNORE_DIRS\n ignore_filenames += DEFAULT_IGNORE_FILENAMES\n ignore_direxts += DEFAULT_IGNORE_DIREXTS\n\n # parse it:\n ignore_direxts = [tuple(e.split(',')) for e in ignore_direxts]\n for e in ignore_direxts: assert len(e) == 2\n\n # make sure extensions start with '.'!\n for e in ignore_exts:\n assert e[0] == '.'\n\n\n # e is a (dirname, filename) pair\n def should_ignore(e):\n for d in ignore_dirs:\n if d in e[0]: # naive substring match\n return True\n if e[1] in ignore_filenames:\n return True\n\n ext = os.path.splitext(e[1])[1].lower() # extension - LOWERCASE IT for simplicity\n if ext in ignore_exts:\n return True\n\n # simultaneously match both a directory (e[0]) and an extension\n if (e[0], ext) in ignore_direxts:\n return True\n\n return False\n\n\n if not quiet:\n print(f'ignore_dirs: {ignore_dirs}\\nignore_filenames: {ignore_filenames}\\nignore_exts: {ignore_exts}\\nignore_direxts: {ignore_direxts}\\nsummary_threshold: {summary_threshold}\\nmin_levels_to_recurse: {min_levels_to_recurse}\\n')\n print('---')\n print('First: ', first['metadata'])\n print('Second:', second['metadata'])\n print('---')\n\n first_rbp = first['records_by_path']\n second_rbp = second['records_by_path']\n\n # filter out should_ignore() as early as possible for efficiency!\n first_rbp_keys = set(e for e in first_rbp if not should_ignore(e))\n second_rbp_keys = set(e for e in second_rbp if not should_ignore(e))\n\n in_first_but_not_second = first_rbp_keys.difference(second_rbp_keys)\n in_second_but_not_first = second_rbp_keys.difference(first_rbp_keys)\n in_both = first_rbp_keys.intersection(second_rbp_keys)\n print_time_elapsed('B')\n\n changed_files = []\n # for files in both first and second, compare their metadata\n for e in in_both:\n first_data = first_rbp[e]\n second_data = second_rbp[e]\n\n modtimes_differ = False\n modtimes_diff_secs = 0\n sizes_differ = False\n sizes_diff_bytes = 0\n\n # use a heuristic for 'close enough' in terms of modtimes\n # (within a minute)\n if not ignore_modtimes:\n modtimes_diff_secs = round(second_data['mt'] - first_data['mt'])\n if abs(modtimes_diff_secs) > 60:\n modtimes_differ = True\n\n if first_data['sz'] != second_data['sz']:\n sizes_differ = True\n sizes_diff_bytes = second_data['sz'] - first_data['sz']\n\n # only report a change if the SIZE differs\n # (note that there may be false positives if size remains the\n # same but internal bytes change)\n if sizes_differ:\n assert len(e) == 2\n changed_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],\n diff_secs=modtimes_diff_secs,\n diff_bytes=sizes_diff_bytes))\n\n changed_tree = create_dirtree(changed_files)\n print('files changed ...')\n\n def changed_repr(f):\n delta_bytes = None\n if f[\"diff_bytes\"] > 0:\n delta_bytes = f'+{f[\"diff_bytes\"]} bytes'\n elif f[\"diff_bytes\"] < 0:\n delta_bytes = 
f'{f[\"diff_bytes\"]} bytes'\n else:\n delta_bytes = 'NO SIZE CHANGE'\n return f'({str(datetime.timedelta(seconds=f[\"diff_secs\"]))}, {delta_bytes})'\n\n pretty_print_dirtree(changed_tree, summary_threshold, min_levels_to_recurse, changed_repr)\n\n\n # all files in first\n first_files = []\n for e in first_rbp_keys:\n assert len(e) == 2\n entry = first_rbp[e]\n first_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],\n size=entry['sz'], mt=entry['mt'], e=entry['e']))\n first_tree = create_dirtree(first_files)\n\n second_files = []\n for e in second_rbp_keys:\n assert len(e) == 2\n entry = second_rbp[e]\n second_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],\n size=entry['sz'], mt=entry['mt'], e=entry['e']))\n second_tree = create_dirtree(second_files)\n\n only_first_files = []\n # whoa it's much faster if you SORT first!\n # i suspect that create_dirtree works better if you feed it an\n # ordered list of paths, but i haven't empirically confirmed yet\n for e in sorted(in_first_but_not_second):\n assert len(e) == 2\n entry = first_rbp[e]\n only_first_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],\n size=entry['sz'], mt=entry['mt'], e=entry['e']))\n only_first_tree = create_dirtree(only_first_files)\n\n only_second_files = []\n for e in sorted(in_second_but_not_first): # whoa it's much faster if you SORT first!\n assert len(e) == 2\n entry = second_rbp[e]\n only_second_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],\n size=entry['sz'], mt=entry['mt'], e=entry['e']))\n only_second_tree = create_dirtree(only_second_files)\n\n\n # experimental: look for directories and files that have POSSIBLY\n # been moved between only_first_tree and only_second_tree\n\n # look up files by filesize first, then check modtimes\n # (TODO: if crc32 exists, then use that!)\n only_second_files_by_filesize = defaultdict(list)\n for e in only_second_files:\n only_second_files_by_filesize[e['size']].append(e)\n\n # subset of only_first_files containing files that ...\n only_first_files_not_moved = [] # have likely NOT been moved to second\n only_first_files_moved = [] # have likely been moved to second\n\n # subset of only_second_files containing files that ...\n only_second_files_not_moved = [] # have likely NOT been moved from first\n only_second_files_moved = [] # have likely been moved from first\n\n print_time_elapsed('C')\n\n # VERY IMPORTANT so that we can do set membership checks in O(1) time\n # instead of doing O(n^2) list membership checks, which were SLOWWWW:\n only_second_files_moved_paths = set() # each element is (tuple(dirs), fn)\n\n for f in only_first_files:\n file_moved = False\n\n sz = f['size']\n modtime = f['mt']\n extension = f['e']\n # are there files with the same size in second?\n if sz in only_second_files_by_filesize:\n same_size_matches = only_second_files_by_filesize[sz]\n # should match file extension and modtime as well,\n # which is a pretty strict check!\n #\n # either we ignore modtimes or check if they're within 1 minute\n match_size_ext_modtime = [e for e in same_size_matches if\n e['e'] == extension and\n (True if ignore_modtimes else abs(e['mt'] - modtime) < 60)]\n if match_size_ext_modtime:\n file_moved = True\n # TODO: what about files with multiple matches due to redundancies?\n for m in match_size_ext_modtime:\n m_path = get_path_from_file(m)\n # don't double-add to list (use a set to do O(1) time\n # redundancy lookups instead of O(n^2) if we used\n # the list directly)\n if m_path not in only_second_files_moved_paths:\n 
only_second_files_moved.append(m)\n only_second_files_moved_paths.add(m_path)\n\n if file_moved:\n only_first_files_moved.append(f)\n else:\n only_first_files_not_moved.append(f)\n\n print_time_elapsed('D')\n for f in only_second_files:\n f_path = get_path_from_file(f)\n if f_path not in only_second_files_moved_paths: # O(1) set membership check\n only_second_files_not_moved.append(f)\n print_time_elapsed('E')\n\n # consistency checks\n assert len(only_first_files_moved) + len(only_first_files_not_moved) == len(only_first_files)\n assert len(only_second_files_moved) + len(only_second_files_not_moved) == len(only_second_files)\n\n only_first_files_moved_tree = create_dirtree(only_first_files_moved)\n only_second_files_moved_tree = create_dirtree(only_second_files_moved)\n\n\n # checks to see if ENTIRE DIRECTORIES were likely moved from only_first_files_moved_tree\n # now do the *exact same symmetric check* to see what was likely\n # moved from only_second_files_moved_tree\n\n # each element is a pair of (dirpath in first, dirpath in second)\n # first moved verbatim over to second\n moved_directory_dirpaths = set()\n # first moved over to second, but second has MORE additional data inside\n # (so first is a subset of second)\n moved_directory_subset_dirpaths = set()\n\n # create a closure over my_full_tree and other_tree_to_check\n def make_directory_move_checker(my_full_tree, other_tree_to_check):\n # pass this into dirtree_foreach() ... returns True if you want to\n # break early and not recurse into children\n # - side-effect: adds entries to moved_directory_dirpaths\n def directory_move_checker(cur_entry):\n fdp = cur_entry['full_dirpath']\n\n # this is the metadata of the current directory within my_full_tree\n full_entry = get_directory_entry(my_full_tree, fdp)\n full_metadata = full_entry['metadata']\n\n # ok if this condition is true, that means that the ENTIRE contents\n # of the directory from my_full_tree does not appear in cur_entry,\n # which means that the entire directory wasn't moved. 
so get out!\n if full_metadata != cur_entry['metadata']:\n return False # return False early and keep recursing\n\n # check to make sure those trees are indeed equal\n # TODO: comment out in production if you want to speed things up\n assert dirtree_compare(cur_entry, full_entry) == DirtreeCompareState.EQUAL\n\n # now iterate through all nodes in other_tree_to_check and find any\n # that equal to or a *superset* of cur_entry.\n #\n # TODO: can optimize by stopping short whenever you find a node\n # whose metadata is *SMALLER* than cur_entry['metadata'] since\n # that can't possibly be a superset of cur_entry\n for other_entry in gen_dirtree_entries(other_tree_to_check):\n sdp = other_entry['full_dirpath']\n cur_to_other_comp = dirtree_compare(cur_entry, other_entry)\n # the entire contents of fdp has moved over to sdp\n if cur_to_other_comp == DirtreeCompareState.EQUAL:\n moved_directory_dirpaths.add((fdp, sdp))\n return True # if there's a real match, don't recurse\n # fdp is a subset of sdp, so that means everything\n # moved over but sdp has additional data inside\n elif cur_to_other_comp == DirtreeCompareState.B_SUPERSET_OF_A:\n moved_directory_subset_dirpaths.add((fdp, sdp))\n return True # if there's a real match, don't recurse\n\n return False # default, return False and keep recursing\n\n return directory_move_checker\n\n # symmetrically check both ends ...\n # TODO: optimize by eliminating redundancy later if necessary:\n dirtree_foreach(only_first_files_moved_tree,\n make_directory_move_checker(first_tree, only_second_files_moved_tree))\n\n # TODO: get this to work ...\n #dirtree_foreach(only_second_files_moved_tree,\n # make_directory_move_checker(second_tree, only_first_files_moved_tree))\n\n\n def plain_repr(f):\n return f'({f[\"size\"]} bytes, modtime: {int(f[\"mt\"])})'\n\n if moved_directory_dirpaths or moved_directory_subset_dirpaths:\n print('======')\n\n # ok now moved_directory_dirpaths should be populated with dirpaths\n # of ENTIRE DIRECTORIES that have moved ...\n for fdp, sdp in sorted(moved_directory_dirpaths):\n print('DIR_MOVED:\\n ', dirtuple_to_path(fdp))\n print(' ', dirtuple_to_path(sdp))\n\n for fdp, sdp in sorted(moved_directory_subset_dirpaths):\n from_tree = get_directory_entry(first_tree, fdp)\n from_tree_check = get_directory_entry(only_first_files_moved_tree, fdp)\n\n # TODO: comment out in production if you want to speed things up\n assert dirtree_compare(from_tree, from_tree_check) == DirtreeCompareState.EQUAL\n\n to_tree = get_directory_entry(only_second_tree, sdp)\n assert from_tree['full_dirpath'] == fdp\n assert to_tree['full_dirpath'] == sdp\n\n print('DIR_MOVED_SUBSET_HOLY_GAO:')\n print(' ', dirtuple_to_path(fdp))\n #pretty_print_dirtree(from_tree, 0, 2, plain_repr)\n #print()\n print(' ', dirtuple_to_path(sdp))\n #pretty_print_dirtree(to_tree, 0, 2, plain_repr)\n #print()\n\n\n # remove entries from only_first_files_moved_tree / only_second_files_moved_tree\n # if they appear in moved_directory_dirpaths or moved_directory_subset_dirpaths\n # - i *think* this is sufficient, according to my venn diagram sketches\n # - *maybe* it's easier to go off of the only_first_files_moved and\n # only_second_files_moved lists, filter them, and rebuild the trees?\n paths_to_filter_from_first = set()\n paths_to_filter_from_second = set()\n\n for fdp, sdp in moved_directory_dirpaths.union(moved_directory_subset_dirpaths):\n # important to filter from the subset of files that have been MOVED\n from_tree = get_directory_entry(only_first_files_moved_tree, fdp)\n 
to_tree = get_directory_entry(only_second_files_moved_tree, sdp)\n\n for d in gen_dirtree_entries(from_tree):\n paths_to_filter_from_first.update([get_path_from_file(e) for e in d['files']])\n for d in gen_dirtree_entries(to_tree):\n paths_to_filter_from_second.update([get_path_from_file(e) for e in d['files']])\n\n # filter the respective lists:\n only_first_files_moved = [e for e in only_first_files_moved\n if get_path_from_file(e) not in paths_to_filter_from_first]\n only_second_files_moved = [e for e in only_second_files_moved\n if get_path_from_file(e) not in paths_to_filter_from_second]\n\n # now rebuild those trees ...\n only_first_files_moved_tree = create_dirtree(only_first_files_moved)\n only_second_files_moved_tree = create_dirtree(only_second_files_moved)\n\n print('======')\n print('files in first that were moved ...')\n pretty_print_dirtree(only_first_files_moved_tree, summary_threshold, min_levels_to_recurse, plain_repr)\n print('\\nfiles in second that were moved ...')\n pretty_print_dirtree(only_second_files_moved_tree, summary_threshold, min_levels_to_recurse, plain_repr)\n\n print('======')\n print('only in first (but not moved) ...')\n pretty_print_dirtree(create_dirtree(only_first_files_not_moved), summary_threshold, min_levels_to_recurse, plain_repr)\n print('\\nonly in second (but not moved) ...')\n pretty_print_dirtree(create_dirtree(only_second_files_not_moved), summary_threshold, min_levels_to_recurse, plain_repr)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # mandatory positional arguments:\n parser.add_argument(\"first_file\", help=\"first inventory file to compare\")\n parser.add_argument(\"second_file\", help=\"second inventory file to compare\")\n parser.add_argument(\"--ignore_modtimes\", help=\"ignore modification times\", action=\"store_true\")\n parser.add_argument(\"--ignore_dirs\", nargs='+', help=\"ignore the following directories: \")\n parser.add_argument(\"--ignore_files\", nargs='+', help=\"ignore the following filenames: \")\n parser.add_argument(\"--ignore_exts\", nargs='+', help=\"ignore the following file extensions (use lowercase!): \")\n parser.add_argument(\"--ignore_direxts\", nargs='+', help=\"ignore the following file extensions within directories: of entries, each being 'dirname,extension'\")\n parser.add_argument(\"--summary_threshold\", action='store', default=DEFAULT_SUMMARY_THRESHOLD, help=\"summarize a directory when it or its subdirectories have more than N files\")\n parser.add_argument(\"--min_levels\", action='store', default=3, help=\"but recurse into at least N levels\")\n parser.add_argument(\"--quiet\", help=\"less verbose output\", action=\"store_true\")\n\n args = parser.parse_args()\n first = parse_inventory_file(args.first_file)\n print_time_elapsed('first parse done')\n second = parse_inventory_file(args.second_file)\n print_time_elapsed('second parse done')\n compare_inventories(first, second,\n int(args.summary_threshold),\n int(args.min_levels),\n args.ignore_modtimes,\n args.ignore_dirs, args.ignore_files,\n args.ignore_exts, args.ignore_direxts,\n args.quiet)\n", "sub_path": "compare_inventories.py", "file_name": "compare_inventories.py", "file_ext": "py", "file_size_in_byte": 32402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.version", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 71, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 74, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 241, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 430, "usage_type": "call"}, {"api_name": "os.path", "line_number": 430, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 502, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 549, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 755, "usage_type": "call"}]} +{"seq_id": "340674584", "text": "from influxdb import InfluxDBClient\nfrom numpy.random import default_rng\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport random\nimport csv \nimport os\nfrom optimal_downsampling_manager.resource_predictor.estimate_table import Degraded_IATable, get_context, DownTimeTable, DownRatioTable, Degraded_Q_IATable, get_month_and_day\nfrom math import e\nimport sys\nimport yaml\nimport argparse\nwith open('configuration_manager/config.yaml','r') as yamlfile:\n data = yaml.load(yamlfile,Loader=yaml.FullLoader)\nnp.random.seed(10)\n\nDBclient = InfluxDBClient(host=data['global']['database_ip'], port=data['global']['database_port'], database=data['global']['database_name'], username='root', password='root')\nresultDBclient = InfluxDBClient(host=data['global']['database_ip'], port=data['global']['database_port'], database=data['global']['exp_database_name'], username='root', password='root')\n\nresult = DBclient.query('SELECT * FROM MaxAnalyticTargetNumber')\nMaxTargetTable = pd.DataFrame(list(result.get_points(measurement=\"MaxAnalyticTargetNumber\")))\n\nresult = DBclient.query('SELECT * FROM visual_features_entropies_PCA_normalized')\nPCATable = pd.DataFrame(list(result.get_points(measurement=\"visual_features_entropies_PCA_normalized\")))\n\n\nalog_list = ['EF','EFR','FIFO','approx','heuristic','opt']\n\n\nSEEN_ANALY_LIST = [\"illegal_parking0\", \"people_counting\"]\nUNSEEN_ANALY_LIST = [\"illegal_parking1\", \"car_counting\"]\n\nif __name__=='__main__':\n round_ = 1\n week = \"week\"\n for ro in range(round_):\n print(\"Generate queries...Round \",ro)\n if os.path.isfile(f'./experiments/query_ia_error_allalgo_{week}_round{ro}.csv'):\n os.remove(f'./experiments/query_ia_error_allalgo_{week}_round{ro}.csv')\n \n start_day = 9\n end_day = 15\n size = (end_day-start_day+1)\n\n query_video_list = []\n chosen_ana_list = []\n\n rng = default_rng()\n full_length_sample_quality_info_df = None\n full_info_df = None\n\n for r in range(size):\n \n date = str(r + start_day)\n result = DBclient.query(\"SELECT * FROM raw_11_\"+str(date))\n per_day_video_list = list(result.get_points(measurement=\"raw_11_\"+str(date)))\n video_num_per_day = len(per_day_video_list)\n poisson_query = np.random.poisson(lam=8/video_num_per_day, size=video_num_per_day) # 8 request / 24 hour \n \n # with open(f'./poisson.csv','a',newline='') as f:\n # writer = csv.writer(f)\n # 
writer.writerow([poisson_query, sum(poisson_query)])\n\n\n for idx_q, num_q in enumerate(poisson_query):\n if num_q == 0:\n continue\n chosen_ana_list.append(rng.choice(len(SEEN_ANALY_LIST), num_q ,replace=True))\n query_video_list.append(per_day_video_list[idx_q])\n\n result = DBclient.query('SELECT * FROM sample_quality_alltarget_inshot_11_'+str(date))\n full_length_sample_quality_info_df = pd.concat([full_length_sample_quality_info_df, pd.DataFrame(list(result.get_points(measurement='sample_quality_alltarget_inshot_11_'+str(date))))])\n result = DBclient.query('SELECT * FROM analy_complete_result_inshot_11_'+str(date))\n full_info_df = pd.concat([full_info_df, pd.DataFrame(list(result.get_points(measurement='analy_complete_result_inshot_11_'+str(date))))])\n\n for algo in alog_list:\n result = resultDBclient.query(\"SELECT * FROM video_in_server_\"+algo)\n video_in_server = pd.DataFrame(list(result.get_points(measurement = \"video_in_server_\"+algo)))\n query_result_ia = []\n for q in query_video_list:\n # information amount of original video\n # print(\"Querying\",q['name'],\"...\")\n origin_video_info = (full_info_df.loc[(full_info_df['name']==q['name']) & (full_info_df['a_type']=='illegal_parking0')]['target'].iloc[0] / MaxTargetTable.loc[(MaxTargetTable['a_type']=='illegal_parking0')]['value'].iloc[0]) \n origin_video_info += (full_info_df.loc[(full_info_df['name']==q['name']) & (full_info_df['a_type']=='people_counting')]['target'].iloc[0] / MaxTargetTable.loc[(MaxTargetTable['a_type']=='people_counting')]['value'].iloc[0]) \n origin_video_info += PCATable.loc[PCATable['name']==q['name']].iloc[0]['value']\n \n target_point = video_in_server.loc[video_in_server['name']==q['name']]\n \n if not target_point.empty:\n target_fps = str(target_point['fps'].iloc[0]); target_bitrate = str(target_point['bitrate'].iloc[0])\n \n ### Information amount of complete videos in server\n if target_fps =='24' and target_bitrate =='1000':\n preserved_video_info = origin_video_info\n else: ### Information amount of sampled videos in server\n try:\n preserved_video_info_ill0 = full_length_sample_quality_info_df.loc[(full_length_sample_quality_info_df['name']==q['name']) & (full_length_sample_quality_info_df['a_type']=='illegal_parking0') & (full_length_sample_quality_info_df['fps']==target_fps) & (full_length_sample_quality_info_df['bitrate']==target_bitrate)]['target'].iloc[0]\n preserved_video_info_ill0 /= MaxTargetTable.loc[(MaxTargetTable['a_type']=='illegal_parking0')]['value'].iloc[0]\n except:\n print(q['name'], \"fps:\", target_fps, \"bitrate:\", target_bitrate,'ill')\n preserved_video_info_ill0 = 0\n try:\n preserved_video_info_peo = full_length_sample_quality_info_df.loc[(full_length_sample_quality_info_df['name']==q['name']) & (full_length_sample_quality_info_df['a_type']=='people_counting') & (full_length_sample_quality_info_df['fps']==target_fps) & (full_length_sample_quality_info_df['bitrate']==target_bitrate)]['target'].iloc[0]\n preserved_video_info_peo /= MaxTargetTable.loc[(MaxTargetTable['a_type']=='people_counting')]['value'].iloc[0]\n except:\n print(q['name'], \"fps:\", target_fps, \"bitrate:\", target_bitrate,'peo')\n preserved_video_info_peo = 0\n try:\n preserved_video_info_pca = PCATable.loc[PCATable['name']==q['name']].iloc[0]['value']\n except:\n preserved_video_info_pca = 0\n preserved_video_info = preserved_video_info_ill0 + preserved_video_info_peo + preserved_video_info_pca\n \n info_error = abs(origin_video_info-preserved_video_info)\n else:\n print(\"Queried video has 
been deleted...\")\n info_error = origin_video_info\n\n query_result_ia.append(info_error)\n\n with open(f'./experiments/query_ia_error_allalgo_{week}_round{ro}.csv','a',newline='') as f:\n writer = csv.writer(f)\n writer.writerow([sum(query_result_ia)/len(query_result_ia), max(query_result_ia)])\n\n\n\n\n\n\n \n \n\n\n\n\n \n \n \n\n \n\n\n \n", "sub_path": "query_generator.py", "file_name": "query_generator.py", "file_ext": "py", "file_size_in_byte": 7452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "yaml.load", "line_number": 15, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "influxdb.InfluxDBClient", "line_number": 18, "usage_type": "call"}, {"api_name": "influxdb.InfluxDBClient", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random.default_rng", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random.poisson", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 79, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "566139547", "text": "import datetime\nimport os\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\nclass Representation:\n def __init__(self):\n self.iterations =[]\n self.values_fc = []\n\n def reset_chart(self):\n self.iterations= []\n self.values_fc= []\n\n def add_point_to_chart(self,iteration, value):\n self.iterations.append(iteration)\n self.values_fc.append(value)\n\n def save_chart(self,path,testCase,test,bests,iterations):\n figure, axes = plt.subplots()\n plt.ylabel('Uzyskany koszt sieci w danej iteracji')\n plt.xlabel('Iteracja')\n plt.plot(range(int(iterations)), bests, linewidth=2.0)\n #plt.show()\n plt.savefig(path + \"/\" + testCase + \"_\" + test + '_chart.png',\n bbox_inches='tight', format='png')\n plt.close(figure)\n\n def generate_out_files(self):\n plt.ylabel('Wartość funkcji celu')\n plt.xlabel('Iteracja')\n print(self.iterations)\n plt.plot(self.iterations, self.values_fc, linewidth=2.0)\n #plt.grid(True)\n plt.title(\"Funkcja celu\")\n plt.show()\n\n def save_graph(self, Graph, path, testCase, test, config_parameters, cost, cost_cities, cost_ps, show):\n figure, axes = plt.subplots()\n cities = {}\n electricity = {}\n edges_electricity = {}\n edges_cities = {}\n keysC = set()\n keysE = set()\n for node in Graph.nodes():\n if node >= 0:\n cities.update({node: (Graph.node[node]['x'], Graph.node[node]['y'])})\n else:\n electricity.update({node: (Graph.node[node]['x'], Graph.node[node]['y'])})\n\n for edge in 
Graph.edges():\n if edge[0] < 0 or edge[1] < 0:\n # edges_electricity.update({edge: ((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),\n # (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']))})\n edges_electricity.update({(Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']):\n (Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),\n (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']):\n (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])})\n keysE.add(((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),\n (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])))\n else:\n # edges_cities.update({edge[0]: ((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),\n # (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']))})\n edges_cities.update({(Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']):\n (Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),\n (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']):\n (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])})\n keysC.add(((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),\n (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])))\n\n nx.draw_networkx_nodes(Graph, cities, cities.keys(), node_color='red', node_size=150,\n label='Miasto',\n ax=axes)\n nx.draw_networkx_nodes(Graph, electricity, electricity.keys(), node_color='blue', node_size=150, node_shape='h',\n label='\\nElektrownia\\n',\n ax=axes)\n\n # nx.draw_networkx_edges(Graph, edges_cities, edge_color=\"black\" )\n nx.draw_networkx_edges(Graph, edges_cities, keysC,\n label=\"Rail network cost:\" + str(format(cost_cities, '.7f')) + '\\nK: ' +\n config_parameters[0], ax=axes)\n nx.draw_networkx_edges(Graph, edges_electricity, keysE, edge_color=\"red\",\n label=\"Power grid cost:\" + str(format(cost_ps, '.7f')) + '\\nKe: ' +\n config_parameters[1],ax=axes)\n empty = {(0, 0): (0, 0)}\n nx.draw_networkx_nodes(Graph, empty, empty.keys(), node_color='white', node_size=0,\n label='\\n\\nCAPEX: ' + str(format(cost, '.7f'))\n + '\\nPopulation: ' + str(config_parameters[2])\n + '\\nSelection: ' + str(config_parameters[3])\n + '\\nIterations: ' + str(config_parameters[4]),\n ax=axes)\n # nx.draw_networkx(Graph)\n handles, labels = axes.get_legend_handles_labels()\n legend = axes.legend(handles, labels, loc='upper center', ncol=3, bbox_to_anchor=(0.5, -0.1))\n # legend.get_frame().set_alpha(0.5)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.title(\"Najlepsze uzyskane rozwiązanie\")\n\n if show:\n plt_copy = plt\n plt_copy.show()\n\n # plt.imsave(path+ folder_out+\"/\"+ testCase + \"_\" + test + '_bestIm.png', format='png')\n plt.savefig(path + \"/\" + testCase + \"_\" + test + '_theBest.png',\n bbox_extra_artists=(legend,), bbox_inches='tight', format='png')\n plt.close(figure)\n", "sub_path": "src/representation.py", "file_name": "representation.py", "file_ext": "py", "file_size_in_byte": 5446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 74, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 77, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 82, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 85, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}]} +{"seq_id": "453182898", "text": "#! 
/usr/bin/env python\n# Public domain; MZMcBride, 2011; Legoktm, 2014\n\nfrom flask import Flask, request\nimport cgi\nimport urllib\nimport re\nimport oursql\nimport operator\nimport json\nimport os\n\napp = Flask(__name__)\n\nmy_cnf = os.path.expanduser('~/replica.my.cnf')\n\n\ndef database_list():\n conn = oursql.connect(host='enwiki.labsdb', db='meta_p', read_default_file=my_cnf)\n cursor = conn.cursor()\n cursor.execute('''\n /* checker.py database_list */\n SELECT\n dbname\n FROM wiki\n WHERE is_closed = 0;\n ''')\n databases = cursor.fetchall()\n cursor.close()\n conn.close()\n return [database[0] for database in databases]\n\n\ndef choose_host_and_domain(db):\n conn = oursql.connect(host='enwiki.labsdb',\n db='meta_p',\n read_default_file=my_cnf)\n cursor = conn.cursor()\n cursor.execute('''\n /* checker.py choose_host_and_domain */\n SELECT\n url\n FROM wiki\n WHERE dbname = ?;\n ''', (db,))\n for row in cursor.fetchall():\n domain = '%s' % row[0]\n cursor.close()\n conn.close()\n return {'host': db + '.labsdb', 'domain': domain}\n\n\ndef get_extension_namespaces(domain):\n params = {\n 'action': 'query',\n 'meta': 'proofreadinfo|siteinfo',\n 'piprop': 'namespaces',\n 'siprop': 'namespaces',\n 'format': 'json'\n }\n query_url = '%s/w/api.php?%s' % (domain, urllib.urlencode(params))\n app.logger.debug(query_url)\n url_contents = urllib.urlopen(query_url).read()\n parsed_content = json.loads(url_contents)\n page_namespace = parsed_content['query']['proofreadnamespaces']['page']['id']\n index_namespace = parsed_content['query']['proofreadnamespaces']['index']['id']\n names = parsed_content['query']['namespaces']\n return {'page_namespace': page_namespace, 'index_namespace': index_namespace, 'names': names}\n\n\ndef get_page_links(cursor, db, page_namespace, index_namespace, index_page):\n page_links = []\n cursor.execute('''\n /* checker.py get_page_links */\n SELECT\n pl_title\n FROM pagelinks\n JOIN page AS p1\n ON pl_from = p1.page_id\n JOIN page AS p2\n ON p2.page_title = pl_title\n AND p2.page_namespace = pl_namespace\n WHERE pl_namespace = ?\n AND p1.page_namespace = ?\n AND p1.page_title = ?;\n ''', (page_namespace, index_namespace, index_page))\n for row in cursor.fetchall():\n pl_title = row[0]\n #app.logger.debug(row[0])\n try:\n sort_key = int(unicode(row[0].rsplit('/', 1)[1].decode('utf-8')))\n except IndexError:\n sort_key = 1\n page_links.append([pl_title, sort_key])\n return page_links\n\ndef get_page_status(cursor, db, page_namespace, page):\n page_status = {}\n # Check if the page has transclusions first\n cursor.execute('''\n /* checker.py get_page_status */\n SELECT\n COUNT(*)\n FROM templatelinks\n WHERE tl_namespace = ?\n AND tl_title = ?;\n ''', (page_namespace, page))\n transclusion_count = cursor.fetchone()\n if transclusion_count:\n page_status['transclusion_count'] = transclusion_count[0] \n # Then check if the page has been proofread\n cursor.execute('''\n /* checker.py get_page_status */\n SELECT\n cl_to\n FROM page\n JOIN categorylinks\n ON cl_from = page_id\n WHERE page_id = cl_from\n AND page_namespace = ?\n AND page_title = ?;\n ''', (page_namespace, page))\n proofread_status = cursor.fetchone()\n if proofread_status:\n page_status['proofread_status'] = proofread_status[0].lower().replace('_', ' ')\n return page_status\n\n\n@app.route('/')\ndef main():\n TEXT = ''\n # Pick a db; make enwikisource the default\n if request.args.get('db') is not None:\n db = request.args.get('db').replace('_p', '')\n else:\n db = 'enwikisource'\n\n # All right, now let's 
pick a host and domain\n    connection_props = choose_host_and_domain(db)\n    host = connection_props['host']\n    domain = connection_props['domain']\n\n    # Run this awful function to grab the namespace names that are required.\n    extension_dict = get_extension_namespaces(domain)\n    page_namespace = extension_dict['page_namespace']\n    index_namespace = extension_dict['index_namespace']\n    page_namespace_name = extension_dict['names'][str(page_namespace)]['*']\n    index_namespace_name = extension_dict['names'][str(index_namespace)]['*']\n\n    if 'title' in request.args:\n        title = request.args.get('title')\n    else:\n        title = ''\n\n    yes_table = '''\\\n<table>\n%s\n</table>'''\n    yes_rows = []\n\n    no_table = '''\\\n<table>\n%s\n</table>'''
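\n    # Sketch of how the shells compose (hypothetical row content; the minimal\n    # <table> markup above is an assumption, since the original tags were lost):\n    # each accumulated row slots into the %s placeholder, e.g.\n    #   yes_table % '\\n'.join(['<tr><td>Page:Foo.djvu/1</td><td>proofread</td></tr>'])\n    #   -> '<table>\\n<tr><td>Page:Foo.djvu/1</td><td>proofread</td></tr>\\n</table>'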
\n    no_rows = []\n\n    tables = []\n    if host is not None and title:\n        conn = oursql.connect(host=host, db=db+'_p', read_default_file=my_cnf)\n        cursor = conn.cursor()\n        # Eliminate LTR and RTL marks and strip extra whitespace.\n        title = re.sub(r'(\\xe2\\x80\\x8e|\\xe2\\x80\\x8f)', '', title).strip(' ')\n        # Prep the title for the query (replace spaces and strip namespace name if present).\n        clean_title = title.replace(' ', '_')\n        if index_namespace_name + ':' in clean_title:\n            clean_title = clean_title.split(index_namespace_name + ':', 1)[1]\n        page_links = get_page_links(cursor, db, page_namespace, index_namespace, clean_title)\n        if page_links:\n            # Sort!\n            page_links = sorted(page_links, key=operator.itemgetter(1))\n            for item in page_links:\n                page_link = item[0]\n                status = get_page_status(cursor, db, page_namespace, page_link)\n                # 'proofread_status' is absent when the page has no status\n                # category, so don't index into it blindly.\n                table_row = '''\\\n<tr>\n<td>\n<a href=\"%s/wiki/%s\">%s</a>\n</td>\n<td>\n%s\n</td>\n</tr>''' % (domain,\n    urllib.quote('%s:%s' % (page_namespace_name, page_link)),\n    cgi.escape('%s:%s' % (page_namespace_name, page_link.replace('_', ' ')), quote=True),\n    status.get('proofread_status', ''))\n                if status['transclusion_count'] > 0:\n                    yes_rows.append(table_row)\n                else:\n                    no_rows.append(table_row)\n            tables.append(yes_rows)\n            tables.append(no_rows)\n        cursor.close()\n        conn.close()\n\n    TEXT += '''\\\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\" />\n<title>checker</title>\n</head>\n<body>\\\n'''\n\n    if title:\n        if db and host is not None and title:\n            TEXT += '<div>'\n            count = 0\n            for table in tables:\n                if count == 0:\n                    TEXT += '<h2>Transcluded</h2>'\n                else:\n                    TEXT += '<h2>Not transcluded</h2>'\n                TEXT += '''\\\n<table>\n<tr>\n<th>Page</th>\n<th>Status</th>\n</tr>\n%s\n</table>''' % ('\\n'.join(table))\n                count += 1\n            TEXT += '</div>'
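\n            # For reference, one emitted section for a hypothetical single-row\n            # table comes out roughly as follows (minimal markup assumed; the\n            # /wiki/ path in row links is likewise an assumption):\n            #   <h2>Transcluded</h2>\n            #   <table>\n            #   <tr><th>Page</th><th>Status</th></tr>\n            #   <tr><td><a href=\"<domain>/wiki/Page:Foo.djvu/7\">Page:Foo.djvu/7</a></td><td>proofread</td></tr>\n            #   </table>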
\n        else:\n            TEXT += '''\\\n<p>\nThere was some sort of error. Sorry. :-(\n</p>'''\n\n    elif host is None:\n        TEXT += '''\\\n<p>\nYou didn't specify an appropriate database name.\n</p>'''\n\n    else:\n        TEXT += '''\\\n<form action=\"\" method=\"get\">\n<p>\nInput index title below.\n</p>\n<p>\nDatabase\n<input type=\"text\" name=\"db\" value=\"enwikisource\" />\n</p>\n<p>\n<input type=\"text\" name=\"title\" />\n</p>\n<p>\n<input type=\"submit\" />\n</p>\n</form>'''
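\n        # The form submits via GET, so a filled-in request reads like\n        # (hypothetical index title):\n        #   /?db=enwikisource&title=Index:Foo.djvu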
\n    TEXT += '''\\\n<hr />\n<p>\npublic domain · \\\nbugs\n</p>
\n\n\n'''\n return TEXT\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "checker/checker.py", "file_name": "checker.py", "file_ext": "py", "file_size_in_byte": 9445, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "oursql.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "oursql.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 61, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 63, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 151, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 151, "usage_type": "name"}, {"api_name": "oursql.connect", "line_number": 169, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 172, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 178, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 193, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 194, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 207, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "332385312", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclicksData = pd.read_csv('./clicksInfo.csv', '\\t', header=0).ix[:,:].as_matrix()\nconversionsData = pd.read_csv('./conversionsInfo.csv', '\\t', header=0).ix[:,:].as_matrix()\n\nobjForCpc = {}\n\nfor row in clicksData:\n\tif(row[0] in objForCpc):\n\t\tobjForCpc[row[0]]['days'].append(row[1])\n\t\tobjForCpc[row[0]]['cpc'].append(row[2])\n\telse:\n\t\tobjForCpc[row[0]] = {}\n\t\tobjForCpc[row[0]]['days'] = [row[1]]\n\t\tobjForCpc[row[0]]['cpc'] = [row[2]]\n\nkeys = objForCpc.keys()\nn_groups = len(objForCpc[keys[0]]['cpc'])\nfig, ax = plt.subplots(figsize=(20,10))\nindex = np.arange(n_groups)\nbar_width = 0.2\nopacity = 0.8\n\ncolors = ['red','green','b','yellow','orange','black','pink']\ncolorCounter=0\n\nfor ob in keys:\n plt.bar(index + bar_width*colorCounter, objForCpc[ob]['cpc'], bar_width, alpha=opacity, color=colors[colorCounter], label=ob)\n colorCounter += 1\n\nplt.xlabel('Date')\nplt.ylabel('Avg cpc')\nplt.title('Average CPCs')\nplt.xticks(index + bar_width, objForCpc[keys[0]]['days'])\nplt.legend()\n\n#plt.tight_layout()\n#plt.show()\nplt.savefig('./images/AvgCpc.png')\n", "sub_path": "plots/basicBarForAvgCpc.py", "file_name": "basicBarForAvgCpc.py", "file_ext": 
"py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "215224187", "text": "import time\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom audio import Audio\nfrom dataset import new_audio_datasets\nfrom losses import MaskedL1\nfrom model.io import ModelPackage\nfrom utils.common import Averager\nfrom utils.config import Config\nfrom utils.decorators import ignore_exception\nfrom utils.display import plot_mel, plot_attention, display_params, stream\nfrom utils.paths import Paths\n\n\nclass Session:\n\n def __init__(self,\n index: int,\n r: int,\n lr: int,\n max_step: int,\n bs: int,\n train_set: DataLoader,\n val_set: DataLoader) -> None:\n self.index = index\n self.r = r\n self.lr = lr\n self.max_step = max_step\n self.bs = bs\n self.train_set = train_set\n self.val_set = val_set\n\n\nclass Trainer:\n\n def __init__(self, cfg: Config):\n self.cfg = cfg\n self.paths = Paths()\n self.audio = Audio(cfg)\n self.ckpt_path = self.paths.ckpt/cfg.config_id\n log_dir = self.ckpt_path/'tensorboard'\n self.writer = SummaryWriter(log_dir=log_dir, comment='v1')\n self.criterion = MaskedL1()\n\n def train(self, model: ModelPackage):\n for i, session_params in enumerate(self.cfg.training_schedule, 1):\n r, lr, max_step, bs = session_params\n if model.tacotron.step < max_step:\n train_set, val_set = new_audio_datasets(\n paths=self.paths, batch_size=bs, r=r, cfg=self.cfg)\n session = Session(\n index=i, r=r, lr=lr, max_step=max_step,\n bs=bs, train_set=train_set, val_set=val_set)\n self.train_session(model, session)\n\n def train_session(self, model: ModelPackage, session: Session):\n model.r = session.r\n cfg = self.cfg\n tacotron, gan = model.tacotron, model.gan\n taco_opti, gen_opti, disc_opti = \\\n model.taco_opti, model.gen_opti, model.disc_opti\n device = next(tacotron.parameters()).device\n display_params([\n ('Session', 
session.index), ('Reduction', session.r),\n ('Max Step', session.max_step), ('Learning Rate', session.lr),\n ('Batch Size', session.bs), ('Steps per Epoch', len(session.train_set))\n ])\n\n for g in taco_opti.param_groups:\n g['lr'] = session.lr\n\n loss_avg = Averager()\n duration_avg = Averager()\n\n while tacotron.get_step() <= session.max_step:\n\n for i, (seqs, mels, stops, ids, lens) in enumerate(session.train_set):\n seqs, mels, stops, lens = \\\n seqs.to(device), mels.to(device), stops.to(device), lens.to(device)\n t_start = time.time()\n block_step = tacotron.get_step() % cfg.steps_to_eval + 1\n\n tacotron.train()\n lin_mels, post_mels, att = tacotron(seqs, mels)\n\n lin_loss = self.criterion(lin_mels, mels, lens)\n post_loss = self.criterion(post_mels, mels, lens)\n\n loss = lin_loss + post_loss\n loss_avg.add(loss)\n\n taco_opti.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(tacotron.parameters(), 1.0)\n taco_opti.step()\n\n duration_avg.add(time.time() - t_start)\n steps_per_s = 1. / duration_avg.get()\n self.writer.add_scalar('Loss/train', loss, tacotron.get_step())\n self.writer.add_scalar('Params/reduction_factor', session.r, tacotron.get_step())\n self.writer.add_scalar('Params/batch_sze', session.bs, tacotron.get_step())\n self.writer.add_scalar('Params/learning_rate', session.lr, tacotron.get_step())\n\n msg = f'{block_step}/{cfg.steps_to_eval} | Step: {tacotron.get_step()} ' \\\n f'| {steps_per_s:#.2} steps/s | Avg. Loss: {loss_avg.get():#.4} '\n stream(msg)\n\n if tacotron.step % cfg.steps_to_checkpoint == 0:\n self.save_model(model, step=tacotron.get_step())\n\n if tacotron.step % self.cfg.steps_to_eval == 0:\n val_loss = self.evaluate(model, session.val_set, msg)\n self.writer.add_scalar('Loss/val', val_loss, tacotron.step)\n self.save_model(model)\n stream(msg + f'| Val Loss: {float(val_loss):#0.4} \\n')\n loss_avg.reset()\n duration_avg.reset()\n\n if tacotron.step > session.max_step:\n return\n\n def evaluate(self, model, val_set, msg) -> float:\n model.tacotron.eval()\n val_loss = 0\n device = next(model.tacotron.parameters()).device\n for i, batch in enumerate(val_set, 1):\n stream(msg + f'| Evaluating {i}/{len(val_set)}')\n seqs, mels, stops, ids, lens = batch\n seqs, mels, stops, lens = \\\n seqs.to(device), mels.to(device), stops.to(device), lens.to(device)\n with torch.no_grad():\n pred = model.tacotron(seqs, mels)\n lin_mels, post_mels, att = pred\n lin_loss = F.l1_loss(lin_mels, mels)\n post_loss = F.l1_loss(post_mels, mels)\n val_loss += lin_loss + post_loss\n if i == 1:\n self.generate_samples(model, batch, pred)\n\n val_loss /= len(val_set)\n return float(val_loss)\n\n def save_model(self, model: ModelPackage, step=None):\n model.save(self.ckpt_path/'latest_model.zip')\n if step is not None:\n model.save(self.ckpt_path/f'model_step_{step}.zip')\n\n @ignore_exception\n def generate_samples(self, model: ModelPackage,\n batch: torch.Tensor, pred: torch.Tensor):\n seqs, mels, stops, ids, lens = batch\n lin_mels, post_mels, att = pred\n mel_sample = mels.transpose(1, 2)[0, :lens[0]].detach().cpu().numpy()\n gta_sample = post_mels.transpose(1, 2)[0, :lens[0]].detach().cpu().numpy()\n att_sample = att[0].detach().cpu().numpy()\n target_fig = plot_mel(mel_sample)\n gta_fig = plot_mel(gta_sample)\n att_fig = plot_attention(att_sample)\n self.writer.add_figure('Mel/target', target_fig, model.tacotron.step)\n self.writer.add_figure('Mel/ground_truth_aligned', gta_fig, model.tacotron.step)\n self.writer.add_figure('Attention/ground_truth_aligned', 
att_fig, model.tacotron.step)\n\n target_wav = self.audio.griffinlim(mel_sample, 32)\n gta_wav = self.audio.griffinlim(gta_sample, 32)\n self.writer.add_audio(\n tag='Wav/target', snd_tensor=target_wav,\n global_step=model.tacotron.step, sample_rate=self.audio.sample_rate)\n self.writer.add_audio(\n tag='Wav/ground_truth_aligned', snd_tensor=gta_wav,\n global_step=model.tacotron.step, sample_rate=self.audio.sample_rate)\n\n seq = seqs[0].tolist()\n _, gen_sample, att_sample = model.tacotron.generate(seq, steps=lens[0])\n gen_fig = plot_mel(gen_sample)\n att_fig = plot_attention(att_sample)\n self.writer.add_figure('Attention/generated', att_fig, model.tacotron.step)\n self.writer.add_figure('Mel/generated', gen_fig, model.tacotron.step)\n gen_wav = self.audio.griffinlim(gen_sample, 32)\n self.writer.add_audio(\n tag='Wav/generated', snd_tensor=gen_wav,\n global_step=model.tacotron.step, sample_rate=self.audio.sample_rate)\n\n", "sub_path": "trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 7709, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 28, "usage_type": "name"}, {"api_name": "utils.config.Config", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.paths.Paths", "line_number": 42, "usage_type": "call"}, {"api_name": "audio.Audio", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 46, "usage_type": "call"}, {"api_name": "losses.MaskedL1", "line_number": 47, "usage_type": "call"}, {"api_name": "model.io.ModelPackage", "line_number": 49, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 52, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 52, "usage_type": "name"}, {"api_name": "dataset.new_audio_datasets", "line_number": 53, "usage_type": "call"}, {"api_name": "model.io", "line_number": 58, "usage_type": "argument"}, {"api_name": "model.io.ModelPackage", "line_number": 60, "usage_type": "name"}, {"api_name": "model.io.r", "line_number": 61, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 61, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 63, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 63, "usage_type": "name"}, {"api_name": "model.io.gan", "line_number": 63, "usage_type": "attribute"}, {"api_name": "model.io.taco_opti", "line_number": 65, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 65, "usage_type": "name"}, {"api_name": "model.io.gen_opti", "line_number": 65, "usage_type": "attribute"}, {"api_name": "model.io.disc_opti", "line_number": 65, "usage_type": "attribute"}, {"api_name": "utils.display.display_params", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.common.Averager", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.common.Averager", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.display.stream", "line_number": 110, "usage_type": "call"}, {"api_name": "model.io", "line_number": 113, 
"usage_type": "argument"}, {"api_name": "model.io", "line_number": 116, "usage_type": "argument"}, {"api_name": "model.io", "line_number": 118, "usage_type": "argument"}, {"api_name": "utils.display.stream", "line_number": 119, "usage_type": "call"}, {"api_name": "model.io.tacotron.eval", "line_number": 127, "usage_type": "call"}, {"api_name": "model.io.tacotron", "line_number": 127, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 127, "usage_type": "name"}, {"api_name": "model.io.tacotron.parameters", "line_number": 129, "usage_type": "call"}, {"api_name": "model.io.tacotron", "line_number": 129, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 129, "usage_type": "name"}, {"api_name": "utils.display.stream", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 135, "usage_type": "call"}, {"api_name": "model.io.tacotron", "line_number": 136, "usage_type": "call"}, {"api_name": "model.io", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.functional.l1_loss", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.functional.l1_loss", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 139, "usage_type": "name"}, {"api_name": "model.io", "line_number": 142, "usage_type": "argument"}, {"api_name": "model.io.ModelPackage", "line_number": 147, "usage_type": "name"}, {"api_name": "model.io.save", "line_number": 148, "usage_type": "call"}, {"api_name": "model.io", "line_number": 148, "usage_type": "name"}, {"api_name": "model.io.save", "line_number": 150, "usage_type": "call"}, {"api_name": "model.io", "line_number": 150, "usage_type": "name"}, {"api_name": "model.io.ModelPackage", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 154, "usage_type": "attribute"}, {"api_name": "utils.display.plot_mel", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.display.plot_mel", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.display.plot_attention", "line_number": 162, "usage_type": "call"}, {"api_name": "model.io.tacotron", "line_number": 163, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 163, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 164, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 164, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 165, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 165, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 171, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 171, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 174, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 174, "usage_type": "name"}, {"api_name": "model.io.tacotron.generate", "line_number": 177, "usage_type": "call"}, {"api_name": "model.io.tacotron", "line_number": 177, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 177, "usage_type": "name"}, {"api_name": "utils.display.plot_mel", "line_number": 178, "usage_type": "call"}, {"api_name": "utils.display.plot_attention", "line_number": 179, "usage_type": "call"}, {"api_name": "model.io.tacotron", "line_number": 180, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 180, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 
181, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 181, "usage_type": "name"}, {"api_name": "model.io.tacotron", "line_number": 185, "usage_type": "attribute"}, {"api_name": "model.io", "line_number": 185, "usage_type": "name"}, {"api_name": "utils.decorators.ignore_exception", "line_number": 152, "usage_type": "name"}]} +{"seq_id": "457832215", "text": "\nfrom PIL import Image, ImageDraw\nimport math\nimport random\n\n\n\n\nSIZE=(500, 700)\n\nSTART=(250, SIZE[1]-100)\n\ndef radians(degrees):\n return degrees/180*math.pi\n\n\ndef sortAngleHelper(angle):\n return getAngleDistance(angle, radians(-90))\n\n\ndef getAngleDistance(angleA, angleB):\n distance = angleA - angleB\n if distance < 0:\n distance = -distance\n if distance > 2*math.pi:\n distance = distance % (2*math.pi)\n if distance > math.pi:\n distance = 2*math.pi - distance\n return distance\n\n\n\n\ndef interpolateColor(colorA, colorB, ratio):\n rgba = []\n for i in range(4):\n color = int(colorA[i] * (1-ratio) + colorB[i] * ratio)\n rgba.append(color)\n return (rgba[0], rgba[1], rgba[2], rgba[3])\n\ndef getRandomColorDeviation(color, variance):\n rgb = []\n for i in range(3):\n value = random.randrange(color[i]-variance, color[i]+variance)\n if value > 255:\n value = 255\n if value < 0:\n value = 0\n rgb.append(value)\n return (rgb[0], rgb[1], rgb[2], color[3])\n\ndef getDistance(p1, p2):\n x = p2[0] - p1[0]\n y = p2[1] - p1[1]\n return math.sqrt(x*x + y*y)\n\ndef interpolateScalar(s1, s2, ratio):\n return (s1*(1-ratio) + s2*ratio)\n\ndef interpolate(p1, p2, ratio):\n x = p1[0] * (1-ratio) + p2[0] * ratio\n y = p1[1] * (1-ratio) + p2[1] * ratio\n return [x, y]\n\n\ndef randRangeFloat(min, max):\n diff = max-min\n randPart = random.random()*diff\n return (min + randPart)\n\ndef randRangeFloatArray(arr):\n return randRangeFloat(arr[0], arr[1])\n\n\ndef interpolateArc(p1, p2, arcSize, ratio):\n bottomPoint = interpolate(p1, p2, ratio)\n parabolicRatio = 4*(ratio - ratio*ratio)\n arc = arcSize * parabolicRatio\n point = (bottomPoint[0], bottomPoint[1] + arc)\n return point\n\ndef interpolateArcScalar(arcSize, ratio):\n parabolicRatio = 4*(ratio - ratio*ratio)\n return arcSize * parabolicRatio\n\ndef getBoundariesForCircle(center, diameter):\n radius = diameter/2\n x1 = center[0] - radius\n x2 = center[0] + radius\n y1 = center[1] - radius\n y2 = center[1] + radius\n return (x1, y1, x2, y2)\n\ndef drawCircle(center, diameter, color, draw : ImageDraw.ImageDraw):\n xy = getBoundariesForCircle(center, diameter)\n draw.ellipse(xy, fill=color)\n return\n\nnumToDraw = 30\n\nOUTPUT_PATH = \"sprites/objects/trees/\"\n\nSTUMP_START_WIDTH_RANGE = [12, 16]\nHEIGHT_RANGE = [400, 550]\nSTUMP_DECAY_RATIO = 0.995\nRESOLUTION = 5\n\nBRANCH_START_LENGTH_RANGE = [100, 120]\nBRANCH_DECAY_RATIO = 0.995\n\nVERT_RATIO = 1/math.sqrt(3)\n\nSTUMP_COLOR = (100, 60, 20, 255)\n\nBRANCH_ARC_RATIO = 0.2\n\nBRANCH_COLOR = (0, 90, 40, 255)\nBRANCH_COLOR_DEVIATION = 10\nBRANCH_WIDTH = 1\n\nSHADOW_OFFSET = 2\n\nSUB_BRANCH_RATIO = 0.5\n\ndef drawBranch(start, length, angle, draw : ImageDraw.ImageDraw):\n endX = start[0] + math.cos(angle)*length\n endY = start[1] + math.sin(angle)*length*VERT_RATIO\n end = (endX, endY)\n arcSize = BRANCH_ARC_RATIO * length\n numPieces = int(length/RESOLUTION)\n color = getRandomColorDeviation(BRANCH_COLOR, BRANCH_COLOR_DEVIATION)\n shadowColor = interpolateColor(color, (0, 0, 0, 255), 0.5)\n angleSubBranchLeft = angle - radians(45)\n angleSubBranchRight = angle + radians(45)\n subBranchMaxLength = length * 
SUB_BRANCH_RATIO\n for i in range(numPieces):\n ratio = i/numPieces\n nextRatio = (i+1)/numPieces\n segmentStart = interpolateArc(start, end, arcSize, ratio)\n segmentEnd = interpolateArc(start, end, arcSize, nextRatio)\n shadowStart = (segmentStart[0], segmentStart[1]+SHADOW_OFFSET)\n shadowEnd = (segmentEnd[0], segmentEnd[1]+SHADOW_OFFSET)\n subBranchLength = interpolateArcScalar(subBranchMaxLength, ratio)\n subBranchLeftEndX = segmentEnd[0] + math.cos(angleSubBranchLeft)*subBranchLength\n subBranchLeftEndY = segmentEnd[1] + math.sin(angleSubBranchLeft)*subBranchLength*VERT_RATIO\n subBranchRightEndX = segmentEnd[0] + math.cos(angleSubBranchRight)*subBranchLength\n subBranchRightEndY = segmentEnd[1] + math.sin(angleSubBranchRight)*subBranchLength*VERT_RATIO\n subBranchLeftEnd = (subBranchLeftEndX, subBranchLeftEndY)\n subBranchRightEnd = (subBranchRightEndX, subBranchRightEndY)\n subBranchLeftShadowEnd = (subBranchLeftEndX, subBranchLeftEndY + SHADOW_OFFSET)\n subBranchRightShadowEnd = (subBranchRightEndX, subBranchRightEndY + SHADOW_OFFSET)\n draw.line((shadowStart, shadowEnd), fill=shadowColor, width=BRANCH_WIDTH)\n draw.line((shadowStart, subBranchLeftShadowEnd), fill=shadowColor, width=BRANCH_WIDTH)\n draw.line((shadowStart, subBranchRightShadowEnd), fill=shadowColor, width=BRANCH_WIDTH)\n draw.line((segmentStart, segmentEnd), fill=color, width=BRANCH_WIDTH)\n draw.line((segmentStart, subBranchLeftEnd), fill=color, width=BRANCH_WIDTH)\n draw.line((segmentStart, subBranchRightEnd), fill=color, width=BRANCH_WIDTH)\n\n\n return\n\nANGLE_SPREAD_RATIO = 2\ndef drawBranches(start, n, lengthRange, draw : ImageDraw.ImageDraw):\n angleSeeds = []\n angleSeedsSum = 0\n for i in range(n):\n angleSeed = randRangeFloat(1, ANGLE_SPREAD_RATIO)\n angleSeeds.append(angleSeed)\n angleSeedsSum += angleSeed\n nextAngle = randRangeFloat(0, 2*math.pi)\n angles = []\n for seed in angleSeeds:\n nextAngle += seed/angleSeedsSum*2*math.pi % (2*math.pi)\n angles.append(nextAngle)\n angles.sort(key=sortAngleHelper)\n for angle in angles:\n length = randRangeFloatArray(lengthRange)\n drawBranch(start, length, angle, draw)\n return\n\nBRANCH_SPACE = 20\n\nBRANCH_N = 6\n\ndef drawTree(draw : ImageDraw.ImageDraw):\n stumpWidth = randRangeFloatArray(STUMP_START_WIDTH_RANGE)\n height = randRangeFloatArray(HEIGHT_RANGE)\n location = START\n heightSoFar = 0\n lastBranch = 0\n branchRatio = 1\n while True:\n drawCircle(location, stumpWidth, STUMP_COLOR, draw)\n location = (location[0], location[1] - RESOLUTION)\n stumpWidth = randRangeFloat(stumpWidth*STUMP_DECAY_RATIO, stumpWidth)\n if lastBranch >= BRANCH_SPACE:\n nBranches = random.randrange(4, 8)\n branchRange = [BRANCH_START_LENGTH_RANGE[0]*branchRatio, BRANCH_START_LENGTH_RANGE[1]*branchRatio]\n drawBranches(location, nBranches, branchRange, draw)\n lastBranch = 0\n heightSoFar += RESOLUTION\n lastBranch += RESOLUTION\n branchRatio *= BRANCH_DECAY_RATIO\n if heightSoFar > height:\n break\n\n\n\nOUTPUT_PATH = \"sprites/objects/trees/\"\nNUM_TREES = 30\n\n\nfor i in range(NUM_TREES):\n image = Image.new('RGBA', SIZE, (0, 0, 0, 0))\n draw = ImageDraw.Draw(image)\n drawTree(draw)\n filename = OUTPUT_PATH + \"evergreen_\" + str(i) + \".png\"\n image.save(filename, \"PNG\")\n\n\n\n\n\n\n\n", "sub_path": "generate/objects/trees/evergreen.py", "file_name": "evergreen.py", "file_ext": "py", "file_size_in_byte": 6806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "math.pi", "line_number": 14, 
"usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 26, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 27, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 28, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 44, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 55, "usage_type": "call"}, {"api_name": "random.random", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.ImageDraw.ImageDraw", "line_number": 94, "usage_type": "attribute"}, {"api_name": "PIL.ImageDraw", "line_number": 94, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 111, "usage_type": "call"}, {"api_name": "PIL.ImageDraw.ImageDraw", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PIL.ImageDraw", "line_number": 125, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 126, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 127, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 144, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 145, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 146, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 147, "usage_type": "call"}, {"api_name": "PIL.ImageDraw.ImageDraw", "line_number": 163, "usage_type": "attribute"}, {"api_name": "PIL.ImageDraw", "line_number": 163, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 170, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 173, "usage_type": "attribute"}, {"api_name": "PIL.ImageDraw.ImageDraw", "line_number": 185, "usage_type": "attribute"}, {"api_name": "PIL.ImageDraw", "line_number": 185, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 197, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 214, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 214, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 215, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 215, "usage_type": "name"}]} +{"seq_id": "216516250", "text": "from typing import List\n\n\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n if amount == 0:\n return 0\n if coins is None or len(coins) == 0:\n return -1\n coins = sorted(coins)\n if amount < coins[0]:\n return -1\n result = [1000000 for i in range(amount + 1)]\n result[0] = 0\n for i in range(1, len(result)):\n for c in coins:\n if i - c >= 0:\n result[i] = min(result[i - c] + 1, result[i])\n return result[amount] if result[amount] < 1000000 else -1\n\n\ncoins = [186, 419, 83, 408]\namount = 6249\nresult = Solution().coinChange(coins, amount)\nprint(result)\n\n", "sub_path": "coin_change.py", "file_name": "coin_change.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "439059818", "text": "import pandas as pd\nimport requests\nimport settings\nimport pygame.mixer\nimport os\nimport time\n\n\naitalk_url = \"https://webapi.aitalk.jp/webapi/v2/ttsget.php\"\ncsv_path = \"./audio/dialogue.csv\"\naudio_folder = \"./audio/\"\ndf = pd.read_csv(csv_path, index_col=0)\npygame.mixer.init()\n\ndef get_dialogue(status):\n print(df['dialogue'][status])\n return df['dialogue'][status]\n\ndef get_filename(status):\n 
print(df['filename'][status])\n return df['filename'][status]\n\ndef play_audio(status):\n file_path = audio_folder + df['filename'][status]\n print(file_path)\n audio = pygame.mixer.Sound(file_path)\n channel = audio.play()\n # wait to finish\n while channel.get_busy():\n pygame.time.delay(100)\n pygame.time.delay(500)\n\ndef call_my_name(display_name):\n text = display_name\n filename = display_name + '.wav'\n request_aitalk(text, filename)\n file_path = audio_folder + filename\n audio = pygame.mixer.Sound(file_path)\n channel = audio.play()\n # wait to finish\n while channel.get_busy():\n pygame.time.delay(100)\n pygame.time.delay(300)\n play_audio(\"start\")\n # remove audio file\n os.remove(file_path)\n\ndef ready_to_take():\n play_audio(\"ok\")\n pygame.time.delay(200)\n\ndef count_down():\n play_audio(\"count3\")\n pygame.time.delay(500)\n play_audio(\"count2\")\n pygame.time.delay(500)\n play_audio(\"count1\")\n pygame.time.delay(500)\n audio = pygame.mixer.Sound(audio_folder + \"shut.wav\")\n channel = audio.play()\n # wait to finish\n while channel.get_busy():\n pygame.time.delay(100)\n\ndef request_aitalk(dialogue, filename):\n params = {\n 'username': settings.AITALK_USERNAME,\n 'password': settings.AITALK_PASSWORD,\n 'text': dialogue,\n 'speaker_name': 'miyabi_west',\n 'input_type': 'text',\n 'volume': 1.00, # volume\n 'speed': 1.10, # speaking rate\n 'pitch': 1.30, # voice pitch\n 'range': 1.20, # intonation (range of voice pitch)\n 'ext': 'wav'\n }\n # get an audio file from AITALK\n response = requests.get(aitalk_url, params=params)\n if response.status_code == 200:\n with open(audio_folder + filename, 'wb') as saveFile:\n saveFile.write(response.content)\n else:\n print(response)\n\ndef get_audio(status):\n dialogue = get_dialogue(status)\n filename = get_filename(status)\n # request an audio file and save it\n request_aitalk(dialogue, filename)\n\ndef main():\n get_dialogue(\"smile again\")\n get_filename(\"smile again\")\n get_audio(\"start\")\n call_my_name(\"ドナルド・フォントルロイ・ダック\")\n count_down()\n #request_aitalk(\"オバチャンが撮ったるで!\", 'test.mp3')\n\nif __name__ == '__main__':\n main()\n \n", "sub_path": "dialogue.py", "file_name": "dialogue.py", "file_ext": "py", "file_size_in_byte": 2754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.mixer.mixer.init", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.mixer.mixer", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 13, "usage_type": "name"}, {"api_name": "pygame.mixer.mixer.Sound", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.mixer.mixer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 26, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 30, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 31, "usage_type": "name"}, {"api_name": "pygame.mixer.mixer.Sound", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.mixer.mixer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 38, "usage_type": 
"name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 42, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 43, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.mixer.time.delay", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 50, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 54, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 56, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 58, "usage_type": "name"}, {"api_name": "pygame.mixer.mixer.Sound", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.mixer.mixer", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 59, "usage_type": "name"}, {"api_name": "pygame.mixer.time.delay", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.mixer.time", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 63, "usage_type": "name"}, {"api_name": "settings.AITALK_USERNAME", "line_number": 67, "usage_type": "attribute"}, {"api_name": "settings.AITALK_PASSWORD", "line_number": 68, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "434005392", "text": "import pandas as pd\nfrom sklearn import svm, metrics\n\nor_input = [\n [1,0,1],\n [0,1,1],\n [0,0,0],\n [1,1,1],\n [1,0,1],\n [0,1,1],\n [0,0,0],\n [1,1,1],\n [0,0,0],\n [1,1,0],\n]\n\nor_df = pd.DataFrame(or_input)\nor_data = or_df.ix[:, 0:1]\nor_label = or_df.ix[:, 2]\n\nclf = svm.SVC()\nclf.fit(or_data, or_label)\npre = clf.predict(or_data)\n\nac_score = metrics.accuracy_score(or_label, pre)\nprint(\"accuracy =\", ac_score) # the accuracy seems right; there doesn't seem to be data worth predicting from..", "sub_path": "04_AI/1_Machine_Learning/150_or_train.py", "file_name": "150_or_train.py", "file_ext": "py", "file_size_in_byte": 504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 21, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "162549020", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nclass StockCollection(object):\n def __init__(self):\n self.__stocks = []\n\n def add_stock(self, stock):\n 
self.__stocks.append(stock)\n\n def get_stock(self, index):\n return self.__stocks[index]\n\n def get_stocks(self):\n return self.__stocks\n\n def count(self):\n return len(self.__stocks)\n\n def get_correlation(self):\n all_stocks = self.__stocks[0].get_historical_close()\n tickers = [self.__stocks[0].get_name()]\n for stock in self.__stocks[1:]:\n all_stocks = pd.merge(all_stocks,\n stock.get_historical_close(),\n left_index=True,\n right_index=True)\n tickers.append(stock.get_name())\n\n all_stocks.columns = tickers\n corr = all_stocks.corr()\n plt.imshow(corr, cmap='hot', interpolation='nearest')\n sns.heatmap(\n corr,\n xticklabels=corr.columns,\n yticklabels=corr.columns,\n annot=True,\n center=0,\n cmap=sns.diverging_palette(150, 10, as_cmap=True)\n )\n plt.show()\n\n def show_stock_list(self):\n print(\"[0] All\")\n for i in range(len(self.__stocks)):\n print(\"[\" + str(i + 1) + \"] \" + self.__stocks[i].get_name())\n\n def show_all_sma_charts(self, days):\n for i in range(self.count()):\n self.__stocks[i].show_sma_chart(days)\n\n def show_all_ichimoku_charts(self):\n for i in range(len(self.__stocks)):\n self.__stocks[i].show_ichimoku_chart()\n", "sub_path": "project/StockCollection.py", "file_name": "StockCollection.py", "file_ext": "py", "file_size_in_byte": 1674, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pandas.merge", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 35, "usage_type": "call"}, {"api_name": "seaborn.diverging_palette", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "558180748", "text": "#!/usr/bin/env python3\n# coding=utf-8\n\n# Author: Junjie Wang\n# Mail: dreamboy.gns@sjtu.edu.cn\n\n# Website:http://120.79.231.160\n# Blog:http://120.79.231.160/wordpress\n \n# Created Time: 2018-12-01 21:03:11\n\n# -------------- For more details, refer to https://wiseodd.github.io/techblog/2015/10/17/metropolis-hastings/ -------------\n\n# Notes:\n# Gibbs sampling is a special case of M-H sampling, as in Gibbs the acceptance ratio is 1. Besides, Gibbs samples from the conditional distributions while M-H samples from the joint distribution. 
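(In standard M-H, a proposal x' ~ Q(x'|x) is accepted with probability min(1, p(x')Q(x|x') / (p(x)Q(x'|x))).) 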
So Gibbs is faster than M-H, but we have to perform mathematical analysis beforehand (e.g., derive the conditional distributions)\n# The Metropolis algorithm is also a special case of the M-H algorithm, namely when the transition matrix Q becomes symmetric\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as st\nimport seaborn as sns\n\nmus = np.array([5,5])\nsigmas = np.array([[1,.9],[.9,1]])\n\ndef circle(x,y):\n return (x-1)**2 + (y-2)**2 - 3**2\n \ndef gGaussian(x,y):\n return st.multivariate_normal.pdf([x,y],mean=mus,cov=sigmas)\n\n\n# actually we are implementing the Metropolis algorithm, as here we assume that the Q matrix is symmetric\ndef m_h(p,iter=1000):\n x,y = .0,.0\n samples = np.zeros((iter,2))\n for i in range(iter):\n x_,y_ = np.array([x,y]) + np.random.normal(size=2)\n if np.random.rand() < p(x_,y_)/p(x,y):\n x,y = x_,y_\n samples[i,:] = np.array([x,y])\n return samples\n\nif __name__ == '__main__':\n samples = m_h(circle,10000)\n sns.jointplot(samples[:,0],samples[:,1])\n\n samples = m_h(gGaussian,10000)\n sns.jointplot(samples[:,0],samples[:,1])\n\n plt.show()\n\n\n", "sub_path": "M_H_Sampling.py", "file_name": "M_H_Sampling.py", "file_ext": "py", "file_size_in_byte": 1693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal.pdf", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 30, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 46, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "653305119", "text": "import textblob\nfrom polyglot.detect import Detector\n\nhello_dict = {\"english\": \"hello\",\n \"french\": \"bonjour\",\n \"spanish\": \"hola\"\n }\n\n\ndef polygot_detection():\n for key in hello_dict:\n p = Detector(hello_dict[key]).languages[0]\n print(\"{} - {} - confidence: {}\".format(hello_dict[key], p.name, p.confidence))\n\n\ndef texblob_detection():\n for key in hello_dict:\n b = textblob.TextBlob(hello_dict[key])\n print(\"{} - {}\".format(hello_dict[key], b))\n\n\n\n#polygot_detection()\ntexblob_detection()\n", "sub_path": "python/language/language_detect.py", "file_name": "language_detect.py", "file_ext": "py", "file_size_in_byte": 561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "polyglot.detect.Detector", "line_number": 12, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "262511262", 
"text": "import pandas as pd\nimport numpy as np\nimport re\nimport os\nimport matplotlib.pyplot as plt\nfrom nltk.corpus import stopwords\nfrom six.moves import cPickle as pickle\nfrom nltk.corpus import stopwords\nfrom sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\nimport itertools\nno_alignment_file = [4764]\nwrong_alignment = [3730]\nfrom keras.layers import Activation, Input, Dense, Flatten, Dropout, Embedding\nfrom keras.layers.convolutional import Conv1D, MaxPooling1D\nfrom keras.layers.merge import concatenate\nfrom keras import regularizers\nfrom keras.models import Model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.model_selection import train_test_split\nfrom keras.optimizers import Adadelta\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau\nimport keras_metrics\n\ndef extract_patterns(data,extract=False):\n if(extract):\n patterns = {}\n for index, row in data.iterrows():\n patterns[row['index']] = set(get_pattern([row['text']])[0].values())\n print('Extracted pattern from '+ row['index'] + ' index:'+ str(index))\n print('Size: ', len(patterns[row['index']]), 'Patterns size', len(patterns))\n try:\n print('Saving Pickle')\n with open('pickles/patterns/pattern.pickle','wb') as f:\n save = {\n 'patterns' : patterns\n }\n pickle.dump(save,f,pickle.HIGHEST_PROTOCOL)\n print('Successfully saved in pattern.pickle')\n return patterns\n except Exception as e:\n print('Unable to save data to pickle', e)\n print('Patterns probably not saved.')\n return patterns\n else:\n try:\n with open('pickles/patterns/pattern.pickle','rb') as f:\n save = pickle.load(f)\n patterns = save['patterns']\n del save\n returning = {}\n for key in list(data['index']):\n returning[key] = patterns[key]\n return returning\n except Exception as e:\n print('Error loading base datasets pickle: ', e)\n \ndef clean_text(text, remove_actions= True):\n punct_str = '!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~«»“…‘”'\n if(remove_actions):\n text = re.sub(r\" ?\\[[^)]+\\]\", \"\", text)\n for p in punct_str:\n text = text.replace(p,' ')\n text = re.sub(' +', ' ', text)\n return text.lower().strip()\n\ndef filter_word_count(data, n_count):\n return data[list(map(lambda x: len(x.split(' ')) >= n_count,data['text']))]\n\n\ndef remove_empty_patterns(data,patterns):\n empty_patterns = [k for k, v in patterns.items() if len(v) < 1]\n patterns = { k:v for k, v in patterns.items() if len(v) >= 1 }\n data = filter(lambda x: x[1]['index'] not in empty_patterns ,data.iterrows())\n data = pd.DataFrame.from_items(data).T\n return data,patterns\n\ndef remove_multiwildcard(patterns):\n for index, patt in patterns.items():\n flt_patt = {p for p in patt if p.split(' ').count('.+') == 1}\n patterns[index] = flt_patt\n return patterns\n\ndef load_data(word_count,emotional_mapping):\n # full = generate_IEMOCAP_df()\n data = pd.read_csv('data/IEMOCAP_sentences_votebased.csv',index_col=0)\n data['emotion_code'] = data['emotion'].map( emotional_mapping ).astype(int)\n # Take away fear, surprise,disgust, xxx and others. 
Not enough data\n data = data[data.emotion_code < 4]\n #Remove rows that don't have Alignment file\n try:\n data = data.drop(no_alignment_file)\n except Exception as e:\n print('Error at: ',e)\n # Remove rows that have wrong Alignment file\n try:\n data = data.drop(wrong_alignment)\n except Exception as e:\n print('Error at: ',e)\n# Clean Transcripts\n data['text'] = data['text'].apply(clean_text)\n # Filter Word Count\n data = filter_word_count(data, word_count)\n patterns = extract_patterns(data)\n data,patterns = remove_empty_patterns(data,patterns)\n patterns = remove_multiwildcard(patterns)\n return data,patterns\n\ndef load_acoustic_fullmatrices(extraction_type = 'full',extract_fd = False):\n if(extraction_type in ['full','wc','cw']):\n try:\n if(extract_fd):\n fullmfcc_matrix_fd = None\n fullrmse_matrix_fd = pd.read_pickle('pickles/patterns/'+extraction_type+'_rmse_matrix_fd.pickle')\n print('Successfully loaded '+extraction_type+' RMSE Matrix FULLDATA')\n fullzcr_matrix_fd = pd.read_pickle('pickles/patterns/'+extraction_type+'_zcr_matrix_fd.pickle')\n print('Successfully loaded '+extraction_type+' ZCR Matrix FULLDATA') \n with open('pickles/patterns/'+extraction_type+'_mfcc20_matrix_fd.pickle','rb') as f:\n save = pickle.load(f)\n fullmfcc_matrix_fd = save['multimatrix']\n del save\n print('Successfully loaded '+extraction_type+' MFCC Matrices FULLDATA')\n fullmfcc_matrix_fd.append(fullrmse_matrix_fd)\n fullmfcc_matrix_fd.append(fullzcr_matrix_fd)\n return fullmfcc_matrix_fd\n else:\n fullmfcc_matrix = None\n fullrmse_matrix = pd.read_pickle('pickles/patterns/'+extraction_type+'_rmse_matrix.pickle')\n print('Successfully loaded '+extraction_type+' RMSE Matrix') \n fullzcr_matrix = pd.read_pickle('pickles/patterns/'+extraction_type+'_zcr_matrix.pickle')\n print('Successfully loaded '+extraction_type+' ZCR Matrix')\n with open('pickles/patterns/'+extraction_type+'_mfcc20_matrix.pickle','rb') as f:\n save = pickle.load(f)\n fullmfcc_matrix = save['multimatrix']\n del save\n print('Successfully loaded '+extraction_type+' MFCC Matrices') \n fullmfcc_matrix.append(fullrmse_matrix)\n fullmfcc_matrix.append(fullzcr_matrix)\n return fullmfcc_matrix\n except Exception as e:\n print('Error loading matrix: ', e)\n else:\n print('Error')\n return None,None\n\ndef get_frequency_vectors(data,patterns_list):\n patterns = extract_patterns(data)\n transcript_order = list(data['index'])\n frequency_vectors = []\n for index in patterns:\n frequency_vectors.append(np.isin(patterns_list,np.array(list(patterns[index]))))\n vectors = pd.DataFrame(frequency_vectors,columns=patterns_list,index=patterns.keys())\n vectors = vectors.loc[transcript_order]\n vectors = vectors * 1\n return vectors\n\nseed = 7\nnp.random.seed(seed)\nemotional_mapping = {'ang': 0, 'sad': 1, 'hap': 2, 'neu': 3,'fru': 4,'exc': 5,'fea': 6,'sur': 7,'dis': 8, 'xxx':9,'oth':10}\n\ndata, patterns = load_data(3,emotional_mapping)\n# x_train, x_test, y_train, y_test = train_test_split(data, data.emotion_code, test_size=TEST_SIZE)\ntry:\n with open('pickles/matrix_basedata.pickle','rb') as f:\n save = pickle.load(f)\n X_train = save['X_train']\n X_test = save['X_test']\n y_train = save['y_train']\n y_test = save['y_test']\n del save\nexcept Exception as e:\n print('Error loading base datasets pickle: ', e)\n\ny_train = pd.get_dummies(y_train).values\ny_test = pd.get_dummies(y_test).values\n\nfull_matrices = load_acoustic_fullmatrices(extraction_type='full',extract_fd = True)\nwc_matrices = 
load_acoustic_fullmatrices(extraction_type='wc',extract_fd = True)\ncw_matrices = load_acoustic_fullmatrices(extraction_type='cw',extract_fd = True)\n########################################################################################\nRMSE_INDEX = 20\nZCR_INDEX = 21\n###########################################################################################\n\nem_df = pd.read_pickle('pickles/patterns/pfief_matrix.pickle')\n\npatterns_list = np.array(list(em_df.index))\nprint(len(em_df),len(full_matrices),len(wc_matrices),len(cw_matrices))\n\nvectors = get_frequency_vectors(X_train,patterns_list)\ntest_vectors = get_frequency_vectors(X_test,patterns_list)\n\n###########################################################################################\n####### PARAMETERS ########\n# EMBEDDING\nEMBEDDING_DIM = 4\nMAX_SEQ_LENGTH = 170\n\n# MODEL\nFILTER_SIZES = [1,1,1]\nFEATURE_MAPS = [150,150,150]\nDROPOUT_RATE = 0.2\n\n# LEARNING\nBATCH_SIZE = 200\nNB_EPOCHS = 50\nRUNS = 1\nVAL_SIZE = 0.2\nLEARNING_RATE = 0.01\n\n##############################################################################\n# acoustic_matrix = full_matrices[RMSE_INDEX]\n# acoustic_matrix = acoustic_matrix.fillna(np.max(acoustic_matrix))\nNUM_CHANNELS = 22\nacoustic_matrices = full_matrices[:20].copy()\nacoustic_matrices.append(full_matrices[ZCR_INDEX])\nfor i,am in enumerate(acoustic_matrices):\n acoustic_matrices[i] = acoustic_matrices[i].fillna(np.max(acoustic_matrices[i]))\n\n#######################################\nfull_data = []\nfor key, row in vectors.iterrows():\n final = []\n row_patt = [ i for i,v in row.iteritems() if v == 1]\n row_matrix = em_df.loc[row_patt,:].as_matrix()\n pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix\n final.append(pad)\n ### ACU MATRICES ###\n for i,am in enumerate(acoustic_matrices):\n acu_matrix = am.loc[row_patt,:].as_matrix()\n acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix\n final.append(acu_pad)\n full_data.append(final)\n \ntest_full_data = []\nfor key, row in test_vectors.iterrows():\n final = []\n row_patt = [ i for i,v in row.iteritems() if v == 1]\n row_matrix = em_df.loc[row_patt,:].as_matrix()\n pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix\n final.append(pad)\n ### ACU MATRICES ###\n for i,am in enumerate(acoustic_matrices):\n acu_matrix = am.loc[row_patt,:].as_matrix()\n acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix\n final.append(acu_pad)\n test_full_data.append(final)\n\nacoustic_matrices = cw_matrices[:20].copy()\nacoustic_matrices.append(cw_matrices[ZCR_INDEX])\nfor i,am in enumerate(acoustic_matrices):\n acoustic_matrices[i] = acoustic_matrices[i].fillna(np.max(acoustic_matrices[i]))\n\ncw_data = []\nfor key, row in vectors.iterrows():\n final = []\n row_patt = [ i for i,v in row.iteritems() if v == 1]\n row_matrix = em_df.loc[row_patt,:].as_matrix()\n pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix\n final.append(pad)\n ### ACU MATRICES ###\n for i,am in enumerate(acoustic_matrices):\n acu_matrix = am.loc[row_patt,:].as_matrix()\n acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix\n final.append(acu_pad)\n cw_data.append(final)\n \ntest_cw_data = []\nfor key, row in 
test_vectors.iterrows():\n final = []\n row_patt = [ i for i,v in row.iteritems() if v == 1]\n row_matrix = em_df.loc[row_patt,:].as_matrix()\n pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix\n final.append(pad)\n ### ACU MATRICES ###\n for i,am in enumerate(acoustic_matrices):\n acu_matrix = am.loc[row_patt,:].as_matrix()\n acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix\n final.append(acu_pad)\n test_cw_data.append(final)\n\n\nacoustic_matrices = wc_matrices[:20].copy()\nacoustic_matrices.append(wc_matrices[ZCR_INDEX])\n\nfor i,am in enumerate(acoustic_matrices):\n acoustic_matrices[i] = acoustic_matrices[i].fillna(np.max(acoustic_matrices[i]))\n\n\nwc_data = []\nfor key, row in vectors.iterrows():\n final = []\n row_patt = [ i for i,v in row.iteritems() if v == 1]\n row_matrix = em_df.loc[row_patt,:].as_matrix()\n pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix\n final.append(pad)\n ### ACU MATRICES ###\n for i,am in enumerate(acoustic_matrices):\n acu_matrix = am.loc[row_patt,:].as_matrix()\n acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix\n final.append(acu_pad)\n wc_data.append(final)\n \ntest_wc_data = []\nfor key, row in test_vectors.iterrows():\n final = []\n row_patt = [ i for i,v in row.iteritems() if v == 1]\n row_matrix = em_df.loc[row_patt,:].as_matrix()\n pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix\n final.append(pad)\n ### ACU MATRICES ###\n for i,am in enumerate(acoustic_matrices):\n acu_matrix = am.loc[row_patt,:].as_matrix()\n acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))\n acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix\n final.append(acu_pad)\n test_wc_data.append(final)\nimport time\nimport multiap_cnn_model\n# # BALANCED DATA\nprinting = {}\nFILTER_SIZES_AR = [[1,1,1]]\nfilter_sizes_names = ['1_1_1']\nFEATURE_MAPS_AR = [[150,150,150]]\nfeature_maps_names = ['150']\nDROPOUT_RATE = 0.2\nLEARNING_RATE = 0.01\nRUNS = 1 \nDATA_AR = [ wc_data,cw_data]\nTEST_DATA_AR = [test_wc_data,test_cw_data]\ndata_names = ['wc','cw']\nMAX_SEQ_LENGTH = 170\nfor Findex,filterS in enumerate(FILTER_SIZES_AR):\n for Mindex, featureM in enumerate(FEATURE_MAPS_AR):\n for Dindex, dataV in enumerate(DATA_AR):\n FILTER_SIZES = filterS\n FEATURE_MAPS = featureM\n histories = []\n for i in range(RUNS):\n print('Running iteration %i/%i' % (i+1, RUNS))\n start_time = time.time()\n emb_layer = None\n\n model = multiap_cnn_model.build_cnn(\n embedding_dim= EMBEDDING_DIM,\n filter_sizes = FILTER_SIZES,\n feature_maps = FEATURE_MAPS, \n max_seq_length = MAX_SEQ_LENGTH,\n dropout_rate=DROPOUT_RATE,\n num_channels=NUM_CHANNELS\n )\n\n model.compile(\n loss='binary_crossentropy',\n optimizer=Adadelta(clipvalue=3,lr=LEARNING_RATE),\n metrics=['accuracy',keras_metrics.precision(),keras_metrics.recall()]\n )\n\n history = model.fit(\n [dataV], y_train,\n epochs=NB_EPOCHS,\n batch_size=BATCH_SIZE,\n verbose=1,\n validation_data=([TEST_DATA_AR[Dindex]], y_test),\n callbacks=[ModelCheckpoint('model-%i.h5'%(i+1), monitor='val_loss',\n verbose=0, save_best_only=True, mode='min'),\n ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4, min_lr=0.01)\n ]\n )\n histories.append(history.history)\n print('Iteration', i+1)\n print(\"--- %s seconds on ---\" % (time.time() - 
start_time))\n\n with open('history/mfcc20_zcr/_FS'+str(filter_sizes_names[Findex])+'_FM_'+str(feature_maps_names[Mindex])+'_data_'+str(data_names[Dindex])+'.pkl', 'wb') as f:\n pickle.dump(histories, f)\n\n", "sub_path": "run_model.py", "file_name": "run_model.py", "file_ext": "py", "file_size_in_byte": 15676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "six.moves.cPickle.dump", "line_number": 38, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 38, "usage_type": "name"}, {"api_name": "six.moves.cPickle.HIGHEST_PROTOCOL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "six.moves.cPickle.load", "line_number": 48, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 48, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 61, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_items", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 116, "usage_type": "call"}, {"api_name": "six.moves.cPickle.load", "line_number": 119, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 119, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 130, "usage_type": "call"}, {"api_name": "six.moves.cPickle.load", "line_number": 133, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.isin", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 158, "usage_type": "attribute"}, {"api_name": "six.moves.cPickle.load", "line_number": 165, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 165, "usage_type": "name"}, {"api_name": "pandas.get_dummies", "line_number": 174, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 309, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 325, "usage_type": "call"}, {"api_name": "time.time", "line_number": 352, "usage_type": "call"}, {"api_name": "multiap_cnn_model.build_cnn", "line_number": 355, "usage_type": "call"}, {"api_name": "keras.optimizers.Adadelta", "line_number": 366, "usage_type": "call"}, {"api_name": "keras_metrics.precision", "line_number": 367, "usage_type": "call"}, {"api_name": "keras_metrics.recall", "line_number": 367, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 376, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 378, "usage_type": "call"}, {"api_name": "time.time", "line_number": 383, "usage_type": "call"}, {"api_name": "six.moves.cPickle.dump", "line_number": 386, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 386, "usage_type": "name"}]} +{"seq_id": "278790596", "text": "import os \r\nfrom os import listdir\r\n\r\nfrom openvino.inference_engine import IECore\r\nimport cv2\r\nimport numpy as np\r\nfrom scipy.spatial.distance import cosine\r\nimport imutils\r\nimport dlib\r\nfrom numpy import savez_compressed\r\n\r\n\r\ndet_model=r\"C:\\Users\\LENOVO\\Desktop\\FaceReid\\detection_model\\face-detection-0202.xml\"\r\ndet_weights=os.path.splitext(det_model)[0] +'.bin'\r\n\r\nreid_model=r\"C:\\Users\\LENOVO\\Desktop\\Detect&Recognize\\face_net_mobile_face\\model-0000.xml\"\r\nreid_weights=os.path.splitext(reid_model)[0] +'.bin'\r\n\r\ndef output_handler(frame,result,height,width):\r\n\tfaces=list()\r\n\tfor box in result[0][0]:\r\n\t\tif box[2]>0.5:\r\n\t\t\txmin=int(box[3] *width)\r\n\t\t\tymin=int(box[4] *height)\r\n\t\t\txmax=int(box[5] *width)\r\n\t\t\tymax=int(box[6] *height)\r\n\r\n\t\t\tface=frame[ymin:ymax,xmin:xmax]\r\n\t\t\tface=cv2.resize(face,(112,112))\r\n\t\t\tfaces.append(face)\r\n\treturn faces\r\n\r\ndef extract_face(filename):\r\n\tplugin=IECore()\r\n\tnet=plugin.read_network(model=det_model,weights=det_weights)\r\n\texec_net=plugin.load_network(network=net,device_name=\"CPU\")\r\n\r\n\tinput_blob=list(net.input_info.keys())[0]\r\n\toutput_blob=next(iter(net.outputs))\r\n\r\n\tb,c,h,w=net.input_info[input_blob].input_data.shape\r\n\r\n\timage=cv2.imread(filename)\r\n\theight=image.shape[0]\r\n\twidth=image.shape[1]\r\n\r\n\tp_image=cv2.resize(image,(w,h))\r\n\tp_image=p_image.transpose((2,0,1))\r\n\tp_image=p_image.reshape(1,3,h,w)\r\n\r\n\tinfer_request=exec_net.start_async(request_id=0,inputs={input_blob:p_image})\r\n\tstatus=exec_net.requests[0].wait(-1)\r\n\r\n\tif status==0:\r\n\t\tresult=exec_net.requests[0].outputs[output_blob]\r\n\t\treturn output_handler(image,result,height,width)[0]\r\n\r\ndef reidentify(test_subject):\r\n\treid_plugin=IECore()\r\n\treid_net=reid_plugin.read_network(model=reid_model,weights=reid_weights)\r\n\treid_execnet=reid_plugin.load_network(network=reid_net,device_name=\"CPU\")\r\n\r\n\r\n\treid_inputblob=list(reid_net.input_info.keys())[0]\r\n\treid_outputblob=next(iter(reid_net.outputs))\r\n\tb,c,h,w=reid_net.input_info[reid_inputblob].input_data.shape\r\n\tp_image=cv2.cvtColor(test_subject,cv2.COLOR_BGR2RGB)\r\n\tp_image=cv2.resize(test_subject,(w,h))\r\n\tp_image=p_image.transpose((2,0,1))\r\n\tp_image=p_image.reshape(1,3,h,w)\r\n\r\n\r\n\tinfer_request=reid_execnet.start_async(request_id=0,inputs={reid_inputblob:p_image})\r\n\tstatus=reid_execnet.requests[0].wait(-1)\r\n\tif 
status==0:\r\n\t\tresult=reid_execnet.requests[0].outputs[reid_outputblob]\r\n\r\n\t\t#This stores embeddings\r\n\t\t#print(result[0])\r\n\t\t#print('storing embedding')\r\n\t\t#savez_compressed('tariq3.npz',result[0])\r\n\r\n\t\t#return np.array(result).reshape((1,256))[0]\r\n\t\treturn result[0]\r\n\r\ndef is_match(known_embedding,candidate_embedding,thresh=0.5):\r\n\t#calculate the distance between embeddings\r\n\tscore=cosine(known_embedding,candidate_embedding)\r\n\t#score= np.sqrt(np.sum(np.square(np.subtract(known_embedding, candidate_embedding))))\r\n\tif score<=thresh:\r\n\t\tprint('face is a match',('Score: ',score,' Threshold: ',thresh))\r\n\telse:\r\n\t\tprint('face is not a match',('Score: ',score,' Threshold: ',thresh))\r\n\r\n\r\nMOT={}\r\n\r\n\r\nANC={\r\n\t#'anthony-mackie':\"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/anthony-mackie/Anthony Mackie28_529.jpg\",\r\n\t#'daniel-kaluuya':\"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/daniel-kaluuya/download (1).jpg\",\r\n\t#'idris-elba':\"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/idris-elba/images (2).jpg\",\r\n\t#'kanye-west':\"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/kanye-west/images (15).jpg\",\r\n\t#'lupita':\"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/lupita/download.jpg\"\r\n\t'michael-blackson':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/michael-blackson/503862_v9_bb.jpg\",\r\n\t'morgan-freeman':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/morgan-freeman/2402.jpg\",\r\n\t'obama':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/obama/barack obama40_712.jpg\",\r\n\t'olivia-pope':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/olivia-pope/download (1).jpg\"\r\n\t#'rihanna':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/rihanna/images (1).jpg\",\r\n\t#'thiery-henry':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/thiery-henry/images (62).jpg\",\r\n\t#'viola-davis':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/viola-davis/‘HTGAWM’s-Viola-Davis-Why-Playing-Annalise-Keating-Has-‘Meant-Everything.jpg\",\r\n\t#'will-smith':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/will-smith/download (3).jpg\",\r\n\t#'zendaya':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/zendaya/591658_v9_bb.jpg\",\r\n\t#'zoe-saldana':\"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/zoe-saldana/e708c468969d68c966422f5962e7f69453-2-zoe-saldana.2x.rhorizontal.w710.jpg\"\r\n\r\n\r\n\t}\r\n\r\ndirectory=\"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeTwo\"\r\n\r\n\r\n\r\n\r\nfor name in listdir(directory):\r\n\tprint('Now on: ',name)\r\n\tcount=0\r\n\tMOT[str(name)]=[]\r\n\tpath=directory+'/' +name\r\n\r\n\t\r\n\r\n\tfor file in listdir(path):\r\n\t\tcurrent_location=path+'/' +file\r\n\t\textracted_ancFace=extract_face(ANC[str(name)])\r\n\t\ttest_img=extract_face(current_location)\r\n\r\n\t\temb1=reidentify(extracted_ancFace)\r\n\t\temb2=reidentify(test_img)\r\n\r\n\t\tscore=cosine(emb1,emb2)\r\n\r\n\t\tMOT[str(name)].append(score)\r\n\r\n\tprint('End of ',name)\r\n\r\n\t\t\r\n\r\n\r\n\t\t\r\n\r\nprint(MOT)\r\n\r\nprint(\"\\n\")\r\n\r\nprint(\"Bias Percentages\")\r\nprint(\"\\n\")\r\n\r\nprint(\"At Threshold 0.5\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.5:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: 
\",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\n\r\nprint(\"At Threshold 0.55\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.55:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.6\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.6:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.65\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.65:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.70\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.70:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.75\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.75:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.80\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.80:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.85\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.85:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\nprint(\"At Threshold 0.90\")\r\n\r\nfor name in MOT.keys():\r\n\tvalues=MOT[str(name)]\r\n\tmasked=[]\r\n\tfor val in values:\r\n\t\tif val<=0.90:\r\n\t\t\tmasked.append(1)\r\n\t\telse:\r\n\t\t\tmasked.append(0)\r\n\r\n\tpercentage=sum(masked)/(len(masked)-1)\r\n\r\n\tprint(name,\" accuracy: \",percentage)\r\n\tprint(\"\\n\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "Evaluating the Oneshot model/modelEval-Straightface.py", "file_name": "modelEval-Straightface.py", "file_ext": "py", "file_size_in_byte": 7910, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.splitext", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 29, "usage_type": "call"}, {"api_name": 
"openvino.inference_engine.IECore", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 47, "usage_type": "call"}, {"api_name": "openvino.inference_engine.IECore", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 88, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 124, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 132, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "114287718", "text": "import pgl\nimport paddle.fluid as F\nimport paddle.fluid.layers as L\nfrom models.base import BaseNet, BaseGNNModel\nfrom models.ernie_model.ernie import ErnieModel\nfrom models.ernie_model.ernie import ErnieGraphModel\nfrom models.ernie_model.ernie import ErnieConfig\n\n\nclass ErnieSageV2(BaseNet):\n\n def build_inputs(self):\n inputs = super(ErnieSageV2, self).build_inputs()\n term_ids = L.data(\n \"term_ids\", shape=[None, self.config.max_seqlen], dtype=\"int64\", append_batch_size=False)\n return inputs + [term_ids]\n\n def gnn_layer(self, gw, feature, hidden_size, act, initializer, learning_rate, name):\n def ernie_send(src_feat, dst_feat, edge_feat):\n \"\"\"doc\"\"\"\n cls = L.fill_constant_batch_size_like(src_feat[\"term_ids\"], [-1, 1, 1], \"int64\", 1)\n src_ids = L.concat([cls, src_feat[\"term_ids\"]], 1)\n dst_ids = dst_feat[\"term_ids\"]\n\n sent_ids = L.concat([L.zeros_like(src_ids), L.ones_like(dst_ids)], 1)\n term_ids = L.concat([src_ids, dst_ids], 1)\n\n term_ids.stop_gradient = True\n sent_ids.stop_gradient = True\n ernie = ErnieModel(\n term_ids, sent_ids,\n config=self.config.ernie_config)\n feature = ernie.get_pooled_output()\n return feature\n\n def erniesage_v2_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name):\n feature = L.unsqueeze(feature, [-1])\n msg = gw.send(ernie_send, nfeat_list=[(\"term_ids\", feature)])\n neigh_feature = gw.recv(msg, lambda feat: F.layers.sequence_pool(feat, pool_type=\"sum\"))\n\n term_ids = feature\n cls = L.fill_constant_batch_size_like(term_ids, [-1, 1, 1], \"int64\", 1)\n term_ids = L.concat([cls, term_ids], 1)\n term_ids.stop_gradient = True\n ernie = ErnieModel(\n term_ids, L.zeros_like(term_ids),\n config=self.config.ernie_config)\n self_feature = ernie.get_pooled_output()\n\n self_feature = L.fc(self_feature,\n hidden_size,\n act=act,\n param_attr=F.ParamAttr(name=name + \"_l\",\n learning_rate=learning_rate),\n )\n neigh_feature = L.fc(neigh_feature,\n hidden_size,\n act=act,\n param_attr=F.ParamAttr(name=name + \"_r\",\n learning_rate=learning_rate),\n )\n output = L.concat([self_feature, neigh_feature], axis=1)\n output = L.l2_normalize(output, axis=1)\n return output\n return erniesage_v2_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name)\n\n def gnn_layers(self, graph_wrappers, feature):\n features = [feature]\n\n initializer = None\n fc_lr = self.config.lr / 0.001\n\n for i in range(self.config.num_layers):\n if i == self.config.num_layers - 1:\n act = None\n else:\n act = \"leaky_relu\"\n\n feature = self.gnn_layer(\n graph_wrappers[i],\n feature,\n self.config.hidden_size,\n act,\n initializer,\n 
learning_rate=fc_lr,\n name=\"%s_%s\" % (\"erniesage_v2\", i))\n features.append(feature)\n return features\n\n def __call__(self, graph_wrappers):\n inputs = self.build_inputs()\n feature = inputs[-1]\n features = self.gnn_layers(graph_wrappers, feature)\n outputs = [self.take_final_feature(features[-1], i, \"final_fc\") for i in inputs[:-1]]\n src_real_index = L.gather(graph_wrappers[0].node_feat['index'], inputs[0])\n outputs.append(src_real_index)\n return inputs, outputs\n\n\nclass ErnieSageModelV2(BaseGNNModel):\n def gen_net_fn(self, config):\n return ErnieSageV2(config)\n", "sub_path": "examples/erniesage/models/erniesage_v2.py", "file_name": "erniesage_v2.py", "file_ext": "py", "file_size_in_byte": 4208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "models.base.BaseNet", "line_number": 10, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.data", "line_number": 14, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 14, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fill_constant_batch_size_like", "line_number": 21, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 21, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 22, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 22, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 25, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 25, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.zeros_like", "line_number": 25, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.ones_like", "line_number": 25, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 26, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 26, "usage_type": "name"}, {"api_name": "models.ernie_model.ernie.ErnieModel", "line_number": 30, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 37, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 37, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.sequence_pool", "line_number": 39, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 39, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 39, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fill_constant_batch_size_like", "line_number": 42, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 42, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 43, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 43, "usage_type": "name"}, {"api_name": "models.ernie_model.ernie.ErnieModel", "line_number": 45, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.zeros_like", "line_number": 46, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 46, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 50, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 50, "usage_type": "name"}, {"api_name": "paddle.fluid.ParamAttr", "line_number": 53, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 53, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 56, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 56, "usage_type": "name"}, {"api_name": "paddle.fluid.ParamAttr", 
"line_number": 59, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 59, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 62, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 62, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.l2_normalize", "line_number": 63, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 63, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.gather", "line_number": 95, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 95, "usage_type": "name"}, {"api_name": "models.base.BaseGNNModel", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "13869570", "text": "###\n# Script for plotting and animating both PurpleAir and AQY data from a controlled experiment\n# across multiple rounds. If a similar experiment is performed in the future, the filenames\n# and round times can be subsituted into the global variables at the top of the script to\n# produce new plots\n###\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom pytz import timezone\nfrom scipy import interpolate\n\nS1 = \"exp5_data/PA1.csv\" # 0 0 0 0\nS2 = \"exp5_data/PA2.csv\" # 2 6 10 14\nS3 = \"exp5_data/PA2.csv\" # 4 8 12 16\nS4 = \"exp5_data/PA4.csv\" # 30 30 30 30\nAERO_EXHAUST = \"exp5_data/AQY BD-1160 Data Export.csv\"\nAERO_DISTANT = \"exp5_data/AQY BD-1161 Data Export.csv\" # 30 4 8 12 16\n\n# Start, End times for each round\nROUND_TIMES = {1: [\"16:28:54\", \"16:55:00\"],\n 2: [\"15:07:52\", \"15:22:54\"],\n 3: [\"15:32:28\", \"15:47:05\"],\n 4: [\"15:58:46\", \"16:12:19\"]}\n\nGLOBAL_START = datetime.strptime(\"15:00:00\", \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific'))\n\nR1 = [datetime.strptime(ROUND_TIMES[1][0], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific')), \\\n datetime.strptime(ROUND_TIMES[1][1], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific'))]\nR2 = [datetime.strptime(ROUND_TIMES[2][0], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific')), \\\n datetime.strptime(ROUND_TIMES[2][1], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific'))]\nR3 = [datetime.strptime(ROUND_TIMES[3][0], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific')), \\\n datetime.strptime(ROUND_TIMES[3][1], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific'))]\nR4 = [datetime.strptime(ROUND_TIMES[4][0], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific')), \\\n datetime.strptime(ROUND_TIMES[4][1], \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific'))]\n\nround_list = [R1, R2, R3, R4]\n\nR2_RANGE = [datetime.strptime(\"15:02:00\", \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific')), \\\n datetime.strptime(\"15:28:00\", \"%H:%M:%S\").replace(tzinfo=timezone('US/Pacific'))]\n\ndef dateToMinutes(start, date):\n delta = date - start\n secs = delta.total_seconds()\n return secs/60\n\ndef purple_air_full(filename):\n df = pd.read_csv(filename)\n pm = df[\"pm2_5_atm\"].tolist()\n times = [x[-9:-1] for x in df[\"UTCDateTime\"].tolist()]\n dates = [datetime.strptime(x, \"%H:%M:%S\") for x in times]\n utcTimes = [x.replace(tzinfo=timezone('UTC')) for x in dates]\n pstTimes = [x.astimezone(timezone('US/Pacific'))-timedelta(minutes=7) for x in utcTimes]\n for i in range(len(pstTimes)):\n if pstTimes[i].day == 31:\n pstTimes[i] = pstTimes[i]+timedelta(days=1)\n return [pm, [dateToMinutes(GLOBAL_START, x) for x in pstTimes]]\n\nS1F = purple_air_full(S1)\nS2F = 
purple_air_full(S2)\nS3F = purple_air_full(S3)\nS4F = purple_air_full(S4)\n\ndef purple_air(filename, timeRange = None):\n df = pd.read_csv(filename)\n pm = df[\"pm2_5_atm\"].tolist()\n times = [x[-9:-1] for x in df[\"UTCDateTime\"].tolist()]\n dates = [datetime.strptime(x, \"%H:%M:%S\") for x in times]\n utcTimes = [x.replace(tzinfo=timezone('UTC')) for x in dates]\n pstTimes = [x.astimezone(timezone('US/Pacific'))-timedelta(minutes=7) for x in utcTimes]\n for i in range(len(pstTimes)):\n if pstTimes[i].day == 31:\n pstTimes[i] = pstTimes[i]+timedelta(days=1)\n if timeRange != None:\n newVals = []\n newPstTimes = []\n for i in range(len(pstTimes)):\n date = pstTimes[i]\n if timeRange[0] < date and date < timeRange[1]:\n newVals.append(pm[i])\n newPstTimes.append(dateToMinutes(timeRange[0], date))\n return (newPstTimes, newVals)\n rounds = []\n for _ in range(5):\n rounds.append([[], []])\n for i in range(len(pstTimes)):\n date = pstTimes[i]\n if R1[0] < date and date < R1[1]:\n rounds[0][0].append(pm[i])\n rounds[0][1].append(dateToMinutes(R1[0], date))\n elif R2[0] < date and date < R2[1]:\n rounds[1][0].append(pm[i])\n rounds[1][1].append(dateToMinutes(R2[0], date))\n elif R3[0] < date and date < R3[1]:\n rounds[2][0].append(pm[i])\n rounds[2][1].append(dateToMinutes(R3[0], date))\n elif R4[0] < date and date < R4[1]:\n rounds[3][0].append(pm[i])\n rounds[3][1].append(dateToMinutes(R4[0], date))\n return rounds\n\nS1R = purple_air(S1)\nS2R = purple_air(S2)\nS3R = purple_air(S3)\nS4R = purple_air(S4)\n\ndef subtract(xR, yR):\n f = interpolate.interp1d(yR[1], yR[0], fill_value='extrapolate')\n yInterp = f(xR[1])\n return [a - b for a, b in zip(xR[0], yInterp)]\n\ndef subtractTest():\n plt.plot(S1R[0][1], S1R[0][0])\n testY = [[x+100 for x in S1R[0][0]], [y+4 for y in S1R[0][1]]]\n plt.plot(testY[1], testY[0])\n plt.plot(S1R[0][1], subtract(S1R[0], testY))\n plt.show()\n\ndef divide(xR, yR):\n f = interpolate.interp1d(yR[1], yR[0], fill_value='extrapolate')\n yInterp = f(xR[1])\n return [a / b for a, b in zip(xR[0], yInterp)]\n\ndef testPlot():\n plt.plot(S1R[2][1], S1R[2][0])\n plt.plot(S3R[2][1], S3R[2][0])\n plt.plot(S4R[2][1], S4R[2][0])\n plt.legend([\"1\", \"3\", \"4\"])\n plt.show()\n\ndef s2_ratios():\n r2s2sub = [subtract(S2R[1], S4R[1]), S2R[1][1]]\n r2s1sub = [subtract(S1R[1], S4R[1]), S1R[1][1]]\n r2div = divide(r2s2sub, r2s1sub)\n plt.plot(S2R[1][1], r2div)\n\n r3s2sub = [subtract(S2R[2], S4R[2]), S2R[2][1]]\n r3s1sub = [subtract(S1R[2], S4R[2]), S1R[2][1]]\n r3div = divide(r3s2sub, r3s1sub)\n plt.plot(S2R[2][1], r3div)\n\n r4s2sub = [subtract(S2R[3], S4R[3]), S2R[3][1]]\n r4s1sub = [subtract(S1R[3], S4R[3]), S1R[3][1]]\n r4div = divide(r4s2sub, r4s1sub)\n plt.plot(S2R[3][1], r4div)\n\n plt.legend([\"Round 2 (2 ft)\", \"Round 3 (6 ft)\", \"Round 4 (10 ft)\"])\n plt.show()\n\ndef s3_ratios():\n r2s3sub = [subtract(S3R[1], S4R[1]), S3R[1][1]]\n r2s1sub = [subtract(S1R[1], S4R[1]), S1R[1][1]]\n r2div = divide(r2s3sub, r2s1sub)\n plt.plot(S3R[1][1], r2div)\n\n r3s3sub = [subtract(S3R[2], S4R[2]), S3R[2][1]]\n r3s1sub = [subtract(S1R[2], S4R[2]), S1R[2][1]]\n r3div = divide(r3s3sub, r3s1sub)\n plt.plot(S3R[2][1], r3div)\n\n r4s3sub = [subtract(S3R[3], S4R[3]), S3R[3][1]]\n r4s1sub = [subtract(S1R[3], S4R[3]), S1R[3][1]]\n r4div = divide(r4s3sub, r4s1sub)\n plt.plot(S3R[3][1], r4div)\n\n plt.legend([\"Round 2 (4 ft)\", \"Round 3 (8 ft)\", \"Round 4 (12 ft)\"])\n plt.show()\n\ndef aero(filename, param, plot=True, timeRange=None):\n df = pd.read_csv(filename)\n vals = 
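subtract() and divide() in the record above combine two series that were sampled on different clocks by first interpolating one onto the other's timestamps. A self-contained illustration of that alignment trick, with made-up sample data standing in for the sensor CSVs:

```python
import numpy as np
from scipy import interpolate

t_a = np.array([0.0, 1.0, 2.0, 3.0])      # series A timestamps (minutes)
v_a = np.array([10.0, 12.0, 11.0, 13.0])  # series A values
t_b = np.array([0.5, 1.5, 2.5])           # series B, sampled on a different clock
v_b = np.array([9.0, 10.0, 9.5])

f = interpolate.interp1d(t_b, v_b, fill_value='extrapolate')
print(v_a - f(t_a))  # B resampled onto A's timestamps, then subtracted pointwise
```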
df[param].tolist()\n times = [x[-5:] for x in df[\"Time\"].tolist()]\n dates = [datetime.strptime(x, \"%H:%M\") for x in times]\n pstTimes = [x.replace(tzinfo=timezone('US/Pacific')) for x in dates]\n for i in range(len(pstTimes)):\n if pstTimes[i].day == 31:\n pstTimes[i] = pstTimes[i]+timedelta(days=1)\n if timeRange != None:\n newVals = []\n newPstTimes = []\n for i in range(len(pstTimes)):\n date = pstTimes[i]\n if timeRange[0] < date and date < timeRange[1]:\n newVals.append(vals[i])\n newPstTimes.append(dateToMinutes(timeRange[0], date))\n vals = newVals\n pstTimes = newPstTimes\n else:\n pstTimes = [dateToMinutes(GLOBAL_START, x) for x in pstTimes]\n if plot:\n plt.plot(pstTimes, vals)\n return pstTimes, vals\n\ndef fig1():\n \"\"\" Plots PM2.5 levels over the entire experiment for each PurpleAir and AQY sensor \"\"\"\n plt.plot(S1F[1], S1F[0])\n plt.plot(S2F[1], S2F[0])\n plt.plot(S4F[1], S4F[0])\n aero(AERO_EXHAUST, \"PM2.5 (µg/m³)\")\n aero(AERO_DISTANT, \"PM2.5 (µg/m³)\")\n addTimeLines(GLOBAL_START)\n plt.legend([\"PurpleAir 1\", \"PurpleAir 2\", \"PurpleAir 4\", \"Aeroqual Exhaust\", \"Aeroqual Distant\"])\n plt.xlabel(\"Time [mins]\")\n plt.ylabel(\"PM$_{2.5}$ Concentration [$\\mu$g/m$^3$]\")\n plt.title(\"PM$_{2.5}$ Measurements\")\n plt.show()\n\ndef fig2():\n \"\"\" Plots NO2 levels over the entire experiment for each AQY sensor \"\"\"\n aero(AERO_EXHAUST, \"NO2 (ppb)\")\n aero(AERO_DISTANT, \"NO2 (ppb)\")\n addTimeLines(GLOBAL_START)\n plt.legend([\"Exhaust\", \"Distant\"])\n plt.title(\"Aeroqual NO2 Measurements\")\n plt.xlabel(\"Time [mins]\")\n plt.ylabel(\"NO2 Concentration [ppb]\")\n plt.show()\n\ndef fig3():\n \"\"\" Plots O3 levels over the entire experiment for each AQY sensor \"\"\"\n aero(AERO_EXHAUST, \"O3 (ppb)\")\n aero(AERO_DISTANT, \"O3 (ppb)\")\n addTimeLines(GLOBAL_START)\n plt.legend([\"Exhaust\", \"Distant\"])\n plt.title(\"Aeroqual O3 Measurements\")\n plt.xlabel(\"Time [mins]\")\n plt.ylabel(\"O3 Concentration [ppb]\")\n plt.show()\n\ndef fig4():\n \"\"\" Plots PM2.5 measurements for sensors 1 and 4 during round 2 \"\"\"\n fig, ax = plt.subplots()\n S1X, S1Y = purple_air(S1, R2_RANGE)\n S4X, S4Y = purple_air(S4, R2_RANGE)\n plt.plot(S1X, S1Y)\n plt.plot(S4X, S4Y)\n ax.set_ylim(-20, 200)\n addGray(ax, R2_RANGE[0], [2])\n plt.legend([\"1 ft from vehicle\", \"30 ft from vehicle\"])\n plt.xlabel(\"Time [mins]\")\n plt.ylabel(\"PM$_{2.5}$ Concentration [$\\mu$g/m$^3$]\")\n plt.title(\"PM$_{2.5}$ by Distance\")\n plt.show()\n\ndef addTimeLines(start, rounds = [1, 2, 3, 4]):\n for i in rounds:\n plt.axvline(x=dateToMinutes(start, round_list[i-1][0]), color=(1, 0, 0))\n plt.axvline(x=dateToMinutes(start, round_list[i-1][1]), color=(0, 0, 1))\n\ndef addGray(ax, start, rounds = [1, 2, 3, 4]):\n for i in rounds:\n ax.axvspan(dateToMinutes(start, round_list[i-1][0]), dateToMinutes(start, round_list[i-1][1]), alpha=0.2, color='gray')\n\ndef getInterpPoints(x1, y1, x2, y2, numPoints):\n \"\"\" Outputs numPoints interpolated points from [x1, x2) \"\"\"\n Xs = []\n for i in range(numPoints):\n newX = x1 + (i * (x2-x1) / numPoints)\n Xs.append(newX)\n Ys = []\n f = interpolate.interp1d([x1, x2], [y1, y2], fill_value='extrapolate')\n for x in Xs:\n Ys.append(f(x).item())\n return Xs, Ys\n\ndef animate(framerate=60, speedMultiplier=2):\n \"\"\" Generates an animation of NO2 concentration over time for both AQY sensors \"\"\"\n plt.rcParams['animation.ffmpeg_path'] = 'C:\\\\ffmpeg\\\\bin\\\\ffmpeg.exe'\n\n fig, ax = plt.subplots()\n xdata, ydata = [], []\n ln, = 
plt.plot([], [], color=(0, 0, 0))\n\n size = 15\n fig.set_size_inches(size, size/1.777)\n\n plt.ylabel(\"NO$_2$ Concentration [ppb]\")\n plt.xlabel(\"Time [mins]\")\n plt.title(\"NO$_{2}$ Concentration Over Time\")\n\n locs1, vals1 = aero(AERO_EXHAUST, \"NO2 (ppb)\", False, R1)\n locs2, vals2 = aero(AERO_DISTANT, \"NO2 (ppb)\", False, R1)\n\n dataPreInterp = [[locs1, vals1], [locs2, vals2]]\n data = []\n\n for line in dataPreInterp:\n Xs, Ys = [], []\n for i in range(len(line[0])-1):\n interp = getInterpPoints(line[0][i], line[1][i], line[0][i+1], line[1][i+1], framerate)\n Xs.extend(interp[0])\n Ys.extend(interp[1])\n Xs.append(line[0][-1])\n Ys.append(line[1][-1])\n data.append([Xs, Ys])\n \n xyData = [[[], []], [[], []]]\n\n colors = [(0.796875, 0.14453125, 0.16015625), (0.22265625, 0.4140625, 0.69140625)]\n \n lines = []\n for index in range(len(data)):\n lobj = ax.plot([],[],lw=2,color=colors[index])[0]\n lines.append(lobj)\n \n def initTest():\n ax.set_xlim(-20, len(locs1)+10)\n ax.set_ylim(-10, 100)\n return ln,\n \n def frameTest(i):\n xdata.append(data[0][0][i])\n ydata.append(data[0][1][i])\n ln.set_data(xdata, ydata)\n return ln,\n \n def init():\n ax.set_xlim(-2, len(locs1)+2)\n ax.set_ylim(-2, 80)\n for line in lines:\n line.set_data([],[])\n legend = plt.legend([\"1 ft from vehicle\", \"30 ft from vehicle\"], loc=\"upper left\")\n for i in range(len(data)):\n legend.legendHandles[i].set_color(colors[i])\n return lines\n\n def frame(i):\n for n in range(len(data)):\n if i < len(data[n][0]) and i < len(data[n][1]):\n xyData[n][0].append(data[n][0][i])\n xyData[n][1].append(data[n][1][i])\n for n in range(len(lines)):\n if i < len(data[n][0]) and i < len(data[n][1]):\n lines[n].set_data(xyData[n][0], xyData[n][1])\n legend = plt.legend([\"1 ft from vehicle\", \"30 ft from vehicle\"], loc=\"upper left\")\n for i in range(len(data)):\n legend.legendHandles[i].set_color(colors[i])\n return lines + [legend]\n \n anim = animation.FuncAnimation(fig, frame, init_func=init,\n frames=len(data[0][0]), interval=20, blit=True)\n \n FFMpegWriter = animation.writers['ffmpeg']\n writer = FFMpegWriter(fps=framerate*speedMultiplier, metadata=dict(artist='Me'), bitrate=1800)\n \n anim.save('basic_animation_exp4.mp4', writer=writer)\n\ndef main():\n fig1()\n\nif __name__ == \"__main__\":\n main()", "sub_path": "AQY_Analysis/exp.py", "file_name": "exp.py", "file_ext": "py", "file_size_in_byte": 12989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 35, "usage_type": 
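The animate() function above pre-interpolates the sensor curves and then grows the plotted arrays one point per FuncAnimation callback. A stripped-down sketch of that grow-and-redraw pattern (synthetic sine data, and no ffmpeg writer, so it runs wherever matplotlib can open a window):

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1.1, 1.1)
line, = ax.plot([], [], lw=2)
xs, ys = [], []

def frame(i):
    xs.append(i * 0.1)            # grow the data one sample per frame
    ys.append(np.sin(i * 0.1))
    line.set_data(xs, ys)         # push the arrays into the Line2D artist
    return line,

anim = animation.FuncAnimation(fig, frame, frames=63, interval=20, blit=True)
plt.show()
```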
"call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 56, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 73, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 77, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 112, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 124, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 
124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 174, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 177, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 206, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 247, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 265, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 272, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.animation.writers", "line_number": 347, "usage_type": "attribute"}, {"api_name": "matplotlib.animation", "line_number": 347, "usage_type": "name"}]} +{"seq_id": "119745185", "text": "import hashlib,json\nfrom collections import OrderedDict\nclass My_MerkTree:\n\n\tdef __init__(self,listoftransaction=None):\n\t\tself.listoftransaction = listoftransaction\n\t\tself.past_transaction = []\n\n\tdef create_tree(self):\n\n\t\tlistoftransaction = self.listoftransaction\n\t\tpast_transaction = self.past_transaction\n\t\ttemp_transaction = []\n\t\ttempDict = OrderedDict()\n\n\t\tfor index in range(0,len(listoftransaction),2):\n\t\t\tcurrent = listoftransaction[index]\n\n\t\t\tif index+1 != len(listoftransaction):\n\t\t\t\tcurrent_right = listoftransaction[index+1]\n\t\t\telse:\n\t\t\t\tcurrent_right = current\n\n\t\t\tcurrent_hash = hashlib.sha256(hashlib.sha256(current.encode('utf-8')).hexdigest().encode('utf-8'))\n\t\t\tcurrent_right_hash = hashlib.sha256(hashlib.sha256(current_right.encode('utf-8')).hexdigest().encode('utf-8'))\n\n\t\t\ttempDict[listoftransaction[index]] = current_hash.hexdigest()\n\n\t\t\tif index+1 != len(listoftransaction):\n\t\t\t\ttempDict[listoftransaction[index+1]] = current_right_hash.hexdigest()\n\n\t\t\ttemp_transaction.append(current_hash.hexdigest() + current_right_hash.hexdigest())\n\t\tpast_transaction.append(tempDict)\n\n\t\tif len(listoftransaction) != 1:\n\t\t\tself.listoftransaction = temp_transaction\n\t\t\tself.past_transaction = 
past_transaction\n\t\t\tself.create_tree()\n\n\tdef get_past_transaction(self):\n\t\treturn self.past_transaction\n\n\tdef get_root_leaf(self):\n\t\tlast_key = list(self.past_transaction[-1].keys())[-1]\n\t\treturn self.past_transaction[-1][last_key]\n\n# Declare the main part of the function to run\nif __name__ == \"__main__\":\n\n\t# a) Create the new class of My_MerkTree\n\tMy_Tree = My_MerkTree()\n\n\t# b) Give list of transaction\n\ttransaction = ['a','b','c','d']\n\n\t# c) pass on the transaction list \n\tMy_Tree.listoftransaction = transaction\n\n\t# d) Create the Merkle Tree transaction\n\tMy_Tree.create_tree()\n\n\t# e) Retrieve the transaction \n\tpast_transaction = My_Tree.get_past_transaction()\n\n\t# f) Get the last transaction and print all \n\tprint (\"First Example - Even number of transaction Merkle Tree\")\n\tprint ('Final root of the tree : ',My_Tree.get_root_leaf())\n\tprint(json.dumps(past_transaction, indent=4))\n\tprint (\"-\" * 50 )\n\n\t# h) Second example\n\tprint (\"Second Example - Odd number of transaction Merkle Tree\")\n\tMy_Tree = My_MerkTree()\n\ttransaction = ['a','b','c','d','e']\n\tMy_Tree.listoftransaction = transaction\n\tMy_Tree.create_tree()\n\tpast_transaction = My_Tree.get_past_transaction()\n\tprint ('Final root of the tree : ',My_Tree.get_root_leaf())\n\tprint(json.dumps(past_transaction, indent=4))\n\tprint (\"-\" * 50 )\n\n\t# i) Actual Use Case\n\tprint (\"Final Example - Actual use case of the Merkle Tree\")\n\n\t# i-1) Declare a transaction - the ground truth\n\tground_truth_Tree = My_MerkTree()\n\tground_truth_transaction = ['a','b','c','d','e']\n\tground_truth_Tree.listoftransaction = ground_truth_transaction\n\tground_truth_Tree.create_tree()\n\tground_truth_past_transaction = ground_truth_Tree.get_past_transaction()\n\tground_truth_root = ground_truth_Tree.get_root_leaf()\n\n\t# i-2) Declare a tampered transaction\n\ttampered_Tree = My_MerkTree()\n\ttampered_Tree_transaction = ['a','b','c','d','f']\n\ttampered_Tree.listoftransaction = tampered_Tree_transaction\n\ttampered_Tree.create_tree()\n\ttampered_Tree_past_transaction = tampered_Tree.get_past_transaction()\n\ttampered_Tree_root = tampered_Tree.get_root_leaf()\n\n\t# i-3) The three companies share all of the transaction \n\tprint ('Company A - my final transaction hash : ',ground_truth_root)\n\tprint ('Company B - my final transaction hash : ',ground_truth_root)\n\tprint ('Company C - my final transaction hash : ',tampered_Tree_root)\n\n\t# i-4) Print out all of the past transaction\n\tprint (\"\\n\\nGround Truth past Transaction \")\n\tprint(json.dumps(ground_truth_past_transaction, indent=4))\n\t\n\tprint (\"\\n\\nTampered Tree past Transaction \")\n\tprint(json.dumps(tampered_Tree_past_transaction, indent=4))\n\n\n\n\n\n# ---- END OF THE CODE ------", "sub_path": "merkle.2.py", "file_name": "merkle.2.py", "file_ext": "py", "file_size_in_byte": 3848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.OrderedDict", "line_number": 14, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 24, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 68, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 108, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "281551642", 
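For comparison with create_tree() above, here is a compact iterative Merkle-root helper in the same double-SHA256 style (an illustrative rewrite, not the record's API; odd levels pair the last hash with itself, mirroring the record's odd-transaction handling):

```python
import hashlib

def h(s):
    inner = hashlib.sha256(s.encode('utf-8')).hexdigest()
    return hashlib.sha256(inner.encode('utf-8')).hexdigest()  # double SHA-256

def merkle_root(leaves):
    level = [h(x) for x in leaves]
    while len(level) > 1:
        if len(level) % 2:                  # odd count: duplicate the last hash
            level.append(level[-1])
        level = [h(level[i] + level[i + 1]) for i in range(0, len(level), 2)]
    return level[0]

print(merkle_root(['a', 'b', 'c', 'd']) == merkle_root(['a', 'b', 'c', 'd']))  # True
print(merkle_root(['a', 'b', 'c', 'd']) == merkle_root(['a', 'b', 'c', 'x']))  # False: tampering moves the root
```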
"text": "'''DAILY TICKETS SENDING TO A WHATSAPP GROUP'''\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException, WebDriverException\r\nfrom datetime import datetime\r\nfrom time import sleep\r\nimport socket\r\nimport os\r\n\r\ndef is_connected():\r\n try:\r\n socket.create_connection((\"Google\", 80))\r\n return True\r\n except:\r\n is_connected()\r\n\r\n\r\ndef getWeather(url):\r\n resp = requests.get(url)\r\n data = resp.json()\r\n desc = data[\"weather\"][0][\"description\"]\r\n return desc\r\n\r\n\r\ndef getGoldRate(driver, url):\r\n driver.get(url)\r\n html_content = driver.page_source\r\n soup = BeautifulSoup(html_content, 'html.parser')\r\n # get the gold price\r\n td_tags = soup.findAll(\"td\")\r\n count = -1\r\n for tag in td_tags:\r\n count += 1\r\n if tag.text == \"22K-916 Jewellery\":\r\n gold_price = td_tags[count + 1].text\r\n return gold_price\r\n\r\n\r\ndef getCurrencyRate(driver, url):\r\n driver.get(url)\r\n rupee_rate = driver.find_element_by_xpath(\"//input[@class = 'a61j6 vk_gy vk_sh Hg3mWc']\").text\r\n return rupee_rate\r\n\r\n\r\ndef sendMessage(driver, group_name, d, w, c):\r\n try:\r\n\r\n find_user = driver.find_element_by_xpath('//span[@title = \"{}\"]'.format(group_name))\r\n sleep(15)\r\n find_user.click()\r\n text_box = driver.find_element_by_xpath(\"//div[@class = '_3u328 copyable-text selectable-text']\")\r\n text_box.click()\r\n message = (\"*Daily Tickets*\" +\r\n \"~*********************************~TODAY~\" + d.strftime(\"%a, %b %d, %Y\") +\r\n \"~-------------------------------------~WEATHER~\" + w +\r\n \"~-------------------------------------~CURRENCY EXCHANGE RATE~1 Singapore Dollar = \" + c + \" Indian rupees\" +\r\n \"~-------------------------------------~SPICE UP~Have a nice day!\")\r\n sleep(3)\r\n # typing message into the whatsapp message box\r\n for line in message.split(\"~\"):\r\n text_box.send_keys(line)\r\n ActionChains(driver).key_down(Keys.SHIFT).key_down(Keys.ENTER).key_up(Keys.ENTER).key_up(\r\n Keys.SHIFT).perform()\r\n text_box.send_keys(Keys.BACKSPACE)\r\n sleep(2)\r\n text_box.send_keys(Keys.ENTER)\r\n sleep(2)\r\n\r\n except Exception as e:\r\n print(\"{} Group doesn't exist!\".format(group_name))\r\n\r\n # MAIN MODULE STARTS HERE\r\n\r\n\r\ngroup_name = \"Deals & Recharge Offer\"\r\nurls = [\"http://api.openweathermap.org/data/2.5/weather?q={}&APPID={}\".format(\"Singapore\",\r\n \"0822b961597ff8fe4e300e208e4aaee6\"),\r\n \"https://www.mustafa.com.sg/\",\r\n \"https://www.google.com/search?q=singapore+to+india+currency+exchange+rate&oq=singapore+to+india+currency+exchange+rate&aqs=chrome..69i57.9300j0j9&sourceid=chrome&ie=UTF-8\",\r\n \"https://web.whatsapp.com/\"]\r\n\r\n#driver = webdriver.Chrome('C:\\\\ChromeDriver\\\\chromedriver.exe')\r\n#driver.maximize_window()\r\nd = datetime.now()\r\nw = getWeather(urls[0])\r\n\r\n\r\nchrome_options = webdriver.ChromeOptions()\r\nchrome_options.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\r\nchrome_options.add_argument(\"--headless\")\r\nchrome_options.add_argument(\"--disable-dev-shm-usage\")\r\nchrome_options.add_argument(\"--no-sandbox\")\r\ndriver = 
webdriver.Chrome(executable_path=os.environ.get(\"CHROMEDRIVER_PATH\"), chrome_options=chrome_options)\r\n\r\nc = getCurrencyRate(driver, urls[2])\r\ndriver.get(urls[3])\r\nsleep(10)\r\nsendMessage(driver, group_name, d, w, c)\r\ndriver.quit()", "sub_path": "whatsapp.py", "file_name": "whatsapp.py", "file_ext": "py", "file_size_in_byte": 3890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "socket.create_connection", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.SHIFT", "line_number": 69, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 69, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 69, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys.SHIFT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 70, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.BACKSPACE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 71, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 73, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 73, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "name"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 95, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 95, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 96, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 96, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 100, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 100, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 100, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 100, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "504414090", "text": "import networkx as nx # Imports networkx package for implementation of graphs\nimport matplotlib.pyplot as plt # Imports matplotlib package for plotting and displaying the graphs\n\nnode=[]\nedge=[]\n\n\n# Return a list of nodes of each cycle\ndef cycles(G):\n l = list(nx.simple_cycles(G))\n if len(l) != 0:\n print('No. 
of cycles in the graph are: ', len(l), '\\n')\n        print('The nodes of each cycle are: ', l, '\\n')\n    else:\n        print('There are no cycles in the given graph\\n')\n    return l\n\n\n# Returns a list of edges incoming or outgoing via a pendant node\ndef pendant(G):\n    deg = G.degree() # creates a dictionary of degree of each node as value & label of each node as key\n    e = []\n    min_value = min(deg.values()) # finds the least degree\n    if min_value == 1: # if the least degree is 1\n        min_keys = [k for k in deg if deg[k] == min_value] # finds all the nodes with the degree 1 i.e., pendant nodes\n        print('No. of pendant nodes are: ', len(min_keys), '\\n')\n        print('The pendant nodes are: ', min_keys, '\\n')\n        e = G.edges(min_keys)+G.in_edges(min_keys) # creates a list of edges incoming or outgoing via a pendant node\n        for i in range(0,len(e)):\n            e[i]=list(e[i])\n            for j in range(0,len(e[i])):\n                if e[i][j] in min_keys:\n                    e[i][j]=e[i][j]+'(pen)'\n    else:\n        print('There are no pendant nodes in the given graph\\n')\n    return e\n\n\n# Draws a graph G\ndef draw(G,pos,name):\n\n    nx.draw_networkx(G,pos=pos,with_labels=True, node_size = 200, node_color='orange',font_size=10)\n    plt.axis('off') # Will not display axes\n    plt.title(name) # Will display name on the graph\n    plt.show() # Displays the drawn graph\n\n\n# Draws cycles in a graph\ndef draw_cycles(l, pos, name):\n    if len(l)==0:\n        X=nx.DiGraph()\n        draw(X,pos,'No cycles are present in the given graph')\n    elif len(l)==1 and len(l[0])==1:\n        X=nx.DiGraph()\n        X.add_edge(l[0][0],l[0][0])\n        nx.draw_networkx(X,pos=pos,with_labels=True, node_size = 200, node_color='orange',font_size=10)\n        plt.axis('off')\n        plt.title(name)\n        plt.show()\n    else:\n        for i in range(0, len(l)): # Traverses through each cycle\n            X = nx.DiGraph()\n            j = 0\n            for j in range(0, len(l[i])-1): # Traverses through nodes of each cycle\n                X.add_node(l[i][j]) # Adds each node to the cycle graph\n                X.add_edge(l[i][j], l[i][j+1]) # Adds each edge to the cycle graph except the last edge\n            X.add_edge(l[i][j+1], l[i][0]) # Adds the last edge to the cycle graph\n            nx.draw_networkx(X,pos=pos,with_labels=True, node_size = 200, node_color='orange',font_size=10)\n            plt.axis('off')\n            plt.title(name)\n            plt.show() # Draws each cycle as a graph\n\n# Creates Directed Graph\nG = nx.DiGraph() # Graph that will contain Main Graph input by user\nG_pend = nx.DiGraph() # Graph that will contain pendant nodes\n\nif __name__ == \"__main__\":\n    # Inputs details of graph from user\n    print('Enter labels of nodes: (NOTE: Enter the nodes in a single line, separated by a single whitespace)\\n')\n    node = [x for x in input().split()] # List comprehension that splits the input string (with each label separated by a space) & stores all the labels in a list\n    print('Enter number of edges:\\n')\n    noe = int(input())\n    print(\"Enter each edge: (NOTE: Enter the starting node & ending node of each edge, separated by a single whitespace)\\n\")\n    for i in range(0, noe):\n        y = []\n        y = [x for x in input().split()]\n        for i in range(0,2): # Checks whether the user entered valid edges or not\n            if y[i] in node:\n                continue\n            else:\n                print('Please enter edges between the entered nodes only. 
Try again!!!')\n                exit()\n        if y[0]==y[1]: # if there is a self loop on a node, then it is represented in the graph as [node]*\n            for i in range(0,len(node)):\n                if node[i]==y[0]:\n                    node[i]=y[0]+'*'\n                    y[0]=y[1]=node[i]\n        edge.append(y) # Append each edge's nodes to the edge list\n    G.add_nodes_from(node) # Adds nodes list to the main graph\n    G.add_edges_from(edge) # Adds edges list to the main graph\n    pos = nx.circular_layout(G) # Fixes the positions of nodes in circular format\n    list_cycles = cycles(G) # Call function to find the list of nodes of each cycle in the main graph\n    list_pend = pendant(G) # Call function to find the list of edges of pendant nodes in the main graph\n    G_pend.add_edges_from(list_pend) # Adds edges of pendant nodes to the graph that displays pendant nodes\n    pos1 = nx.circular_layout(G_pend)\n    draw(G,pos,'Main Graph') # Draws the main graph\n    draw_cycles(list_cycles,pos,'Cycles') # Draws each cycle of main graph, if any. If no cycles are there, the graph will be just an empty graph\n    if len(list_pend)!=0:\n        draw(G_pend,pos1,'Pendant nodes') # Displays pendant nodes of main graph, if any. If no pendant nodes are there, the graph will be just an empty graph\n    else:\n        draw(G_pend,pos1,'There are no pendant nodes in the given graph')\n\n\n\n\n\n\n", "sub_path": "Standalone EXE program/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 5266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "networkx.simple_cycles", "line_number": 10, "usage_type": "call"}, {"api_name": "networkx.draw_networkx", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 51, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 54, "usage_type": "call"}, {"api_name": "networkx.draw_networkx", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 62, "usage_type": "call"}, {"api_name": "networkx.draw_networkx", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 74, "usage_type": "call"}, 
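The cycle and pendant queries in graph.py are easy to sanity-check on a tiny hard-coded digraph. The snippet below wraps G.degree() in dict() so it behaves the same on the networkx 1.x API the record assumes and on 2.x:

```python
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a'), ('c', 'd')])

print(list(nx.simple_cycles(G)))              # [['a', 'b', 'c']] - the one 3-cycle
deg = dict(G.degree())                        # degree = in-degree + out-degree
print([n for n, d in deg.items() if d == 1])  # ['d'] - the pendant node
```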
{"api_name": "networkx.DiGraph", "line_number": 75, "usage_type": "call"}, {"api_name": "networkx.circular_layout", "line_number": 101, "usage_type": "call"}, {"api_name": "networkx.circular_layout", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "422560165", "text": "from __future__ import print_function\n\nimport contextlib\nimport functools\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport time\n\nfrom astropy.io import fits\n\nfrom .backend import CoadditionBackend\nfrom .backend import ProjectionBackend\nfrom .backend import BackendError\n\n\n@contextlib.contextmanager\ndef timer(template):\n \"\"\"\n Usage:\n\n .. code-block:: python\n\n with timer(\"Thing done in {seconds} seconds\"):\n do_thing()\n\n \"\"\"\n start_time = time.time()\n yield\n total_time = time.time() - start_time\n\n print(template.format(seconds=total_time))\n\n\ndef timed(template):\n \"\"\"\n A decorator to measure and log execution time of functions. Example:\n\n .. code-block:: python\n\n @timed(\"Thing done in {seconds} seconds\")\n def do_thing():\n print(\"Foobar\")\n\n \"\"\"\n\n def decorator(fun):\n\n @functools.wraps(fun)\n def wrapper(*args, **kwargs):\n with timer(template):\n return_value = fun(*args, **kwargs)\n return return_value\n\n return wrapper\n\n return decorator\n\n\nclass TemporaryDirectoryShim(object):\n def __init__(self, suffix=\"\", prefix=\"tmp\", dir=None):\n import tempfile\n self.name = tempfile.mkdtemp(suffix, prefix, dir)\n\n def __repr__(self):\n return \"<{} {!r}>\".format(self.__class__.__name__, self.name)\n\n def __enter__(self):\n return self.name\n\n def __exit__(self, exc, value, tb):\n shutil.rmtree(self.name)\n\n\ntry:\n from tempfile import TemporaryDirectory\nexcept:\n TemporaryDirectory = TemporaryDirectoryShim\n\n\nclass SWarpBackend(ProjectionBackend, CoadditionBackend):\n RESAMPLE_SUFFIX = \".resamp.fits\"\n\n PROJECT = 0\n COADD = 1\n\n OPTIONS = {\n PROJECT: [\n \"-resample\", \"y\", \"-combine\", \"n\", \"-center_type\", \"manual\",\n \"-resample_suffix\", RESAMPLE_SUFFIX\n ],\n COADD: [\n \"-resample\", \"n\", \"-combine\", \"y\", \"-center_type\", \"manual\",\n ]\n }\n\n def get_command(self, input_path, output_path, user_parameters, ra, dec,\n num_rows, num_cols, mode):\n \"\"\"\n :return: array of command parts, e.g. 
something like\n        :param mode: either SWarpBackend.PROJECT or SWarpBackend.COADD\n\n        .. code-block:: python\n\n            [\"swarp\", \"-c\", \"swarp_conf.txt\"]\n\n        \"\"\"\n        command = [\"swarp\"]\n        for name, value in user_parameters:\n            command.append(name)\n            command.append(str(value))\n\n        if mode not in (self.PROJECT, self.COADD):\n            raise BackendError(\"Wrong mode: %s\" % mode)\n\n        command.extend(self.OPTIONS[mode])\n\n        # Center coordinates\n        command.append(\"-center\")\n        command.append(\"{ra},{dec}\".format(ra=ra, dec=dec))\n\n        if mode == self.PROJECT:\n            output_dir = os.path.dirname(output_path)\n            command.append(\"-resample_dir\")\n            command.append(output_dir)\n        else:\n            # Maybe we should set resample_dir here too?\n            command.append(\"-imageout_name\")\n            command.append(output_path)\n\n        # Image size\n        command.append(\"-image_size\")\n        command.append(\"{rows},{cols}\".format(rows=num_rows, cols=num_cols))\n\n        command.append(input_path)\n\n        return command\n\n    def get_process_stdout_stderr_returncode(self, command):\n        popen = subprocess.Popen(\n            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n        )\n\n        return popen.communicate() + (popen.returncode,)\n\n    @timed(\"project() done in {seconds} seconds\")\n    def project(self, header, image, user_parameters=None):\n        \"\"\"\n        :param header: astropy.io.fits.Header object\n        :param image: astropy.io.fits.HDUList object\n\n        :return: astropy.io.fits.HDUList object\n        \"\"\"\n        ra = header[\"CRVAL1\"]\n        dec = header[\"CRVAL2\"]\n        num_rows = header[\"NAXIS1\"]\n        num_cols = header[\"NAXIS2\"]\n\n        user_parameters = user_parameters or []\n\n        with TemporaryDirectory() as temp_dir:\n            input_path = os.path.join(temp_dir, \"input.fits\")\n\n            with timer(\"Projection input image written in {seconds} seconds\"):\n                image.writeto(input_path)\n\n            output_path = os.path.join(temp_dir, \"output.fits\")\n\n            self._project(\n                input_path,\n                output_path,\n                user_parameters,\n                ra, dec, num_rows, num_cols\n            )\n\n            with timer(\"Projection output image read in {seconds} seconds\"):\n                with open(output_path, \"rb\") as output_file:\n                    image_data = output_file.read()\n\n                output_image = fits.HDUList.fromstring(image_data)\n\n        return output_image\n\n    def _project(self, input_path, output_path, user_parameters, ra, dec,\n                 num_rows, num_cols):\n        input_path = os.path.abspath(input_path)\n        output_path = os.path.abspath(output_path)\n\n        output_dir = os.path.dirname(output_path)\n        input_name = os.path.basename(input_path)\n\n        command = self.get_command(\n            input_path, output_path, user_parameters, ra, dec, num_rows,\n            num_cols, mode=self.PROJECT\n        )\n\n        with timer(\"SWarp projection done in {seconds} seconds\"):\n            stdout, stderr, returncode = self.get_process_stdout_stderr_returncode(\n                command\n            )\n\n        print(\"Projection stdout:\", stdout, file=sys.stdout)\n        print(\"Projection stderr:\", stderr, file=sys.stderr)\n\n        # Copy actual output file to the output path\n        try:\n            input_name_without_ext, ext = input_name.rsplit(\".\")\n        except ValueError:\n            input_name_without_ext = input_name\n\n        actual_output_path = os.path.join(\n            output_dir,\n            input_name_without_ext + self.RESAMPLE_SUFFIX\n        )\n\n        msg = \"Moved projection output to destination in {seconds} seconds\"\n        with timer(msg):\n            shutil.move(actual_output_path, output_path)\n\n        return returncode\n\n    @timed(\"coadd() done in {seconds} seconds\")\n    def coadd(self, header, images, user_parameters=None):\n        \"\"\"\n        :param header: astropy.io.fits.Header object\n        :param images: a list of astropy.io.fits.HDUList objects\n\n        :return: astropy.io.fits.HDUList object\n        
\"\"\"\n ra = header[\"CRVAL1\"]\n dec = header[\"CRVAL2\"]\n num_rows = header[\"NAXIS1\"]\n num_cols = header[\"NAXIS2\"]\n\n user_parameters = user_parameters or []\n\n input_paths = []\n\n with TemporaryDirectory() as temp_dir:\n input_list_path = os.path.join(temp_dir, \"input_file_list\")\n\n with timer(\"Coaddition input written in {seconds} seconds\"):\n for i, image in enumerate(images):\n input_path = os.path.join(\n temp_dir, \"input_{0}.fits\".format(i)\n )\n\n # Write image to input path\n image.writeto(input_path)\n\n input_paths.append(input_path)\n\n with open(input_list_path, \"w\") as input_list_file:\n to_write = \"\\n\".join(path for path in input_paths) + \"\\n\"\n input_list_file.write(to_write)\n\n output_path = os.path.join(temp_dir, \"output.fits\")\n input_list_arg = \"@{path}\".format(path=input_list_path)\n\n return_code = self._coadd(\n input_list_arg,\n output_path,\n user_parameters,\n ra, dec,\n num_rows, num_cols\n )\n\n with timer(\"Coaddition output read in {seconds} seconds\"):\n with open(output_path, \"rb\") as output_file:\n image_data = output_file.read()\n\n output_image = fits.HDUList.fromstring(image_data)\n\n if return_code != 0:\n raise BackendError(\"SWarp returned {code}\".format(code=return_code))\n\n return output_image\n\n def _coadd(self, input_path, output_path, user_parameters, ra,\n dec, num_rows, num_cols):\n command = self.get_command(\n input_path, output_path, user_parameters, ra, dec, num_rows,\n num_cols, mode=self.COADD\n )\n\n with timer(\"SWarp coaddition done in {seconds} seconds\"):\n stdout, stderr, returncode = self.get_process_stdout_stderr_returncode(\n command\n )\n\n print(\"Coaddition stdout:\", stdout, file=sys.stdout)\n print(\"Coaddition stderr:\", stderr, file=sys.stderr)\n\n return returncode\n", "sub_path": "worker/backend/swarp.py", "file_name": "swarp.py", "file_ext": "py", "file_size_in_byte": 8652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 18, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 50, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 64, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 73, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 79, "usage_type": "name"}, {"api_name": "backend.ProjectionBackend", "line_number": 82, "usage_type": "name"}, {"api_name": "backend.CoadditionBackend", "line_number": 82, "usage_type": "name"}, {"api_name": "backend.BackendError", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 141, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.HDUList.fromstring", "line_number": 181, 
"usage_type": "call"}, {"api_name": "astropy.io.fits.HDUList", "line_number": 181, "usage_type": "attribute"}, {"api_name": "astropy.io.fits", "line_number": 181, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 203, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 219, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.HDUList.fromstring", "line_number": 273, "usage_type": "call"}, {"api_name": "astropy.io.fits.HDUList", "line_number": 273, "usage_type": "attribute"}, {"api_name": "astropy.io.fits", "line_number": 273, "usage_type": "name"}, {"api_name": "backend.BackendError", "line_number": 276, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 292, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 293, "usage_type": "attribute"}]} +{"seq_id": "79959994", "text": "from __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom argparse import ArgumentParser\nimport os\nimport cv2\nimport sys\n\nfrom yolo_model import YOLOModel\nfrom rotation_invariance_model import RotationalInvarianceModel\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--input-width', dest='input_width',\n default=160, type=int, help='input image width')\n parser.add_argument('--input-height', dest='input_height',\n default=120, type=int, help='input image height')\n parser.add_argument('--output-width', dest='output_width',\n default=16, type=int, help='output image width')\n parser.add_argument('--output-height', dest='output_height',\n default=12, type=int, help='output image height')\n parser.add_argument('--yolo-checkpoint', dest='yolo_checkpoint',\n default='YOLO1/YOLO-200000',\n type=str, help='checkpoint to load')\n parser.add_argument('--rotation-checkpoint', dest='rotation_checkpoint',\n default='RotationalInvarianceModel6/RotationalInvarianceModel-100000',\n type=str, help='checkpoint to load')\n parser.add_argument('--threshold', dest='threshold',\n default=0.5, type=float, help='threshold for output')\n args = parser.parse_args()\n\n # yolo graph\n yolo_graph = tf.Graph()\n with yolo_graph.as_default() as g:\n yolo_model = YOLOModel(args.input_width, args.input_height, 3,\n 
args.output_width, args.output_height, 1, saving=False)\n    yolo_sess = tf.Session(graph=g)\n    # rotation graph\n    rotation_graph = tf.Graph()\n    with rotation_graph.as_default() as g:\n        rotation_model = RotationalInvarianceModel(64, 3, 10,\n                                                   model_name='RotationalInvarianceModel',\n                                                   saving=False)\n    rotation_sess = tf.Session(graph=g)\n\n    if args.yolo_checkpoint != '' and \\\n            os.path.isfile(args.yolo_checkpoint + '.meta') and \\\n            os.path.isfile(args.yolo_checkpoint + '.index') and \\\n            args.rotation_checkpoint != '' and \\\n            os.path.isfile(args.rotation_checkpoint + '.meta') and \\\n            os.path.isfile(args.rotation_checkpoint + '.index'):\n        yolo_model.load(yolo_sess, args.yolo_checkpoint)\n\n        camera = cv2.VideoCapture(0)\n        if camera.isOpened():\n            while True:\n                _, img = camera.read()\n                inputs = cv2.resize(img, (args.input_width, args.input_height))\n                inputs = np.expand_dims(inputs, 0)\n                xy_output, size_output, indicator = \\\n                    yolo_model.predict(yolo_sess, inputs)\n                roi_display = np.copy(img)\n\n                valid = (indicator > args.threshold).squeeze(-1)\n                scores = np.sort(indicator[valid, :]).squeeze(-1)[::-1]\n                xys = xy_output[valid, :]\n                sizes = size_output[valid, :]\n                print(scores)\n                print('\rfound: %d | max score: %f' % (len(xys), indicator.max()))\n                sys.stdout.flush()\n                for i in range(len(xys)):\n                    if indicator[valid, :][i] == scores[0]:\n                        x = xys[i, 0] * 640\n                        y = xys[i, 1] * 480\n                        w = sizes[i, 0] * 640\n                        h = sizes[i, 1] * 480\n                        p1 = (int(x - w / 2), int(y - h / 2))\n                        p2 = (int(x + w / 2), int(y + h / 2))\n                        cv2.rectangle(roi_display, p1, p2, (0, 255, 0), 2)\n                        rotation_model.predict(rotation_sess,\n                                               np.expand_dims(img[p1[1]:p2[1], p1[0]:p2[0], :], 0))  # crop the ROI from the full-resolution frame; ndarrays have no .expand_dim method\n\n                cv2.imshow('Camera image', img)\n                cv2.imshow('Indicator',\n                           cv2.resize(np.squeeze(indicator, axis=0), (640, 480)))\n                cv2.imshow('ROI', roi_display)\n                key = cv2.waitKey(10)\n                if key in [ord('q'), ord('Q'), 10, 27]:\n                    break\n    yolo_sess.close()\n    rotation_sess.close()\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "rotational-invariance/yolo_inference.py", "file_name": "yolo_inference.py", "file_ext": "py", "file_size_in_byte": 3614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 35, "usage_type": "call"}, {"api_name": "yolo_model.YOLOModel", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 41, "usage_type": "call"}, {"api_name": "rotation_invariance_model.RotationalInvarianceModel", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "yolo_model.load", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 61, "usage_type": "call"}, {"api_name": "yolo_model.predict", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 89, "usage_type": "call"}]}
+{"seq_id": "427506636", "text": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\"\"\"\nORB is an algorithm intended to replace SIFT and SURF, and it is faster.\n\n1 FAST features from accelerated segment test, draws a 16-pixel circle\n2 BRIEF Binary Robust Independent Elementary Features\n3 Brute-force matching\n\"\"\"\n\nimg1 = cv2.imread('4.jpeg', 0)\nimg2 = cv2.imread('5.jpeg', 0)\n\norb = cv2.ORB_create()\n\nkp1, des1 = orb.detectAndCompute(img1, None)\nkp2, des2 = orb.detectAndCompute(img2, None)\n\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\nmatches = bf.match(des1, des2)\nmatches = sorted(matches, key=lambda x:x.distance)\n\nimg3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:100], img2, flags=2)\n\nplt.imshow(img3)\nplt.show()", "sub_path": "读书笔记《opencv3 计算机视觉 python语言实现》/第6章图像检索/6.1.4_orb.py", "file_name": "6.1.4_orb.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.ORB_create", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.BFMatcher", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.NORM_HAMMING", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.drawMatches", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "232442902", "text": "#! /usr/bin/env python3\n\nimport vcf\nimport httplib2\nimport json\n\n__author__ = 'Gerhard Bilek'\n\n##\n##\n## Aim of this assignment is to annotate the variants with various attributes\n## We will use the API provided by \"myvariant.info\" - more information here: https://docs.myvariant.info\n## NOTE NOTE! - check here for hg38 - https://myvariant.info/faq\n##  1) Annotate the first 900 variants in the VCF file\n##  2) Store the result in a data structure (not in a database)\n##  3) Use the data structure to answer the questions\n##\n##  4) View the VCF in a browser\n##\n\nclass Assignment3:\n    \n    def __init__(self):\n        ## Check if pyvcf is installed\n        print(\"PyVCF version: %s\" % vcf.VERSION)\n        \n        ## Call annotate_vcf_file here\n        self.vcf_path = \"chr16.vcf\" # TODO\n\n    @property\n    def annotate_vcf_file(self):\n        '''\n        - Annotate the VCF file using the following example code (for 1 variant)\n        - Iterate over the variants (use first 900)\n        - Store the result in a data structure\n        :return:\n        ''' \n        print(\"TODO\")\n        \n        ##\n        ## Example loop\n        ##\n        \n        ## Build the connection\n        h = httplib2.Http()\n        headers = {'content-type': 'application/x-www-form-urlencoded'}\n        \n        params_pos = [] # List of variant positions\n        with open(self.vcf_path) as my_vcf_fh:\n            vcf_reader = vcf.Reader(my_vcf_fh)\n            for counter, record in enumerate(vcf_reader):\n                params_pos.append(record.CHROM + \":g.\" + str(record.POS) + record.REF + \">\" + str(record.ALT[0]))\n                \n                if counter >= 899:\n                    break\n        \n        ## Build the parameters using the list we just built\n        params = 'ids=' + \",\".join(params_pos) + '&hg38=true'\n\n        #print(\"Params_Pos: \", params_pos)\n        #print(\"Params: \", params)\n\n        ## Perform annotation\n        res, con = h.request('http://myvariant.info/v1/variant', 'POST', params, headers=headers)\n        annotation_result = con.decode('utf-8')\n\n        ## Generate json object\n        jsonobject = json.loads(annotation_result)\n\n        return(jsonobject)\n    \n    def get_list_of_genes(self, jsonobject):\n        '''\n        Print the name of genes in the annotation data set\n        :return:\n        '''\n        for object in jsonobject:\n            if 'cadd' in object:\n                if 'genename' in object['cadd']['gene']:\n                    print(object['cadd']['gene']['genename'])\n\n        #for object in jsonobject:\n        #    if 'dbsnp' in object:\n        #        if 'genename' in object['dbsnp']['gene']:\n        #            print(object['dbsnp']['gene']['genename'])\n\n    def get_num_variants_modifier(self, jsonobject):\n        '''\n        Print the number of variants with putative_impact \"MODIFIER\"\n        :return:\n        '''\n\n        '''\n        for object in jsonobject:\n            if 'cadd' in object:\n                if 'putative_impact' in object['ann']:\n                    #if 'putative_impact' in object:\n                    print(\"boom\")\n        '''\n        counter = 0\n        for object in jsonobject:\n            if 'snpeff' in object: # candidate keys: 'snpeff' (used here), 'cadd'\n                key, value = \"putative_impact\", \"MODIFIER\"\n                if key in object['snpeff']['ann'] and value == object['snpeff']['ann']['putative_impact']:\n                    counter += 1\n        return(counter)\n\n    def get_num_variants_with_mutationtaster_annotation(self, jsonobject):\n        '''\n        Print the number of variants with a 'mutationtaster' annotation\n        :return:\n        '''\n        counter = 0\n        for object in jsonobject:\n            if 'dbnsfp' in object:\n                if 'mutationtaster' in object['dbnsfp']:\n                    counter += 1\n        return(counter)\n    \n    \n    def get_num_variants_non_synonymous(self, jsonobject):\n        '''\n        Print the number of variants with 'consequence' 'NON_SYNONYMOUS'\n        :return:\n        '''\n\n        counter = 0\n        for object in jsonobject:\n            if 'cadd' in object:\n                key, value = \"consequence\", \"NON_SYNONYMOUS\"\n                if key in object['cadd'] and value == object['cadd']['consequence']: # value must be defined down to the key.\n                    counter += 1\n        return counter\n\n    \n    def view_vcf_in_browser(self):\n        '''\n        - Open a browser and go to https://vcf.iobio.io/\n        - Upload the VCF file and investigate the details\n        :return:\n        '''\n\n        ## Document the final URL here\n        print(\"The vcf file has been compressed and indexed via iTabixIt.app. The two resulting files, the compressed file (gz) and the index file (gz.tbi) were uploaded to https://vcf.iobio.io/\")\n        print(\"Results: https://vcf.iobio.io/?species=Human&build=GRCh38\")\n    \n    def print_summary(self):\n        annoData = self.annotate_vcf_file # no parentheses: annotate_vcf_file is a @property\n        #for object in annoData: print(object) # json objects\n        print()\n        print(\"List of Genes:\") # 9\n        self.get_list_of_genes(annoData)\n        print()\n        print(\"Num of Modifier: \", self.get_num_variants_modifier(annoData)) # 4\n        print()\n        print(\"Num of Mutationtaster: \", self.get_num_variants_with_mutationtaster_annotation(annoData)) #5\n        print()\n        print(\"Num of nonSynonymous: \", self.get_num_variants_non_synonymous(annoData))\n        print()\n        print(self.view_vcf_in_browser())\n        print()\n\ndef main():\n    print(\"Assignment 3\")\n    assignment3 = Assignment3()\n    assignment3.print_summary()\n    print(\"Done with assignment 3\")\n    \n    \nif __name__ == '__main__':\n    main()\n    \n    \n\n\n\n", "sub_path": "assignment3.py", "file_name": "assignment3.py", "file_ext": "py", "file_size_in_byte": 5772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "vcf.VERSION", "line_number": 25, "usage_type": "attribute"}, {"api_name": "httplib2.Http", "line_number": 45, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "162301565", "text": "'''\nHandlers for AJAX (Javascript) functions used in the web interface to start\nexperiments and train BMI decoders\n'''\nimport json, datetime\nimport logging\nimport io, traceback\nimport numpy as np\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import ProtectedError\n\nfrom riglib import experiment\nfrom .json_param import Parameters\nfrom .models import TaskEntry, Feature, Sequence, Task, Generator, Subject, Experimenter, DataFile, System, Decoder, KeyValueStore, import_by_path\nfrom .tasktrack import Track\n\nimport logging\nimport io, traceback\n\nfrom . import exp_tracker # Wrapper for tasktrack.Track\nfrom . 
import trainbmi\n\nhttp_request_queue = []\n\n@csrf_exempt\ndef train_decoder_ajax_handler(request, idx):\n '''\n AJAX handler for creating a new decoder.\n\n Parameters\n ----------\n request : Django HttpRequest\n POST data containing details for how to train the decoder (type, units, update rate, etc.)\n idx : int\n ID number of the TaskEntry record with the data used to train the Decoder.\n\n Returns\n -------\n Django HttpResponse\n Indicates 'success' if all commands initiated without error.\n '''\n ## Check if the name of the decoder is already taken\n collide = Decoder.objects.filter(entry=idx, name=request.POST['bminame'])\n if len(collide) > 0:\n return _respond(dict(status='error', msg='Name collision -- please choose a different name'))\n update_rate = float(request.POST['bmiupdaterate'])\n\n kwargs = dict(\n entry=idx,\n name=request.POST['bminame'],\n clsname=request.POST['bmiclass'],\n extractorname=request.POST['bmiextractor'],\n cells=request.POST['cells'],\n channels=request.POST['channels'],\n binlen=1./update_rate,\n tslice=list(map(float, request.POST.getlist('tslice[]'))),\n ssm=request.POST['ssm'],\n pos_key=request.POST['pos_key'],\n kin_extractor=request.POST['kin_extractor'],\n zscore=request.POST['zscore'],\n )\n trainbmi.cache_and_train(**kwargs)\n return _respond(dict(status=\"success\"))\n\n\nclass encoder(json.JSONEncoder):\n '''\n Encoder for JSON data that defines how the data should be returned.\n '''\n def default(self, o):\n if isinstance(o, np.ndarray):\n return o.tolist()\n elif isinstance(o, Parameters):\n return o.params\n else:\n return super(encoder, self).default(o)\n\ndef _respond(data):\n '''\n Generic HTTPResponse to return JSON-formatted dictionary values\n\n Parameters\n ----------\n data : dict\n Keys and values can be just about anything\n\n Returns\n -------\n HttpResponse\n JSON-encoded version of the input dictionary\n '''\n return HttpResponse(json.dumps(data, cls=encoder), content_type=\"application/json\")\n\ndef task_info(request, idx, dbname='default'):\n '''\n Get information about the task\n\n Parameters\n ----------\n request : Django HttpRequest\n\n idx : int\n Primary key used to look up the task from the database\n\n Returns\n -------\n JSON-encoded dictionary\n '''\n task = Task.objects.using(dbname).get(pk=idx)\n feats = []\n for name, isset in list(request.GET.items()):\n if isset == \"true\": # box for the feature checked\n feat = Feature.objects.using(dbname).get(name=name)\n feats.append(feat)\n\n filter_kwargs = {'template': True, 'task__id': idx}\n templates = TaskEntry.objects.using(dbname).filter(**filter_kwargs).order_by(\"-date\")\n template_info = [{'id': t.id, 'name': t.entry_name} for t in templates]\n\n metadata = TaskEntry.get_default_metadata()\n\n task_info = dict(params=task.params(feats=feats), generators=task.get_generators(), \\\n templates=template_info, metadata=metadata)\n\n task_cls = task.get(feats=feats)\n if issubclass(task_cls, experiment.Sequence):\n task_info['sequence'] = task.sequences()\n task_info['controls'] = task.controls(feats=feats)\n\n return _respond(task_info)\n\ndef exp_info(request, idx, dbname='default'):\n '''\n Get information about the tasks that have already run\n\n Parameters\n ----------\n request : Django HttpRequest\n POST request triggered by clicking on a task entry from the left side pane\n idx : int\n Primary key used to look up the TaskEntry from the database\n\n Returns\n -------\n JSON-encoded dictionary\n Data containing features, parameters, and any report data 
from the TaskEntry\n    '''\n    entry = TaskEntry.objects.using(dbname).get(pk=idx)\n    try:\n        entry_data = entry.to_json()\n    except Exception as e:\n        print(\"##### Error trying to access task entry data: id=%s, dbname=%s\" % (idx, dbname))\n        import traceback\n        exception = traceback.format_exc()\n        exception = exception.replace('\n', '\n    ')\n        print(exception.rstrip())\n        print(\"#####\")\n        return _respond_err(exception)\n    else:\n        return _respond(entry_data)\n\n@csrf_exempt\ndef add_sequence(request):\n    \n    print(request.POST)\n    sequence = json.loads(request.POST['sequence'])\n    task_id = json.loads(request.POST.get('task'))\n    seq = Sequence.from_json(sequence)\n    task = Task.objects.get(pk=task_id)\n    seq.task = task\n    seq.save()\n    \n    return _respond(dict(id=seq.id, name=seq.name)) \n\ndef hide_entry(request, idx):\n    '''\n    See documentation for exp_info\n    '''\n    print(\"hide_entry\")\n    entry = TaskEntry.objects.get(pk=idx)\n    entry.visible = False\n    entry.save()\n    return _respond(dict())\n\ndef show_entry(request, idx):\n    '''\n    See documentation for exp_info\n    '''\n    print(\"show_entry\")\n    entry = TaskEntry.objects.get(pk=idx)\n    entry.visible = True\n    entry.save()\n    return _respond(dict())\n\ndef remove_entry(request, idx):\n    print(\"Remove entry %d\" % idx)\n    entry = TaskEntry.objects.get(pk=idx)\n    try:\n        DataFile.objects.filter(entry=entry.id).delete()\n    except DataFile.DoesNotExist:\n        pass\n    try:\n        Decoder.objects.filter(entry=entry.id).delete()\n    except Decoder.DoesNotExist:\n        pass\n    entry.delete()\n    return _respond(dict())\n\ndef template_entry(request, idx):\n    '''\n    See documentation for exp_info\n    '''\n    entry = TaskEntry.objects.get(pk=idx)\n    entry.template = True\n    entry.save()\n    return _respond(dict())\n\ndef untemplate_entry(request, idx):\n    '''\n    See documentation for exp_info\n    '''\n    entry = TaskEntry.objects.get(pk=idx)\n    entry.template = False\n    entry.save()\n    return _respond(dict())\n    \ndef backup_entry(request, idx):\n    '''\n    See documentation for exp_info\n    '''\n    entry = TaskEntry.objects.get(pk=idx)\n    entry.backup = True\n    entry.save()\n    return _respond(dict())\n\ndef unbackup_entry(request, idx):\n    '''\n    See documentation for exp_info\n    '''\n    entry = TaskEntry.objects.get(pk=idx)\n    entry.backup = False\n    entry.save()\n    return _respond(dict())\n\ndef gen_info(request, idx):\n    try:\n        gen = Generator.objects.get(pk=idx)\n        return _respond(gen.to_json())\n    except:\n        traceback.print_exc()\n\ndef start_next_exp(request):\n    try:\n        req, save = http_request_queue.pop(0)\n        return start_experiment(req, save=save)\n    except IndexError:\n        return _respond(dict(status=\"error\", msg=\"No experiments in queue!\"))\n\n@csrf_exempt\ndef start_experiment(request, save=True, execute=True):\n    '''\n    Handles presses of the 'Start Experiment' and 'Test' buttons in the browser\n    interface\n    '''\n    #make sure we don't have an already-running experiment\n    tracker = Track.get_instance()\n    if len(tracker.status.value) != 0:\n        print(\"Task is running, exp_tracker.status.value:\", tracker.status.value)\n        return _respond(dict(status=\"running\", msg=\"Already running task!\"))\n\n    # Try to start the task, and if there are any errors, send them to the browser interface\n    try:\n        data = json.loads(request.POST['data'])\n\n        task = Task.objects.get(pk=data['task'])\n        feature_names = list(data['feats'].keys())\n        subject_name = data['metadata'].pop('subject')\n        subject = Subject.objects.get(name=subject_name)\n        experimenter_name = data['metadata'].pop('experimenter')\n        experimenter = Experimenter.objects.get(name=experimenter_name)\n        
project = data['metadata'].pop('project')\n session = data['metadata'].pop('session')\n\n entry = TaskEntry.objects.create(subject_id=subject.id, task_id=task.id, experimenter_id=experimenter.id,\n project=project, session=session)\n if 'entry_name' in data:\n entry.entry_name = data['entry_name']\n if 'date' in data and data['date'] != \"Today\" and len(data['date'].split(\"-\")) == 3:\n datestr = data['date'].split(\"-\")\n print(\"Got custom date: \", datestr)\n entry.date = datetime.datetime(int(datestr[0]), int(datestr[1]), int(datestr[2])) # this does not work: datetime.datetime.strptime(\"%Y-%m-%d\", datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n\n params = Parameters.from_html(data['params'])\n entry.params = params.to_json()\n feats = Feature.getall(feature_names)\n kwargs = dict(subj=entry.subject.id, subject_name=subject_name, base_class=task.get(),\n feats=feats, params=params)\n metadata = Parameters.from_html(data['metadata'])\n entry.metadata = metadata.to_json()\n\n # Save the target sequence to the database and link to the task entry, if the task type uses target sequences\n if issubclass(task.get(feats=feature_names), experiment.Sequence):\n seq = Sequence.from_json(data['sequence'])\n seq.task = task\n if save:\n seq.save()\n entry.sequence = seq\n kwargs['seq'] = seq\n\n response = dict(status=\"testing\", subj=entry.subject.name,\n task=entry.task.name)\n\n if save:\n # tag software version using the git hash\n import git\n repo = git.repo.Repo(__file__, search_parent_directories=True)\n sw_version = repo.commit().hexsha[:8]\n repo_dirty = repo.is_dirty(index=True, working_tree=True, untracked_files=False)\n if repo_dirty:\n sw_version += '.dirty'\n entry.sw_version = sw_version\n\n # Save the task entry to database\n entry.save()\n\n # Link the features used to the task entry\n for feat_name in feature_names:\n f = Feature.objects.get(name=feat_name)\n entry.feats.add(f.pk)\n\n response['date'] = entry.date.strftime(\"%h %d, %Y %I:%M %p\")\n response['status'] = \"running\"\n response['idx'] = entry.ui_id\n\n # Give the entry ID to the runtask as a kwarg so that files can be linked after the task is done\n kwargs['saveid'] = entry.id\n else:\n entry.delete()\n\n # Start the task FSM and tracker\n if execute:\n tracker.runtask(**kwargs)\n else:\n response[\"status\"] = \"completed\"\n\n # Return the JSON response\n return _respond(response)\n\n except Exception as e:\n # Generate an HTML response with the traceback of any exceptions thrown\n import io\n import traceback\n from .tasktrack import log_str\n err = io.StringIO()\n traceback.print_exc(None, err)\n traceback.print_exc() # print again to console\n err.seek(0)\n log_str(err.read()) # log to tasktracker\n err.seek(0)\n tracker.reset() # make sure task is stopped\n return _respond(dict(status=\"error\", msg=err.read()))\n\ndef rpc(fn):\n '''\n Generic remote procedure call function\n\n Parameters\n ----------\n fn : callable\n Function which takes a single argument, the tracker object.\n Return values from this function are ignored.\n\n Returns\n -------\n JSON-encoded dictionary\n '''\n tracker = Track.get_instance()\n\n # make sure that there exists an experiment to interact with\n if tracker.status.value not in [b\"running\", b\"testing\"]:\n print(\"Task not running!\", str(tracker.status.value))\n return _respond(dict(status=\"error\", msg=\"No task running, so cannot run command!\"))\n\n try:\n status = tracker.status.value.decode(\"utf-8\")\n fn_response = fn(tracker)\n response_data = 
dict(status=\"pending\", msg=status)\n if not fn_response is None:\n response_data['data'] = fn_response\n return _respond(response_data)\n except Exception as e:\n import traceback\n traceback.print_exc()\n return _respond_err(e)\n\ndef _respond_err(e):\n '''\n Default error response from server to webclient\n\n Parameters\n ----------\n e : Exception\n Error & traceback to convert to string format.\n\n Returns\n -------\n JSON-encoded dictionary\n Sets status to \"error\" and provides the specific error message\n '''\n err = io.StringIO()\n traceback.print_exc(None, err)\n err.seek(0)\n return _respond(dict(status=\"error\", msg=err.read()))\n\n@csrf_exempt\ndef stop_experiment(request):\n return rpc(lambda tracker: tracker.stoptask())\n\ndef enable_clda(request):\n return rpc(lambda tracker: tracker.task_proxy.enable_clda())\n\ndef disable_clda(request):\n return rpc(lambda tracker: tracker.task_proxy.disable_clda())\n\ndef set_task_attr(request, attr, value):\n '''\n Generic function to change a task attribute while the task is running.\n '''\n return rpc(lambda tracker: tracker.task_proxy.remote_set_attr(attr, value))\n\n@csrf_exempt\ndef save_notes(request, idx):\n te = TaskEntry.objects.get(pk=idx)\n te.notes = request.POST['notes']\n te.save()\n return _respond(dict(status=\"success\"))\n\ndef reward_drain(request, onoff):\n '''\n Start/stop the \"drain\" of a solenoid reward remotely\n This function is modified to use the reward system in Orsborn lab - check reward.py for functions\n '''\n from riglib import reward\n r = reward.open()\n\n if onoff == 'on':\n r.drain(600)\n print('drain on')\n else:\n print('drain off')\n r.drain_off()\n return HttpResponse('Turning reward %s' % onoff)\n\ndef populate_models(request):\n \"\"\" Database initialization code. When 'db.tracker' is imported, it goes through the database and ensures that\n 1) at least one subject is present\n 2) all the tasks from 'tasklist' appear in the db\n 3) all the features from 'featurelist' appear in the db\n 4) all the generators from all the tasks appear in the db\n \"\"\"\n subjects = Subject.objects.all()\n if len(subjects) == 0:\n subj = Subject(name='testing')\n subj.save()\n\n for m in [Generator, System]:\n m.populate()\n\n return HttpResponse(\"Updated Tasks, features generators, and systems\")\n\n@csrf_exempt\ndef add_new_task(request):\n from . 
import models\n    name, import_path = request.POST['name'], request.POST['import_path']\n\n    # verify import path\n    if import_path == '':\n        import_path = \"riglib.experiment.Experiment\"\n\n    try:\n        import_by_path(import_path)\n    except:\n        import traceback\n        traceback.print_exc()\n        return _respond(dict(msg=\"import path invalid!\", status=\"error\"))\n\n    task = Task(name=name, import_path=import_path)\n    task.save()\n\n    # add any new generators for the task\n    Generator.remove_unused() \n    Generator.populate()\n\n    task_data = dict(id=task.id, name=task.name, import_path=task.import_path)\n    return _respond(dict(msg=\"Added new task: %s\" % task.name, status=\"success\", data=task_data))\n\n@csrf_exempt\ndef remove_task(request):\n    id = request.POST.get('id')\n    task = Task.objects.filter(id=id)\n    try:\n        entry = TaskEntry.objects.filter(task=id).values_list('id', flat=True)\n    except TaskEntry.DoesNotExist:\n        entry = None\n    if entry is None or len(entry) == 0:\n        try:\n            Sequence.objects.filter(task=id).delete()\n        except Sequence.DoesNotExist:\n            pass\n        task.delete()\n        return _respond(dict(msg=\"Removed task\", status=\"success\"))\n    else:\n        return _respond(dict(msg=\"Couldn't remove task, experiments {0} use it.\".format(list(entry)), status=\"error\"))\n\n@csrf_exempt\ndef add_new_subject(request):\n    subject_name = request.POST['subject_name']\n    subj = Subject(name=subject_name)\n    subj.save()\n\n    return _respond(dict(msg=\"Added new subject: %s\" % subj.name, status=\"success\", data=dict(id=subj.id, name=subj.name)))\n\n@csrf_exempt\ndef remove_subject(request):\n    id = request.POST.get('id')\n    try:\n        Subject.objects.filter(id=id).delete()\n        return _respond(dict(msg=\"Removed subject\", status=\"success\"))\n    except ProtectedError:\n        return _respond(dict(msg=\"Couldn't remove subject, there must be valid experiments that use it\", status=\"error\"))\n\n@csrf_exempt\ndef add_new_experimenter(request):\n    exp_name = request.POST['experimenter_name']\n    exp = Experimenter(name=exp_name)\n    exp.save()\n\n    return _respond(dict(msg=\"Added new experimenter: %s\" % exp.name, status=\"success\", data=dict(id=exp.id, name=exp.name)))\n\n@csrf_exempt\ndef remove_experimenter(request):\n    id = request.POST.get('id')\n    try:\n        Experimenter.objects.filter(id=id).delete()\n        return _respond(dict(msg=\"Removed experimenter\", status=\"success\"))\n    except ProtectedError:\n        return _respond(dict(msg=\"Couldn't remove experimenter, there must be valid experiments that use it\", status=\"error\"))\n\n@csrf_exempt\ndef add_new_system(request):\n    sys = System(name=request.POST['name'], path=request.POST['path'],\n                 processor_path=request.POST['processor_path'])\n    sys.save()\n\n    system_data = dict(id=sys.id, name=sys.name)\n    return _respond(dict(msg=\"Added new system: %s\" % sys.name, status=\"success\", data=system_data))\n\n@csrf_exempt\ndef remove_system(request):\n    from . import models\n    id = request.POST.get('id')\n    try:\n        System.objects.filter(id=id).delete()\n        return _respond(dict(msg=\"Removed system\", status=\"success\"))\n    except ProtectedError:\n        return _respond(dict(msg=\"Couldn't remove system, there must be valid experiments that use it\", status=\"error\"))\n\n@csrf_exempt\ndef toggle_features(request):\n    from features import built_in_features\n    from . 
import models\n\n name = request.POST.get('name')\n \n # check if the feature is already installed\n existing_features = Feature.objects.filter(name=name)\n\n if len(existing_features) > 0:\n # disable the feature\n Feature.objects.filter(name=name).delete()\n msg = \"Disabled feature: %s\" % str(name)\n return _respond(dict(msg=msg, status=\"success\"))\n elif name in built_in_features:\n import_path = built_in_features[name].__module__ + '.' + built_in_features[name].__qualname__\n feat = Feature(name=name, import_path=import_path)\n feat.save()\n msg = \"Enabled built-in feature: %s\" % str(feat.name)\n return _respond(dict(msg=msg, status=\"success\", id=feat.id))\n else:\n # something is wrong\n return _respond(dict(msg=\"feature not valid!\", status=\"error\")) \n\n@csrf_exempt\ndef add_new_feature(request):\n from . import models\n name, import_path = request.POST['name'], request.POST['import_path']\n\n # verify import path\n try:\n import_by_path(import_path)\n except:\n import traceback\n traceback.print_exc()\n return _respond(dict(msg=\"import path invalid!\", status=\"error\"))\n\n feat = Feature(name=name, import_path=import_path)\n feat.save()\n \n feature_data = dict(id=feat.id, name=feat.name, import_path=feat.import_path)\n return _respond(dict(msg=\"Added new feature: %s\" % feat.name, status=\"success\", data=feature_data))\n\n@csrf_exempt\ndef setup_run_upkeep(request):\n # Update the list of generators\n from . import models\n Generator.populate()\n return HttpResponse(\"Updated generators!\")\n\n@csrf_exempt\ndef get_report(request):\n '''\n Get data for the report field in the frontend\n '''\n def report_fn(tracker):\n tracker.task_proxy.update_report_stats()\n reportstats = tracker.task_proxy.reportstats\n return reportstats\n\n return rpc(report_fn)\n\n@csrf_exempt\ndef trigger_control(request):\n '''\n Trigger an action via controls on the web interface\n '''\n def control_fn(tracker):\n try:\n method = getattr(tracker.task_proxy, request.POST[\"control\"])\n if \"params\" in request.POST:\n params = json.loads(request.POST.get(\"params\"))\n print(method)\n return method(**params)\n else:\n return method()\n except Exception as e:\n traceback.print_exc()\n\n if \"base_class\" in request.POST:\n\n # If this is a static method, it will have a base_class\n task_id = request.POST[\"base_class\"]\n feature_names = json.loads((request.POST['feats'])).keys()\n task = Task.objects.get(pk=task_id).get(feats=feature_names)\n try:\n fn = getattr(task, request.POST[\"control\"])\n print(fn)\n if \"params\" in request.POST:\n params = json.loads(request.POST.get(\"params\"))\n result = fn(**params)\n else:\n result = fn()\n return _respond(dict(status=\"success\", value=result))\n except Exception as e:\n traceback.print_exc()\n return _respond_err(e)\n \n else:\n\n # Otherwise it is a method belonging to the active task\n return rpc(control_fn)\n\n@csrf_exempt\ndef get_status(request):\n \"\"\" Send the task tracker's status back to the frontend \"\"\"\n tracker = Track.get_instance()\n if tracker.task_kwargs is None:\n saveid = None\n else:\n saveid = tracker.task_kwargs[\"saveid\"]\n print(\"saveid\", saveid)\n return _respond(dict(status=tracker.get_status(), saveid=saveid))\n\n@csrf_exempt\ndef save_entry_name(request):\n from . 
import models\n    te_rec = TaskEntry.objects.get(id=request.POST[\"id\"])\n    te_rec.entry_name = request.POST[\"entry_name\"]\n    te_rec.save()\n    return _respond(dict(status=\"success\", msg=\"Saved entry name: %s\" % te_rec.entry_name))\n\ndef update_built_in_feature_import_paths(request):\n    \"\"\"For built-in features, update the import path based on the features module\"\"\"\n    from . import models\n    for feat in Feature.objects.all():\n        feat.get(update_builtin=True)\n    return _respond(dict(status=\"success\", msg=\"Updated built-in feature paths!\"))\n\ndef update_database_storage_path(request):\n    from . import models\n    db_name = request.POST['db_name']\n    db_storage_path = request.POST['db_storage_path']\n\n    KeyValueStore.set(\"data_path\", db_storage_path, dbname=db_name)\n    return _respond(dict(status=\"success\", msg=\"Updated storage path for %s db\" % db_name))\n\ndef save_recording_sys(request):\n    from . import models\n    KeyValueStore.set('recording_sys', request.POST['selected_recording_sys'])\n    print(KeyValueStore.get('recording_sys'))\n    ret_msg = \"Set recording_sys to %s\" % KeyValueStore.get('recording_sys')\n    return _respond(dict(status=\"success\", msg=ret_msg))\n\ndef save_rig_name(request):\n    from . import models\n    KeyValueStore.set('rig_name', request.POST['rig_name'])\n    print(KeyValueStore.get('rig_name'))\n    ret_msg = \"Set rig_name to %s\" % KeyValueStore.get('rig_name')\n    return _respond(dict(status=\"success\", msg=ret_msg))\n\n@csrf_exempt\ndef setup_handler(request):\n    \"\"\"One-stop handler for setup functions to avoid adding a bunch of URLs\"\"\"\n    action = request.POST['action']\n    if action == \"update_database_storage_path\":\n        return update_database_storage_path(request)\n    elif action == \"save_recording_sys\":\n        return save_recording_sys(request)\n    elif action == \"save_rig_name\":\n        return save_rig_name(request)\n    elif action == \"update_built_in_feature_paths\":\n        return update_built_in_feature_import_paths(request)\n    else:\n        return _respond(dict(status=\"error\", msg=\"Unrecognized action: %s\" % action))\n\n", "sub_path": "db/tracker/ajax.py", "file_name": "ajax.py", "file_ext": "py", "file_size_in_byte": 24165, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "models.Decoder.objects.filter", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Decoder.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.Decoder", "line_number": 44, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 26, "usage_type": "name"}, {"api_name": "json.JSONEncoder", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 72, "usage_type": "attribute"}, {"api_name": "json_param.Parameters", "line_number": 74, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Task.objects.using", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 110, "usage_type": "name"}, {"api_name": "models.Feature.objects.using", "line_number": 114, "usage_type": "call"}, {"api_name": "models.Feature.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "models.Feature", "line_number": 114, "usage_type": "name"}, {"api_name": 
"models.TaskEntry.objects.using", "line_number": 118, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 118, "usage_type": "name"}, {"api_name": "models.TaskEntry.get_default_metadata", "line_number": 121, "usage_type": "call"}, {"api_name": "models.TaskEntry", "line_number": 121, "usage_type": "name"}, {"api_name": "riglib.experiment.Sequence", "line_number": 127, "usage_type": "attribute"}, {"api_name": "riglib.experiment", "line_number": 127, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.using", "line_number": 149, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 149, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 155, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 167, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 168, "usage_type": "call"}, {"api_name": "models.Sequence.from_json", "line_number": 169, "usage_type": "call"}, {"api_name": "models.Sequence", "line_number": 169, "usage_type": "name"}, {"api_name": "models.Task.objects.get", "line_number": 170, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 170, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 163, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 181, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 181, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 181, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 191, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 191, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 191, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 198, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 198, "usage_type": "name"}, {"api_name": "models.DataFile.objects.filter", "line_number": 200, "usage_type": "call"}, {"api_name": "models.DataFile.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "models.DataFile", "line_number": 200, "usage_type": "name"}, {"api_name": "models.DataFile.DoesNotExist", "line_number": 201, "usage_type": "attribute"}, {"api_name": "models.DataFile", "line_number": 201, "usage_type": "name"}, {"api_name": "models.Decoder.objects.filter", "line_number": 204, "usage_type": "call"}, {"api_name": "models.Decoder.objects", "line_number": 204, "usage_type": "attribute"}, {"api_name": "models.Decoder", "line_number": 204, "usage_type": "name"}, {"api_name": "models.Decoder.DoesNotExist", "line_number": 205, "usage_type": "attribute"}, {"api_name": "models.Decoder", "line_number": 205, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 214, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 214, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 214, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 223, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 223, 
"usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 223, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 232, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 232, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 241, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 241, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 241, "usage_type": "name"}, {"api_name": "models.Generator.objects.get", "line_number": 248, "usage_type": "call"}, {"api_name": "models.Generator.objects", "line_number": 248, "usage_type": "attribute"}, {"api_name": "models.Generator", "line_number": 248, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 251, "usage_type": "call"}, {"api_name": "tasktrack.Track.get_instance", "line_number": 267, "usage_type": "call"}, {"api_name": "tasktrack.Track", "line_number": 267, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 274, "usage_type": "call"}, {"api_name": "models.Task.objects.get", "line_number": 276, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 276, "usage_type": "name"}, {"api_name": "models.Subject.objects.get", "line_number": 279, "usage_type": "call"}, {"api_name": "models.Subject.objects", "line_number": 279, "usage_type": "attribute"}, {"api_name": "models.Subject", "line_number": 279, "usage_type": "name"}, {"api_name": "models.Experimenter.objects.get", "line_number": 281, "usage_type": "call"}, {"api_name": "models.Experimenter.objects", "line_number": 281, "usage_type": "attribute"}, {"api_name": "models.Experimenter", "line_number": 281, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.create", "line_number": 285, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 285, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 292, "usage_type": "call"}, {"api_name": "json_param.Parameters.from_html", "line_number": 294, "usage_type": "call"}, {"api_name": "json_param.Parameters", "line_number": 294, "usage_type": "name"}, {"api_name": "models.Feature.getall", "line_number": 296, "usage_type": "call"}, {"api_name": "models.Feature", "line_number": 296, "usage_type": "name"}, {"api_name": "json_param.Parameters.from_html", "line_number": 299, "usage_type": "call"}, {"api_name": "json_param.Parameters", "line_number": 299, "usage_type": "name"}, {"api_name": "riglib.experiment.Sequence", "line_number": 303, "usage_type": "attribute"}, {"api_name": "riglib.experiment", "line_number": 303, "usage_type": "name"}, {"api_name": "models.Sequence.from_json", "line_number": 304, "usage_type": "call"}, {"api_name": "models.Sequence", "line_number": 304, "usage_type": "name"}, {"api_name": "git.repo.Repo", "line_number": 317, "usage_type": "call"}, {"api_name": "git.repo", "line_number": 317, "usage_type": "attribute"}, {"api_name": "models.Feature.objects.get", "line_number": 329, "usage_type": "call"}, {"api_name": "models.Feature.objects", "line_number": 329, "usage_type": "attribute"}, {"api_name": "models.Feature", "line_number": 329, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 355, "usage_type": "call"}, 
{"api_name": "traceback.print_exc", "line_number": 356, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 357, "usage_type": "call"}, {"api_name": "tasktrack.log_str", "line_number": 359, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 260, "usage_type": "name"}, {"api_name": "tasktrack.Track.get_instance", "line_number": 378, "usage_type": "call"}, {"api_name": "tasktrack.Track", "line_number": 378, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 394, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 411, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 412, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 416, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 434, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 434, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 434, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 432, "usage_type": "name"}, {"api_name": "riglib.reward.open", "line_number": 445, "usage_type": "call"}, {"api_name": "riglib.reward", "line_number": 445, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 453, "usage_type": "call"}, {"api_name": "models.Subject.objects.all", "line_number": 462, "usage_type": "call"}, {"api_name": "models.Subject.objects", "line_number": 462, "usage_type": "attribute"}, {"api_name": "models.Subject", "line_number": 462, "usage_type": "name"}, {"api_name": "models.Subject", "line_number": 464, "usage_type": "call"}, {"api_name": "models.Generator", "line_number": 467, "usage_type": "name"}, {"api_name": "models.System", "line_number": 467, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 470, "usage_type": "call"}, {"api_name": "models.import_by_path", "line_number": 482, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 485, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 488, "usage_type": "call"}, {"api_name": "models.Generator.remove_unused", "line_number": 492, "usage_type": "call"}, {"api_name": "models.Generator", "line_number": 492, "usage_type": "name"}, {"api_name": "models.Generator.populate", "line_number": 493, "usage_type": "call"}, {"api_name": "models.Generator", "line_number": 493, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 472, "usage_type": "name"}, {"api_name": "models.Task.objects.filter", "line_number": 501, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 501, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 501, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.filter", "line_number": 503, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 503, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 503, "usage_type": "name"}, {"api_name": "models.TaskEntry.DoesNotExist", "line_number": 504, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 504, "usage_type": "name"}, {"api_name": "models.Sequence.objects.filter", "line_number": 508, "usage_type": "call"}, {"api_name": "models.Sequence.objects", "line_number": 508, "usage_type": "attribute"}, {"api_name": "models.Sequence", "line_number": 508, "usage_type": "name"}, 
{"api_name": "models.Sequence.DoesNotExist", "line_number": 509, "usage_type": "attribute"}, {"api_name": "models.Sequence", "line_number": 509, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 498, "usage_type": "name"}, {"api_name": "models.Subject", "line_number": 519, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 516, "usage_type": "name"}, {"api_name": "models.Subject.objects.filter", "line_number": 528, "usage_type": "call"}, {"api_name": "models.Subject.objects", "line_number": 528, "usage_type": "attribute"}, {"api_name": "models.Subject", "line_number": 528, "usage_type": "name"}, {"api_name": "django.db.models.ProtectedError", "line_number": 530, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 524, "usage_type": "name"}, {"api_name": "models.Experimenter", "line_number": 536, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 533, "usage_type": "name"}, {"api_name": "models.Experimenter.objects.filter", "line_number": 545, "usage_type": "call"}, {"api_name": "models.Experimenter.objects", "line_number": 545, "usage_type": "attribute"}, {"api_name": "models.Experimenter", "line_number": 545, "usage_type": "name"}, {"api_name": "django.db.models.ProtectedError", "line_number": 547, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 541, "usage_type": "name"}, {"api_name": "models.System", "line_number": 552, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 550, "usage_type": "name"}, {"api_name": "models.System.objects.filter", "line_number": 564, "usage_type": "call"}, {"api_name": "models.System.objects", "line_number": 564, "usage_type": "attribute"}, {"api_name": "models.System", "line_number": 564, "usage_type": "name"}, {"api_name": "django.db.models.ProtectedError", "line_number": 566, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 559, "usage_type": "name"}, {"api_name": "models.Feature.objects.filter", "line_number": 577, "usage_type": "call"}, {"api_name": "models.Feature.objects", "line_number": 577, "usage_type": "attribute"}, {"api_name": "models.Feature", "line_number": 577, "usage_type": "name"}, {"api_name": "models.Feature.objects.filter", "line_number": 581, "usage_type": "call"}, {"api_name": "models.Feature.objects", "line_number": 581, "usage_type": "attribute"}, {"api_name": "models.Feature", "line_number": 581, "usage_type": "name"}, {"api_name": "features.built_in_features", "line_number": 584, "usage_type": "name"}, {"api_name": "features.built_in_features", "line_number": 585, "usage_type": "name"}, {"api_name": "models.Feature", "line_number": 586, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 569, "usage_type": "name"}, {"api_name": "models.import_by_path", "line_number": 601, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 604, "usage_type": "call"}, {"api_name": "models.Feature", "line_number": 607, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 594, "usage_type": "name"}, {"api_name": "models.Generator.populate", "line_number": 617, "usage_type": "call"}, {"api_name": "models.Generator", "line_number": 617, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 618, "usage_type": "call"}, 
{"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 613, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 620, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 641, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 647, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 653, "usage_type": "call"}, {"api_name": "models.Task.objects.get", "line_number": 654, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 654, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 654, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 659, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 665, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 632, "usage_type": "name"}, {"api_name": "tasktrack.Track.get_instance", "line_number": 676, "usage_type": "call"}, {"api_name": "tasktrack.Track", "line_number": 676, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 673, "usage_type": "name"}, {"api_name": "models.TaskEntry.objects.get", "line_number": 687, "usage_type": "call"}, {"api_name": "models.TaskEntry.objects", "line_number": 687, "usage_type": "attribute"}, {"api_name": "models.TaskEntry", "line_number": 687, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 684, "usage_type": "name"}, {"api_name": "models.Feature.objects.all", "line_number": 695, "usage_type": "call"}, {"api_name": "models.Feature.objects", "line_number": 695, "usage_type": "attribute"}, {"api_name": "models.Feature", "line_number": 695, "usage_type": "name"}, {"api_name": "models.KeyValueStore.set", "line_number": 704, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 704, "usage_type": "name"}, {"api_name": "models.KeyValueStore.set", "line_number": 709, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 709, "usage_type": "name"}, {"api_name": "models.KeyValueStore.get", "line_number": 710, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 710, "usage_type": "name"}, {"api_name": "models.KeyValueStore.get", "line_number": 711, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 711, "usage_type": "name"}, {"api_name": "models.KeyValueStore.set", "line_number": 716, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 716, "usage_type": "name"}, {"api_name": "models.KeyValueStore.get", "line_number": 717, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 717, "usage_type": "name"}, {"api_name": "models.KeyValueStore.get", "line_number": 718, "usage_type": "call"}, {"api_name": "models.KeyValueStore", "line_number": 718, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 721, "usage_type": "name"}]} +{"seq_id": "69304178", "text": "import itertools\n\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.externals.joblib import delayed, Parallel\nfrom sklearn.utils import gen_even_slices\n\nfrom ..base import BaseDetector\nfrom ..utils import timeit, OneDimArray, TwoDimArray\n\n__all__ = ['FastABOD']\n\n\ndef approximate_abof(\n X: TwoDimArray,\n X_train: TwoDimArray,\n neigh_ind: TwoDimArray\n) -> OneDimArray:\n \"\"\"Compute the approximate Angle-Based Outlier Factor (ABOF) for each\n sample.\n 
\"\"\"\n\n with np.errstate(invalid='raise'):\n return np.var([\n [\n (diff_a @ diff_b) / (diff_a @ diff_a) / (diff_b @ diff_b)\n for diff_a, diff_b in itertools.combinations(\n X_neigh - query_point, 2\n )\n ]\n for query_point, X_neigh in zip(X, X_train[neigh_ind])\n ], axis=1)\n\n\nclass FastABOD(BaseDetector):\n \"\"\"Fast Angle-Based Outlier Detector (FastABOD).\n\n Parameters\n ----------\n fpr : float, default 0.01\n False positive rate. Used to compute the threshold.\n\n n_jobs : int, default 1\n Number of jobs to run in parallel. If -1, then the number of jobs is\n set to the number of CPU cores.\n\n verbose : bool, default False\n Enable verbose output.\n\n kwargs : dict\n Other keywords passed to sklearn.neighbors.NearestNeighbors().\n\n Attributes\n ----------\n threshold_ : float\n Threshold.\n\n X_ : array-like of shape (n_samples, n_features)\n Training data.\n\n References\n ----------\n H.-P. Kriegel, M. Schubert and A. Zimek,\n \"Angle-based outlier detection in high-dimensional data,\"\n In Proceedings of SIGKDD'08, pp. 444-452, 2008.\n \"\"\"\n\n @property\n def X_(self) -> TwoDimArray:\n return self._knn._fit_X\n\n def __init__(\n self,\n fpr: float = 0.01,\n n_jobs: int = 1,\n verbose: bool = False,\n **kwargs\n ) -> None:\n super().__init__(fpr=fpr, verbose=verbose)\n\n self.n_jobs = n_jobs\n self._knn = NearestNeighbors(**kwargs)\n\n self.check_params()\n\n def check_params(self) -> None:\n \"\"\"Check validity of parameters and raise ValueError if not valid.\"\"\"\n\n super().check_params()\n\n @timeit\n def fit(self, X: TwoDimArray, y: OneDimArray = None) -> 'FastABOD':\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : ignored\n\n Returns\n -------\n self : FastABOD\n Return self.\n \"\"\"\n\n self._knn.fit(X)\n\n anomaly_score = self.anomaly_score()\n self.threshold_ = np.percentile(anomaly_score, 100. * (1. - self.fpr))\n\n return self\n\n def anomaly_score(self, X: TwoDimArray = None) -> OneDimArray:\n \"\"\"Compute the anomaly score for each sample.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features), default None\n Data. 
If not provided, the anomaly score for each training sample\n            is returned.\n\n        Returns\n        -------\n        anomaly_score : array-like of shape (n_samples,)\n            Anomaly score for each sample.\n        \"\"\"\n\n        neigh_ind = self._knn.kneighbors(X, return_distance=False)\n\n        if X is None:\n            X = self.X_\n\n        n_samples, _ = X.shape\n\n        try:\n            result = Parallel(n_jobs=self.n_jobs)(\n                delayed(approximate_abof)(\n                    X[s], self.X_, neigh_ind[s]\n                ) for s in gen_even_slices(n_samples, self.n_jobs)\n            )\n        except FloatingPointError as e:\n            raise ValueError('X must not contain training samples') from e\n\n        return -np.concatenate(result)\n\n    def feature_wise_anomaly_score(self, X: TwoDimArray = None) -> TwoDimArray:\n        raise NotImplementedError()\n\n    def score(self, X: TwoDimArray, y: OneDimArray = None) -> float:\n        raise NotImplementedError()\n", "sub_path": "kenchi/outlier_detection/angle_based.py", "file_name": "angle_based.py", "file_ext": "py", "file_size_in_byte": 4091, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "utils.TwoDimArray", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.TwoDimArray", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.TwoDimArray", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.errstate", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 24, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.OneDimArray", "line_number": 18, "usage_type": "name"}, {"api_name": "base.BaseDetector", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.TwoDimArray", "line_number": 69, "usage_type": "name"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.TwoDimArray", "line_number": 92, "usage_type": "name"}, {"api_name": "utils.OneDimArray", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.timeit", "line_number": 91, "usage_type": "name"}, {"api_name": "utils.TwoDimArray", "line_number": 115, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.Parallel", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.delayed", "line_number": 139, "usage_type": "call"}, {"api_name": "sklearn.utils.gen_even_slices", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.OneDimArray", "line_number": 115, "usage_type": "name"}, {"api_name": "utils.TwoDimArray", "line_number": 148, "usage_type": "name"}, {"api_name": "utils.TwoDimArray", "line_number": 151, "usage_type": "name"}, {"api_name": "utils.OneDimArray", "line_number": 151, "usage_type": "name"}]}
+{"seq_id": "501437840", "text": "# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import( Column,\n                     DJANGOCMS_GRID_LG_CHOICES,\n                     DJANGOCMS_GRID_MD_CHOICES,\n                     DJANGOCMS_GRID_SM_CHOICES,\n                     DJANGOCMS_GRID_XS_CHOICES)\n\n\nclass ColumnPluginForm(forms.ModelForm):\n    size_md = forms.ChoiceField(label=_(\"Medium size\"), help_text=_('Medium devices Desktops (>=992px)'),\n                                choices=DJANGOCMS_GRID_MD_CHOICES, required=True)\n\n    size_lg = forms.ChoiceField(label=_(\"Large size\"), help_text=_('Large devices Desktops (>=1200px)'),\n                                choices=DJANGOCMS_GRID_LG_CHOICES, required=False)\n\n    size_sm = 
forms.ChoiceField(label=_(\"Small size\"), help_text=_('Small devices Tablets (>=768px)'),\n choices=DJANGOCMS_GRID_SM_CHOICES, required=False)\n\n size_xs = forms.ChoiceField(label=_(\"Extra small size\"), help_text=_('Extra small devices Phones (<768px)'),\n choices=DJANGOCMS_GRID_XS_CHOICES, required=False)\n\n class Meta:\n model = Column\n exclude = ('size', 'page', 'position', 'placeholder', 'language', 'plugin_type')\n\n def __init__(self, *args, **kwargs):\n\n super(ColumnPluginForm, self).__init__(*args, **kwargs)\n if self.instance:\n current_size_list = self.instance.size.split()\n for size in current_size_list:\n if size in [x[0] for x in DJANGOCMS_GRID_LG_CHOICES]:\n self.fields['size_lg'].initial = size\n elif size in [x[0] for x in DJANGOCMS_GRID_MD_CHOICES]:\n self.fields['size_md'].initial = size\n elif size in [x[0] for x in DJANGOCMS_GRID_SM_CHOICES]:\n self.fields['size_sm'].initial = size\n elif size in [x[0] for x in DJANGOCMS_GRID_XS_CHOICES]:\n self.fields['size_xs'].initial = size\n", "sub_path": "djangocms_bootstrap3/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "django.forms.ModelForm", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 12, "usage_type": "call"}, {"api_name": "models.DJANGOCMS_GRID_MD_CHOICES", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 15, "usage_type": "call"}, {"api_name": "models.DJANGOCMS_GRID_LG_CHOICES", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 18, "usage_type": "call"}, {"api_name": "models.DJANGOCMS_GRID_SM_CHOICES", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 21, "usage_type": "call"}, {"api_name": "models.DJANGOCMS_GRID_XS_CHOICES", "line_number": 22, "usage_type": "name"}, {"api_name": "models.Column", "line_number": 25, "usage_type": "name"}, {"api_name": "models.DJANGOCMS_GRID_LG_CHOICES", "line_number": 34, "usage_type": "name"}, {"api_name": "models.DJANGOCMS_GRID_MD_CHOICES", "line_number": 36, "usage_type": "name"}, {"api_name": "models.DJANGOCMS_GRID_SM_CHOICES", "line_number": 38, "usage_type": "name"}, {"api_name": "models.DJANGOCMS_GRID_XS_CHOICES", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "263821164", "text": "import json\nimport logging\nfrom typing import List, Callable, Any, Optional\n\nfrom cryptoxlib.WebsocketMgr import Subscription, WebsocketMgr, WebsocketMessage, Websocket, CallbacksType\nfrom cryptoxlib.Pair import Pair\nfrom cryptoxlib.clients.binance.functions import map_ws_pair\nfrom 
cryptoxlib.clients.binance.enums import CandelstickInterval\n\nLOG = logging.getLogger(__name__)\n\n\nclass BinanceWebsocket(WebsocketMgr):\n WEBSOCKET_URI = \"wss://stream.binance.com:9443/\"\n SUBSCRIPTION_ID = 0\n\n def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,\n websocket_uri: str = None, ssl_context = None) -> None:\n super().__init__(websocket_uri = websocket_uri if websocket_uri is not None else BinanceWebsocket.WEBSOCKET_URI,\n subscriptions = subscriptions,\n builtin_ping_interval = None,\n ssl_context = ssl_context,\n auto_reconnect = True)\n\n self.api_key = api_key\n self.sec_key = sec_key\n self.binance_client = binance_client\n\n def get_websocket_uri_variable_part(self):\n return \"stream?streams=\" + \"/\".join([subscription.get_channel_name() for subscription in self.subscriptions])\n\n async def initialize_subscriptions(self) -> None:\n for subscription in self.subscriptions:\n await subscription.initialize(binance_client = self.binance_client)\n\n async def _subscribe(self, websocket: Websocket):\n BinanceWebsocket.SUBSCRIPTION_ID += 1\n\n subscription_message = {\n \"method\": \"SUBSCRIBE\",\n \"params\": [\n subscription.get_channel_name() for subscription in self.subscriptions\n ],\n \"id\": BinanceWebsocket.SUBSCRIPTION_ID\n }\n\n LOG.debug(f\"> {subscription_message}\")\n await websocket.send(json.dumps(subscription_message))\n\n @staticmethod\n def _is_subscription_confirmation(response):\n if 'result' in response and response['result'] is None:\n return True\n else:\n return False\n\n async def _process_message(self, websocket: Websocket, message: str) -> None:\n message = json.loads(message)\n\n if self._is_subscription_confirmation(message):\n LOG.info(f\"Subscription confirmed for id: {message['id']}\")\n else:\n # regular message\n await self.publish_message(WebsocketMessage(subscription_id = message['stream'], message = message))\n\n\nclass BinanceSubscription(Subscription):\n def __init__(self, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n @staticmethod\n def get_channel_name():\n pass\n\n def get_subscription_message(self, **kwargs) -> dict:\n pass\n\n def construct_subscription_id(self) -> Any:\n return self.get_channel_name()\n\n\nclass AllMarketTickersSubscription(BinanceSubscription):\n def __init__(self, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n def get_channel_name(self):\n return \"!ticker@arr\"\n\n\nclass BestOrderBookTickerSubscription(BinanceSubscription):\n def __init__(self, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n def get_channel_name(self):\n return \"!bookTicker\"\n\n\nclass BestOrderBookSymbolTickerSubscription(BinanceSubscription):\n def __init__(self, pair: Pair, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n self.pair = pair\n\n def get_channel_name(self):\n return f\"{map_ws_pair(self.pair)}@bookTicker\"\n\n\nclass TradeSubscription(BinanceSubscription):\n def __init__(self, pair: Pair, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n self.pair = pair\n\n def get_channel_name(self):\n return map_ws_pair(self.pair) + \"@trade\"\n\n\nclass AggregateTradeSubscription(BinanceSubscription):\n def __init__(self, pair: Pair, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n self.pair = pair\n\n def get_channel_name(self):\n return map_ws_pair(self.pair) + \"@aggTrade\"\n\n\nclass CandlestickSubscription(BinanceSubscription):\n def __init__(self, 
pair: Pair, interval: CandelstickInterval, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n self.pair = pair\n self.interval = interval\n\n def get_channel_name(self):\n return f\"{map_ws_pair(self.pair)}@kline_{self.interval.value}\"\n\n\nclass AccountSubscription(BinanceSubscription):\n def __init__(self, callbacks: CallbacksType = None):\n super().__init__(callbacks)\n\n self.listen_key = None\n\n async def initialize(self, **kwargs):\n binance_client = kwargs['binance_client']\n listen_key_response = await binance_client.get_listen_key()\n self.listen_key = listen_key_response[\"response\"][\"listenKey\"]\n LOG.debug(f'Listen key: {self.listen_key}')\n\n def get_channel_name(self):\n return self.listen_key\n\n\nclass BinanceTestnetWebsocket(BinanceWebsocket):\n WEBSOCKET_URI = \"wss://testnet.binance.vision/\"\n\n def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,\n ssl_context = None) -> None:\n super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,\n sec_key = sec_key, websocket_uri = BinanceTestnetWebsocket.WEBSOCKET_URI,\n ssl_context = ssl_context)\n\n", "sub_path": "cryptoxlib/clients/binance/BinanceWebsocket.py", "file_name": "BinanceWebsocket.py", "file_ext": "py", "file_size_in_byte": 5553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "cryptoxlib.WebsocketMgr.WebsocketMgr", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.Subscription", "line_number": 17, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.Websocket", "line_number": 36, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "cryptoxlib.WebsocketMgr.Websocket", "line_number": 57, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "cryptoxlib.WebsocketMgr.WebsocketMessage", "line_number": 64, "usage_type": "call"}, {"api_name": "cryptoxlib.WebsocketMgr.Subscription", "line_number": 67, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 78, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 83, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 91, "usage_type": "name"}, {"api_name": "cryptoxlib.Pair.Pair", "line_number": 99, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 99, "usage_type": "name"}, {"api_name": "cryptoxlib.clients.binance.functions.map_ws_pair", "line_number": 105, "usage_type": "call"}, {"api_name": "cryptoxlib.Pair.Pair", "line_number": 109, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 109, "usage_type": "name"}, {"api_name": "cryptoxlib.clients.binance.functions.map_ws_pair", "line_number": 115, "usage_type": "call"}, {"api_name": "cryptoxlib.Pair.Pair", "line_number": 119, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 119, "usage_type": "name"}, {"api_name": "cryptoxlib.clients.binance.functions.map_ws_pair", "line_number": 125, "usage_type": "call"}, {"api_name": 
"cryptoxlib.Pair.Pair", "line_number": 129, "usage_type": "name"}, {"api_name": "cryptoxlib.clients.binance.enums.CandelstickInterval", "line_number": 129, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 129, "usage_type": "name"}, {"api_name": "cryptoxlib.clients.binance.functions.map_ws_pair", "line_number": 136, "usage_type": "call"}, {"api_name": "cryptoxlib.WebsocketMgr.CallbacksType", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 158, "usage_type": "name"}, {"api_name": "cryptoxlib.WebsocketMgr.Subscription", "line_number": 158, "usage_type": "name"}]} +{"seq_id": "216731355", "text": "import pymysql\n\nclass SQL :\n def __init__(self, host='192.168.137.172', user='ssu', password='',\n db='ssudb', charset='utf8'):\n self.connection = pymysql.connect(host=host,\n user=user,\n password=password,\n db=db,\n charset=charset)\n self.cursor = self.connection.cursor()\n def test(self) :\n try:\n query = \"UPDATE data_table SET data=98 WHERE seat_number=0 AND name='Soongsil Univ';\"\n print(query)\n self.cursor.execute(query)\n self.connection.commit()\n\n except Exception as e :\n print(str(e))\n \n \n def insert(self, table, *values):\n try:\n query = \"INSERT INTO \"+table+\" VALUES ({}, {}, {},{});\".format(*values)\n print(query)\n self.cursor.execute(query)\n self.connection.commit()\n\n except Exception as e :\n print(str(e))\n\n def update(self, table, value1,value2, *conds):\n try:\n query = \"UPDATE \"+table+\" SET remainedtime={},cond={}\".format(value1,value2)\n query += \" WHERE seat_number={} AND name={};\".format(*conds)\n print(query)\n self.cursor.execute(query)\n self.connection.commit()\n\n except Exception as e :\n print(str(e))\n\n def delete(self, table, *conds):\n try:\n query = \"DELETE FROM \"+table\n query += \" WHERE seat_number={} AND name={};\".format(*conds)\n print(query)\n self.cursor.execute(query)\n self.connection.commit()\n\n except Exception as e :\n print(str(e))\n\n def select(self, table):\n try:\n query = \"SELECT * FROM \"+table+\";\"\n self.cursor.execute(query)\n return self.cursor.fetchone()\n\n except Exception as e :\n print(str(e))\n\n def select_where(self, table, where):\n try:\n query = \"SELECT * FROM {} WHERE {};\".format(table, where)\n self.cursor.execute(query)\n return self.cursor.fetchone()\n\n except Exception as e :\n print(str(e))\n", "sub_path": "ServerSit/odr.py", "file_name": "odr.py", "file_ext": "py", "file_size_in_byte": 2282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pymysql.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "223646032", "text": "import annoy\nimport numpy as np\nimport logging\nimport glob\nimport re\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass RecommenderVectorIndex(object):\n \"\"\"Database index for finding nearest neighbors. The rows are float\n vectors, such as Word2Vec vectors or some other embeddings.\n\n \"\"\"\n\n def __init__(self, vector_size, n_indices=1, n_trees=16):\n \"\"\"Creates an instance of recommender index which contains one or many\n AnnoyIndex instances.\n\n Args:\n vector_size (int): Size of vector annoy index keeps.\n n_indices (int, optional): Number of annoy indices to create.\n n_trees (int, optional): Number of trees in each annoy\n index. 
A larger value will give more accurate results,\n                but larger indexes.\n        \"\"\"\n        self.indices = [annoy.AnnoyIndex(vector_size) for _ in\n                        range(n_indices)]\n        self.vector_size = vector_size\n        self.n_trees = n_trees\n\n    @property\n    def n_indices(self):\n        return len(self.indices)\n\n    def _fill_build_index(self, index, data):\n        \"\"\"Fills one annoy index with data and builds it.\n\n        Args:\n            index (annoy.AnnoyIndex): Index to fill and build.\n            data (numpy.array): Array with vectors.\n        \"\"\"\n        logger.info(\"INSERTing {0} vectors.\".format(data.shape[0]))\n        for i in range(data.shape[0]):\n            index.add_item(i, data[i])\n        logger.info(\"Building index.\")\n        index.build(self.n_trees)\n\n    def fill_build(self, data):\n        \"\"\"Fills annoy indices with vectors in data and builds all indices.\n\n        Args:\n            data (numpy.array, list of numpy.array): If `self.n_indices` ==\n                1, then `data` is a numpy.array with number of columns ==\n                `self.vector_size`. Otherwise, `data` is a list of length\n                equal to `self.n_indices` of `numpy.array`'s with the shape\n                above.\n        \"\"\"\n        assert (self.n_indices == 1 and isinstance(data, np.ndarray)) or (\n            self.n_indices > 1 and isinstance(data, list) and all(map(\n                lambda x: isinstance(x, np.ndarray), data)))\n\n        logger.info(\"Fill {0} indices.\".format(self.n_indices))\n        if self.n_indices == 1:\n            self._fill_build_index(self.indices[0], data)\n        else:\n            _parallel_fill_build(self, data)\n\n    def get_n_items(self):\n        \"\"\"Gets a list of sizes of each index.\n\n        Returns:\n            res (list of ints): List of sizes of each index.\n\n        \"\"\"\n        return [index.get_n_items() for index in self.indices]\n\n    def get_nns_by_vector(self, vector, n_neighbors, n_index=0, search_k=-1,\n                          include_distances=True):\n        \"\"\"Returns `n_neighbors` closest items of `vector`\n        in index with number `n_index`.\n\n        Args:\n            vector (numpy.array): Vector which neighbors you want to find.\n            n_neighbors (int): How many neighbors to find.\n            n_index (int): In which index to search.\n            search_k: The number of nodes to inspect during searching. A larger\n                value will give more accurate results,\n                but will take longer time to return.\n            include_distances (bool): Whether to include distances or not.\n                If True, it will return a 2 element tuple with two lists in it:\n                the second one containing all corresponding distances.\n\n        Returns:\n            res (list or tuple of two lists): List of neighbors ids. 
If\n                `include_distances` is True, then tuple of two lists.\n        \"\"\"\n        res = self.indices[n_index].get_nns_by_vector(vector, n_neighbors,\n                                                      search_k=search_k,\n                                                      include_distances=include_distances)\n        return res\n\n    def get_item_vector(self, i, n_index=0):\n        \"\"\"Returns vector with number `i` from index `n_index`.\n\n        Args:\n            i (int): Id of vector.\n            n_index: Number of index for searching.\n        \"\"\"\n        return self.indices[n_index].get_item_vector(i)\n\n    def save(self, fname):\n        for i in range(self.n_indices):\n            index = self.indices[i]\n            fname_out = fname + str(i)\n            logger.info(\"Save index #{0} to {1}\".format(i, fname_out))\n            index.save(fname_out)\n\n    @staticmethod\n    def __resolve_index_number(fname):\n        match = re.fullmatch(r\".*?(\\d+)\", fname)\n        return int(match.group(1))\n\n    def load(self, fname):\n        index_fnames = glob.glob(fname + '*')\n        assert len(index_fnames) == self.n_indices\n        for index_fname in index_fnames:\n            # Restore each saved index into the slot encoded in its file name.\n            i = self.__resolve_index_number(index_fname)\n            self.indices[i] = annoy.AnnoyIndex(self.vector_size)\n            self.indices[i].load(index_fname)\n", "sub_path": "django/mapdrive/utils/putin_face_recognition/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 4923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "annoy.AnnoyIndex", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 62, "usage_type": "attribute"}, {"api_name": "re.fullmatch", "line_number": 122, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 126, "usage_type": "call"}, {"api_name": "annoy.AnnoyIndex", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "244013969", "text": "import torch\nfrom torch.nn import Linear\n\nfrom torch_geometric.nn import TransformerConv\n\nclass GraphAttentionEmbedding(torch.nn.Module):\n    def __init__(self, in_channels, out_channels, msg_dim, time_enc):\n        super(GraphAttentionEmbedding, self).__init__()\n        self.time_enc = time_enc\n        edge_dim = msg_dim + time_enc.out_channels\n        self.conv = TransformerConv(in_channels, out_channels // 2, heads=2,\n                                    dropout=0.1, edge_dim=edge_dim)\n\n    def forward(self, x, last_update, edge_index, t, msg):\n        rel_t = last_update[edge_index[0]] - t\n        rel_t_enc = self.time_enc(rel_t.to(x.dtype))\n        edge_attr = torch.cat([rel_t_enc, msg], dim=-1)\n        return self.conv(x, edge_index, edge_attr)\n\n\nclass LinkPredictor(torch.nn.Module):\n    def __init__(self, in_channels):\n        super(LinkPredictor, self).__init__()\n        self.lin_src = Linear(in_channels, in_channels)\n        self.lin_dst = Linear(in_channels, in_channels)\n        self.lin_final = Linear(in_channels, 1)\n\n    def forward(self, z_src, z_dst):\n        h = self.lin_src(z_src) + self.lin_dst(z_dst)\n        h = h.relu()\n        return self.lin_final(h)", "sub_path": "src/layers/gnn.py", "file_name": "gnn.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.nn", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.TransformerConv", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "588713837", "text": "from __future__ import division\nfrom bokeh.plotting import cursession, figure, output_server, output_notebook, show\nfrom copy import copy\nfrom keras.callbacks import Callback\nfrom numpy import inf, nan\nfrom os import _exit\nfrom Print import printflush\n\n\n# The following defines a Class object that monitors and records\n# certain key data from the Neural Network training process;\n# it also includes a method \"plot_learning_curves\" that turns on a separate CPU process\n# that plots the Training and Validation learning curves live\nclass NeuralNetworkTrainingMonitor(Callback):\n def __init__(self, reporting_freq=False, plot_title='Neural Network Learning Curves', bokeh_output='server'):\n super(Callback, self).__init__()\n self.latest_epoch = -1\n self.latest_batch = -1\n self.batches = []\n self.train_losses = []\n self.approx_train_acc_in_latest_epoch = 0.\n self.val_losses = []\n self.latest_val_acc = None\n self.min_val_loss = inf\n self.best_model = None\n self.best_model_epoch = None\n self.best_model_train_acc = None\n self.best_model_val_acc = None\n self.reporting_freq = reporting_freq\n\n printflush('\\nConnecting to Bokeh Server for live Learning Curves plotting...\\n')\n try:\n output_server('')\n self.bokeh_session = cursession()\n self.fig = figure(title=plot_title,\n x_axis_label='# of Training Data Batches', y_axis_label='Loss',\n plot_height=680, plot_width=880)\n self.fig.line((), (), name='TrainLoss', legend='Training Loss')\n self.fig.circle((), (), name='ValidLoss', legend='Validation Loss', color='red')\n show(self.fig)\n self.train_losses_curve_data_source = self.fig.select(dict(name='TrainLoss'))[0].data_source\n self.valid_losses_curve_data_source = self.fig.select(dict(name='ValidLoss'))[0].data_source\n printflush('\\nConnecting to Bokeh Server for live Learning Curves plotting... 
done!\\n')\n except:\n printflush('\\nBokeh Server Connection *FAILED!*')\n printflush('Please make sure Bokeh package is already installed in Python, and')\n printflush('please open a new Command-Line Terminal window\\n (separate from this Terminal window)')\n printflush(' and run the following command firs to launch Bokeh Server:')\n printflush(' bokeh-server --backend=memory\\n')\n _exit(0)\n\n def on_train_begin(self, logs={}):\n printflush('\\nFFNN Training Progress')\n printflush('______________________')\n\n def on_epoch_begin(self, epoch, logs={}):\n self.latest_epoch += 1\n\n def on_batch_end(self, batch, logs={}):\n self.latest_batch += 1\n self.batches.append(self.latest_batch)\n self.train_losses.append(logs.get('loss'))\n train_acc = logs.get('acc')\n if not train_acc:\n train_acc = logs.get('accuracy')\n self.approx_train_acc_in_latest_epoch += (train_acc - self.approx_train_acc_in_latest_epoch) / (batch + 1)\n self.val_losses.append(logs.get('val_loss', nan))\n if self.reporting_freq and not(self.latest_batch % self.reporting_freq):\n self.report(batch_in_epoch=batch)\n\n def on_epoch_end(self, epoch, logs={}):\n current_val_loss = logs.get('val_loss')\n self.latest_val_acc = logs.get('val_acc')\n if not self.latest_val_acc:\n self.latest_val_acc = logs.get('val_accuracy')\n if current_val_loss is None:\n self.best_model = copy(self.model)\n else:\n self.val_losses[-1] = current_val_loss\n if current_val_loss < self.min_val_loss:\n self.min_val_loss = current_val_loss\n self.best_model = copy(self.model)\n self.best_model_epoch = epoch\n self.best_model_train_acc = self.approx_train_acc_in_latest_epoch\n self.best_model_val_acc = self.latest_val_acc\n if not self.reporting_freq:\n self.report()\n\n def on_train_end(self, logs={}):\n if self.reporting_freq:\n self.report()\n printflush('\\nFFNN Training Finished! (%s Batches in total)\\n'\n % '{:,}'.format(self.latest_batch))\n if self.latest_val_acc is None:\n printflush('Training Accuracy (approx) = %s%%\\n'\n % '{:.1f}'.format(100. * self.approx_train_acc_in_latest_epoch))\n else:\n printflush('Best trained FFNN (with lowest Validation Loss) is from epoch #%s'\n % '{:,}'.format(self.best_model_epoch))\n printflush('Training Accuracy (approx) = %s%%, Validation Accuracy = %s%%\\n'\n % ('{:.1f}'.format(100. * self.best_model_train_acc),\n '{:.1f}'.format(100. * self.latest_val_acc)))\n\n def report(self, batch_in_epoch=None):\n if batch_in_epoch:\n batch_text = ' Batch ' + '{0:03}'.format(batch_in_epoch)\n else:\n batch_text = ''\n if self.latest_val_acc is None:\n val_acc_text = ''\n else:\n val_acc_text = 'ValidAcc (prev epoch)=' + '{:.1f}'.format(100. * self.latest_val_acc) + '%'\n printflush('Epoch %s%s: TrainAcc (approx)=%s%%, %s'\n % ('{:,}'.format(self.latest_epoch),\n batch_text,\n '{:.1f}'.format(100. 
* self.approx_train_acc_in_latest_epoch),\n val_acc_text), end='\\r')\n\n self.train_losses_curve_data_source.data['x'] = self.batches\n self.train_losses_curve_data_source.data['y'] = self.train_losses\n\n self.valid_losses_curve_data_source.data['x'] = self.batches\n self.valid_losses_curve_data_source.data['y'] = self.val_losses\n\n self.bokeh_session.store_objects(self.train_losses_curve_data_source, self.valid_losses_curve_data_source)\n", "sub_path": "Programming Scripts/zzz Utility Code/Python/KerasTrainingMonitor.py", "file_name": "KerasTrainingMonitor.py", "file_ext": "py", "file_size_in_byte": 5998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "keras.callbacks.Callback", "line_number": 14, "usage_type": "name"}, {"api_name": "keras.callbacks.Callback", "line_number": 16, "usage_type": "argument"}, {"api_name": "numpy.inf", "line_number": 24, "usage_type": "name"}, {"api_name": "Print.printflush", "line_number": 31, "usage_type": "call"}, {"api_name": "bokeh.plotting.output_server", "line_number": 33, "usage_type": "call"}, {"api_name": "bokeh.plotting.cursession", "line_number": 34, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "bokeh.plotting.show", "line_number": 40, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 43, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 45, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 46, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 47, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 48, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 49, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 50, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 53, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 67, "usage_type": "argument"}, {"api_name": "copy.copy", "line_number": 77, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 82, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 92, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 95, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 98, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 100, "usage_type": "call"}, {"api_name": "Print.printflush", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "399666237", "text": "import pygame\nimport board\nfrom pygame.locals import *\n\nwidth = 700\nheight = 600\n\n\ndef start():\n pygame.init()\n screen = pygame.display.set_mode((width, height))\n game_loop(screen)\n pygame.quit()\n\n\ndef game_loop(screen):\n game_board = board.Board()\n colors = {0: (255, 255, 255), 1: (200, 0, 0), 2: (255, 255, 0)}\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == MOUSEBUTTONDOWN:\n if game_board.winner is not None:\n game_board.reset_board()\n pos = pygame.mouse.get_pos()\n pos = pos[0] // 100\n game_board.move(pos)\n\n screen.fill((0, 0, 255))\n for i in range(board.board_size[0]):\n for j in range(board.board_size[1]):\n pygame.draw.circle(screen, (5, 5, 5), (50 + 100 * j, 550 - 100 * i), 45)\n pygame.draw.circle(screen, colors[game_board.board[i, j]], (50 + 100 * j, 550 - 100 * i), 
43)\n\n if game_board.winner is not None:\n pygame.display.set_caption('Show Text')\n font = pygame.font.Font('Inkfree.ttf', 72)\n text = font.render(f\"player {game_board.winner} win!!!\", True, colors[game_board.winner], (0, 0, 0))\n textRect = text.get_rect()\n textRect.center = (width // 2, height // 2)\n screen.blit(text, textRect)\n\n pygame.display.flip()\n", "sub_path": "UI.py", "file_name": "UI.py", "file_ext": "py", "file_size_in_byte": 1500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.init", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 13, "usage_type": "call"}, {"api_name": "board.Board", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 27, "usage_type": "attribute"}, {"api_name": "board.board_size", "line_number": 32, "usage_type": "attribute"}, {"api_name": "board.board_size", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "331807531", "text": "# import numpy as np\n\n# from keras.models import Sequential\n# from keras.layers import Dense, Dropout, Flatten\n# from keras.layers import Convolution2D, MaxPooling2D\n\n# np.random.seed(42) # set deterministic reproducibility\n\nimport image\nfrom log import debug\nfrom keras.models import model_from_json\nfrom multiclasshelper import MultiClassHelper\nfrom image import image2pix, get_resized_pixels\nimport pickle\nimport numpy as np\n\nclass wswtm():\n \"\"\"\n Determines which pixel data is used.\n \"\"\"\n red = True\n green = False\n blue = False\n\n default_model_path = 'resources/models/'\n default_model_name = 'basic_cnn'\n\n load_on_init = True\n\n def __init__(self):\n if self.load_on_init is True:\n self.init()\n\n def init(self):\n model, dct = self.load_model(self.default_model_path, self.default_model_name)\n\n self.model = model\n self.dct = dct\n\n \"\"\"\n Loads a trained model\n \"\"\"\n def load_model(self, model_path, model_name):\n self.model_path = model_path\n self.model_name = model_name\n\n json_file = open(self.model_path + self.model_name + '.json', 'r')\n loaded_model = json_file.read()\n json_file.close()\n \n model = model_from_json(loaded_model)\n model.load_weights(self.model_path + self.model_name + \".h5\")\n 
dct = pickle.load(open(self.model_path + self.model_name + '_dct.p', 'rb'))\n\n return model, dct\n\n \"\"\"\n returns an array of possible tags for a give image (specified by a path)\n \"\"\"\n def image2tags(self, path, treshold=0.75):\n p, _, _ = get_resized_pixels(path, 96, 96)\n pxl = []\n\n for px in p:\n pxl.append(px[0]) \n\n vec = np.asarray(pxl)\n vec = vec.reshape(1, 96, 96, 1)\n\n if self.model is None:\n self.init()\n\n r = self.model.predict(vec)\n\n mch = MultiClassHelper()\n\n rp = mch.array_to_classes_with_prob(r[0], self.dct)\n return rp\n\n \n \"\"\"\n returns a list of all classes known to the classifier\n \"\"\"\n def get_classes(self):\n if self.dct:\n return self.dct.keys()\n return None\n\ndebug(\"wswtm init...\") \n\n", "sub_path": "src/python/wswtm.py", "file_name": "wswtm.py", "file_ext": "py", "file_size_in_byte": 2215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "keras.models.model_from_json", "line_number": 51, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 53, "usage_type": "call"}, {"api_name": "image.get_resized_pixels", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 67, "usage_type": "call"}, {"api_name": "multiclasshelper.MultiClassHelper", "line_number": 75, "usage_type": "call"}, {"api_name": "log.debug", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "64237498", "text": "#! /usr/bin/python3\n# 29ImageSiteDownloader.py: This program allows you to search for a category\n\nimport requests\nimport bs4\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.by import By\nimport os\n\n# Scrape the front page for elements with the \".Tag-name\" class\ndef scrapeFrontPageTags():\n tag_list = []\n options = Options()\n options.headless = True\n browser = webdriver.Firefox(options=options)\n browser.get('https://imgur.com/')\n browser.implicitly_wait(5)\n more_tags_button = browser.find_element(By.CLASS_NAME, 'TrendingTags-labelToggle')\n more_tags_button.click()\n tags = browser.find_elements(By.CLASS_NAME, 'Tag-name')\n for tag in tags:\n tag_list.append(str(tag.text))\n browser.quit()\n \n return tag_list\n\n\ndef display_tags(tag_list):\n print('### TAG LIST ###')\n for tag in tag_list:\n # Add 1 to the .index(tag) value to account for index starting from 0:\n print('Tag #%s: %s' % (tag_list.index(tag) + 1, tag))\n user_selection = int(input('Please type in the number of the tag you wish to download images from Imgur and press the Enter (return) key: '))\n # Subtract 1 from 'user_selection' to account for index starting from 0:\n user_selection = user_selection - 1\n print('user_selection: %s' % (user_selection))\n images_tag_link = str('https://www.imgur.com/t/' + str(tag_list[user_selection]))\n print('images_tag_link: %s' % (images_tag_link))\n \n return images_tag_link\n\n\ndef scrape_images_tag_link(images_tag_link):\n image_list = []\n options = Options()\n options.headless = True\n browser = webdriver.Firefox(options=options)\n browser.get(images_tag_link)\n browser.implicitly_wait(5)\n images = browser.find_elements(By.CSS_SELECTOR, 'div.Post-item-media > img')\n print('\\n\\nGoing through images from images_tag_link: ')\n for image in images:\n image_list.append(image.get_attribute('src'))\n \n return image_list\n\n\ndef writeImagesToFile(image_list):\n cwd = os.getcwd()\n imgurImagesPath = str(str(cwd) + '/imgurImages')\n if not 
os.path.exists(imgurImagesPath):\n os.makedirs(str(imgurImagesPath))\n os.chdir(imgurImagesPath)\n for image in image_list:\n print('\\n\\n')\n print('imgurImages')\n print('image: ' + str(image))\n indexNumber = image_list.index(image) + 1\n imageName = str('image' + str(indexNumber))\n image_file = open(str(str(imageName) + '.png'), 'wb')\n http_image_link = str(image)\n print('http_image_link: %s' % (http_image_link))\n r = requests.get(http_image_link, timeout=5)\n for chunk in r.iter_content(100000):\n image_file.write(chunk)\n image_file.close()\n print('Imgur images downloaded successfully! Thank you, and good bye!')\n\n \ndef main():\n tag_list = scrapeFrontPageTags()\n images_tag_link = display_tags(tag_list)\n image_list = scrape_images_tag_link(images_tag_link)\n writeImagesToFile(image_list)\n \n \nif __name__ == \"__main__\":\n main()\n", "sub_path": "python/03AutomateTheBoringStuffWithPython/11Webscraping/29ImageSiteDownloader.py", "file_name": "29ImageSiteDownloader.py", "file_ext": "py", "file_size_in_byte": 3098, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.webdriver.firefox.options.Options", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 19, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 21, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.firefox.options.Options", "line_number": 46, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 48, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 51, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 63, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 64, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "44860443", "text": "from __future__ import print_function\n\nimport errno\nimport os\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\n\n\nEPS = 1e-7\n\n\ndef assert_eq(real, expected):\n assert real == expected, '%s (true) vs %s (expected)' % (real, expected)\n\n\ndef assert_array_eq(real, expected):\n assert (np.abs(real-expected) < EPS).all(), \\\n '%s (true) vs %s (expected)' % (real, expected)\n\n\ndef load_folder(folder, suffix):\n imgs = []\n for f in sorted(os.listdir(folder)):\n if f.endswith(suffix):\n imgs.append(os.path.join(folder, f))\n return imgs\n\n\ndef load_imageid(folder):\n images = load_folder(folder, 'jpg')\n img_ids = set()\n for img in images:\n img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])\n img_ids.add(img_id)\n return img_ids\n\n\ndef pil_loader(path):\n with 
open(path, 'rb') as f:\n        with Image.open(f) as img:\n            return img.convert('RGB')\n\n\ndef weights_init(m):\n    \"\"\"custom weights initialization.\"\"\"\n    cname = m.__class__\n    if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:\n        m.weight.data.normal_(0.0, 0.02)\n    elif cname == nn.BatchNorm2d:\n        m.weight.data.normal_(1.0, 0.02)\n        m.bias.data.fill_(0)\n    else:\n        print('%s is not initialized.' % cname)\n\n\ndef init_net(net, net_file):\n    if net_file:\n        net.load_state_dict(torch.load(net_file))\n    else:\n        net.apply(weights_init)\n\n\ndef create_dir(path):\n    if not os.path.exists(path):\n        try:\n            os.makedirs(path)\n        except OSError as exc:\n            if exc.errno != errno.EEXIST:\n                raise\n\n\nclass Logger(object):\n    def __init__(self, output_name):\n        dirname = os.path.dirname(output_name)\n        if not os.path.exists(dirname):\n            os.mkdir(dirname)\n\n        self.log_file = open(output_name, 'w')\n        self.infos = {}\n\n    def append(self, key, val):\n        vals = self.infos.setdefault(key, [])\n        vals.append(val)\n\n    def log(self, extra_msg=''):\n        msgs = [extra_msg]\n        for key, vals in self.infos.iteritems():\n            msgs.append('%s %.6f' % (key, np.mean(vals)))\n        msg = '\\n'.join(msgs)\n        self.log_file.write(msg + '\\n')\n        self.log_file.flush()\n        self.infos = {}\n        return msg\n\n    def write(self, msg):\n        self.log_file.write(msg + '\\n')\n        self.log_file.flush()\n        print(msg)\n\n\nclass EvalbyTypeLogger(object):\n    def __init__(self, a_type_dict, q_type_dict):\n        self.a_type_dict = a_type_dict\n        self.q_type_dict = q_type_dict\n        self.at_num = len(a_type_dict)\n        self.qt_num = len(q_type_dict)\n\n        self.at_accu = np.zeros(self.at_num)\n        self.at_count = np.zeros(self.at_num)\n        self.qt_accu = np.zeros(self.qt_num)\n        self.qt_count = np.zeros(self.qt_num)\n\n    def update(self, score_tensor, a_type, q_type):\n        \"\"\"\n        score_tensor: [batch_size, num_answers]\n        a_type: [batch_size] LongTensor\n        q_type: [batch_size] LongTensor\n        \"\"\"\n        batch_scores = score_tensor.sum(1)\n        a_type = a_type.view(-1)\n        q_type = q_type.view(-1)\n\n        for i in range(self.at_num):\n            num_at_i = torch.nonzero(a_type == (i+1)).numel()\n            self.at_count[i] += num_at_i\n            score_at_i = ((a_type == (i+1)).float() * batch_scores).sum()\n            self.at_accu[i] += score_at_i\n\n        for i in range(self.qt_num):\n            num_qt_i = torch.nonzero(q_type == (i+1)).numel()\n            self.qt_count[i] += num_qt_i\n            score_qt_i = ((q_type == (i+1)).float() * batch_scores).sum()\n            self.qt_accu[i] += score_qt_i\n\n    def printResult(self, show_q_type=False, show_a_type=True):\n        if(show_a_type):\n            print(\"========== Accuracy by Type of Answers ==========\")\n            for key in self.a_type_dict.keys():\n                type_score = self.at_accu[self.a_type_dict[key]-1]\n                type_num = self.at_count[self.a_type_dict[key]-1] + 1e-10\n                print('Type: \\t %s \\t Accuracy: \\t %.6f \\t Total Type Num: \\t %.1f' % (key, float(type_score)/float(type_num), float(type_num)) )\n        if(show_q_type):\n            print(\"========== Accuracy by Type of Questions ==========\")\n            for key in self.q_type_dict.keys():\n                type_score = self.qt_accu[self.q_type_dict[key]-1]\n                type_num = self.qt_count[self.q_type_dict[key]-1] + 1e-10\n                print('Type: \\t %s \\t Accuracy: \\t %.6f \\t Total Type Num: \\t %.1f' % (key, float(type_score)/float(type_num), float(type_num)) )\n        #print(\"==================== End print ====================\")\n\n\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.abs", 
"line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 42, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "445510582", "text": "import tensorflow as tf\nfrom functools import partial\nfrom sys import stdout\nfrom sklearn.model_selection import KFold\nimport time\nfrom tensorflow.python.client import timeline\n\nfrom noise_models_and_integration import *\n\ndef fidelity_cost_fn(network,y_, learning_rate, params, n_ts, evo_time,dim, noise_name):\n\n tmp_integrate_lind = partial(integrate_lind, params=params, n_ts=n_ts, evo_time=evo_time, noise_name=noise_name, tf_result=True)\n net = tf.cast(network, tf.complex128)\n\n ctrls_to_mtx = tf.map_fn(tmp_integrate_lind, net) # new batch in which instead of control pulses i have matrices\n\n batch_to_loss_fn = tf.stack([y_, ctrls_to_mtx], axis=1) # create tensor of pairs (target, generated_matrix)\n tmp_fid_err = partial(fidelity_err, dim=dim, tf_result=True)\n batch_of_fid_err = tf.map_fn(tmp_fid_err, batch_to_loss_fn, dtype=tf.float32) # batch of fidelity errors\n\n loss = tf.cast(tf.reduce_mean(batch_of_fid_err),\n tf.float32) # loss function, which is a mean of fid_erros over batch\n tf.summary.scalar('loss_func', loss)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9).minimize(loss)\n accuracy = tf.cast(tf.reduce_mean(1 - batch_of_fid_err), tf.float32)\n return (optimizer, accuracy)\n\ndef my_lstm(x_,controls_nb, size_of_lrs, keep_prob):\n # 'layers' is a list of the number of the units 
on each layer\n\n cells = []\n for n_units in size_of_lrs:\n cell = tf.nn.rnn_cell.LSTMCell(num_units=n_units, use_peepholes=True)\n\n # cell = tf.nn.rnn_cell.GRUCell(num_units=n_units)\n cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=keep_prob)\n cells.append(cell)\n cells\n\n print(\"yes dropout wrapper\")\n outputs = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n cells_fw=cells,\n cells_bw=cells,\n inputs=x_,\n dtype=tf.float32,\n parallel_iterations=32\n )\n # for one_lstm_cell in cells:\n # print(one_lstm_cell.variables)\n # one_kernel, one_bias,w_f_diag,w_i_diag, w_o_diag = one_lstm_cell.variables\n # # one_kernel, one_bias, two_kernel, two_bias = one_lstm_cell.variables\n # tf.summary.histogram(\"Kernel\", one_kernel)\n # tf.summary.histogram(\"Bias\", one_bias)\n # # tf.summary.histogram(\"Kernel2\", two_kernel)\n # # tf.summary.histogram(\"Bias2\", two_bias)\n #\n # tf.summary.histogram(\"w_f_diag\", w_f_diag)\n # tf.summary.histogram(\"w_i_diag\", w_i_diag)\n # tf.summary.histogram(\"w_o_diag\", w_o_diag)\n\n print(outputs[2])\n output_fw, output_bw= tf.split(outputs[0], 2, axis=2)\n tf.summary.histogram(\"output_fw\", output_fw)\n tf.summary.histogram(\"output_bw\", output_bw)\n tf.summary.histogram(\"cell_fw\", outputs[1][0])\n tf.summary.histogram(\"cell_bw\", outputs[2][0])\n sum_fw_bw = tf.add(output_fw, output_bw)\n squeezed_layer = tf.reshape(sum_fw_bw, [-1, size_of_lrs[-1]])\n droput = tf.nn.dropout(squeezed_layer, keep_prob)\n dense = tf.contrib.layers.fully_connected(droput, controls_nb, activation_fn=tf.nn.tanh)\n output = tf.reshape(dense, [tf.shape(x_)[0],tf.shape(x_)[1], controls_nb])\n return output\n\n\n\ndef fit(sess,\n network,\n x_,\n y_,\n keep_prob,\n train_input,\n train_target,\n test_input,\n test_target,\n nb_epochs,\n batch_size,\n train_set_size,\n learning_rate,\n model_params,\n n_ts,\n evo_time,\n dim,\n noise_name):\n\n tensorboard_path = 'tensorboard/' + str(time.ctime())\n\n\n optimizer, accuracy = fidelity_cost_fn(network, y_, learning_rate, model_params, n_ts, evo_time,dim, noise_name)\n\n\n # 500 is the number of test samples used in monitoring the efficiency of the network\n test_sample_indices = np.arange(500)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)\n kf = KFold(n_splits=(train_set_size//batch_size), shuffle = True)\n print(np.shape(test_input))\n # LEARNING LOOP\n with sess.as_default():\n\n # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n\n sess.run(tf.global_variables_initializer())\n j = -1\n train_table = []\n test_table = []\n for i in range(int(np.ceil(nb_epochs / (train_set_size // batch_size)))):\n for train_index, rand in kf.split(train_input, train_target):\n j += 1\n batch = (train_input[rand], train_target[rand])\n # batch = (train_input[(j%train_set_size):((j+batch_size)%train_set_size)], train_target[(j%train_set_size):((j+batch_size)%train_set_size)])\n # MONITORING OF EFFICENCY\n if j % 1000 == 0:\n summary, train_accuracy = sess.run( [merged, accuracy], feed_dict={x_: batch[0],\n y_: batch[1],\n keep_prob: 1.0})\n train_table.append(train_accuracy)\n\n test_accuracy = accuracy.eval(feed_dict={x_: test_input[test_sample_indices],\n y_: test_target[test_sample_indices],\n keep_prob: 1.0})\n test_table.append(test_accuracy)\n train_writer.add_summary(summary, j)\n print(\"step %d, training accuracy %g\" % (j, train_accuracy))\n stdout.flush()\n print(\"step %d, test accuracies %g\" % (j, 
test_accuracy))\n stdout.flush()\n print (\" \")\n stdout.flush()\n sess.run(optimizer,\n feed_dict={x_: batch[0],\n y_: batch[1],\n keep_prob: 0.5})#,options=options, run_metadata=run_metadata)\n\n # fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n # chrome_trace = fetched_timeline.generate_chrome_trace_format()\n #\n # with open('timeline_02_step_{}.json'.format(i), 'w') as f:\n # f.write(chrome_trace)\n test_accuracy = accuracy.eval(feed_dict={x_: test_input,\n y_: test_target,\n keep_prob: 1.})\n\n return (test_accuracy,train_table,test_table)\n\n\ndef get_prediction(sess, network, x_, keep_prob, test_input):\n\n prediction = sess.run(network, feed_dict={x_:test_input,\n keep_prob: 1.0})\n return prediction\n", "sub_path": "architecture.py", "file_name": "architecture.py", "file_ext": "py", "file_size_in_byte": 6851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "functools.partial", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.complex128", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.map_fn", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 17, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.map_fn", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.LSTMCell", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.DropoutWrapper", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.split", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 
67, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.fully_connected", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 73, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 115, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 137, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 137, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 139, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 139, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "370998504", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 15 14:39:29 2019\n\n@author: em812\n\"\"\"\nimport pdb\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\n\ndef univariate_tests(\n X, y, control='N2', test='ANOVA',\n comparison_type='multiclass',\n multitest_correction='fdr_by', fdr=0.05,\n n_jobs=-1):\n \"\"\"\n Test whether a single compound has siginificant effects compared to the\n control using univariate tests for each feature.\n Each feature is tested using one of the methods 'ANOVA', 'Kruskal-Wallis',\n 'Mann-Whitney test' or 't-test'.\n The pvalues from the different features are corrected for multiple\n comparisons using the multitest methods of statsmodels.\n\n Parameters\n ----------\n X : TYPE\n DESCRIPTION.\n y : TYPE\n DESCRIPTION.\n comparison_type : TYPE, optional\n DESCRIPTION. The default is 'multiclass'.\n control : float, optional. The default is .0.\n The drug_dose entry for the control points.\n Must provide control dose if the comparison_type is 'binary_each_group'.\n test : TYPE, optional\n DESCRIPTION. The default is 'ANOVA'.\n multitest_correction : string or None, optional\n DESCRIPTION. The default is 'fdr_by'.\n fdr : TYPE, optional\n DESCRIPTION. 
The default is 0.05.\n\n    Raises\n    ------\n    ValueError\n        DESCRIPTION.\n\n    Returns\n    -------\n    TYPE\n        DESCRIPTION.\n    TYPE\n        DESCRIPTION.\n\n    \"\"\"\n    from scipy.stats import kruskal, mannwhitneyu, f_oneway, ttest_ind\n    from functools import partial\n\n    if not np.isin(control, np.array(y)):\n        raise ValueError('control not found in the y array.')\n\n    if test.startswith('Wilkoxon') or test == 't-test':\n        if comparison_type=='multiclass' and np.unique(y).shape[0]>2:\n            raise ValueError(\n                \"\"\"\n                The Wilkoxon rank sum test cannot be used to compare between\n                more than two groups. Use a different test or the\n                binary_each_group comparison_type instead.\n                \"\"\")\n        else:\n            comparison_type = 'binary_each_group'\n\n    if not isinstance(X, pd.DataFrame):\n        X = pd.DataFrame(X)\n\n    # Local function for parallel processing of univariate tests for each drug\n    def stats_test(X, y, test, **kwargs):\n        from joblib import Parallel, delayed\n\n        def _one_fit(ift, samples, **kwargs):\n            samples = [s[~np.isnan(s)] for s in samples if not all(np.isnan(s))]\n            if len(samples)<2:\n                return ift, (np.nan, np.nan)\n            return ift, test(*samples, **kwargs)\n\n        parallel = Parallel(n_jobs=n_jobs, verbose=True)\n        func = delayed(_one_fit)\n\n        res = parallel(\n            func(ift, [sample[:,ift]\n                       for sample in [np.array(X[y==iy]) for iy in np.unique(y)]],\n                 **kwargs)\n            for ift in range(X.shape[1]))\n\n        order = [ift for ift,(r,p) in res]\n        rs = np.array([r for ift,(r,p) in res])\n        ps = np.array([p for ift,(r,p) in res])\n\n        return rs[order], ps[order]\n\n    # Create the function that will test every feature of a given drug\n    if test == 'ANOVA':\n        func = partial(stats_test, test=f_oneway)\n    elif test.startswith('Kruskal'):\n        func = partial(stats_test, test=kruskal, nan_policy='raise')\n    elif test.startswith('Mann-Whitney'):\n        func = partial(stats_test, test=mannwhitneyu)\n    elif test == 't-test':\n        func = partial(stats_test, test=ttest_ind)\n\n    # For each dose get significant features\n    if comparison_type=='multiclass':\n        stats, pvals = func(X, y)\n        pvals = pd.DataFrame(pvals.T, index=X.columns, columns=[test])\n        stats = pd.DataFrame(stats.T, index=X.columns, columns=[test])\n\n    elif comparison_type=='binary_each_group':\n        groups = np.unique(y[y!=control])\n\n        pvals=[]\n        stats=[]\n        for igrp, grp in enumerate(groups):\n\n            mask = np.isin(y,[control, grp])\n            _stats, _pvals = func(X[mask], y[mask])\n\n            pvals.append(_pvals)\n            stats.append(_stats)\n        pvals = pd.DataFrame(np.array(pvals).T, index=X.columns, columns=groups)\n        stats = pd.DataFrame(np.array(stats).T, index=X.columns, columns=groups)\n    else:\n        raise ValueError('Comparison type not recognised.')\n\n    reject, pvals = _multitest_correct(pvals, multitest_correction, fdr)\n\n    return stats, pvals, reject\n\ndef get_effect_sizes(\n    X, y, control='N2',\n    test='ANOVA', comparison_type='multiclass',\n    n_jobs=-1):\n    \"\"\"\n    Test whether a single compound has significant effects compared to the\n    control using univariate tests for each feature.\n    Each feature is tested using one of the methods 'ANOVA', 'Kruskal-Wallis',\n    'Mann-Whitney' or 't-test'.\n    The pvalues from the different features are corrected for multiple\n    comparisons using the multitest methods of statsmodels.\n\n    Parameters\n    ----------\n    X : TYPE\n        DESCRIPTION.\n    drug_dose : TYPE\n        DESCRIPTION.\n    comparison_type : TYPE, optional\n        DESCRIPTION. The default is 'multiclass'.\n    control : float, optional. 
def get_effect_sizes(\n X, y, control='N2',\n test='ANOVA', comparison_type='multiclass',\n n_jobs=-1):\n \"\"\"\n Get the effect size of the difference between the control and each other\n group, for every feature. The effect size measure matches the test:\n eta squared for ANOVA, Cliff's delta for the rank-based tests and\n Cohen's d for the t-test.\n\n Parameters\n ----------\n X : array-like or pandas DataFrame, shape=(n_samples, n_features)\n The feature matrix.\n y : array-like, shape=(n_samples,)\n The group label of each sample (the comparison variable).\n comparison_type : str, optional\n 'multiclass' or 'binary_each_group'. The default is 'multiclass'.\n control : str or float, optional. The default is 'N2'.\n The y entry for the control points.\n test : str, optional\n One of 'ANOVA', 'Kruskal-Wallis', 'Mann-Whitney' or 't-test'.\n The default is 'ANOVA'.\n\n Raises\n ------\n ValueError\n If control is not found in y, or if a two-sample test is requested\n for a multiclass comparison of more than two groups.\n\n Returns\n -------\n effect : pandas DataFrame\n The effect size per feature (one column per group compared to the\n control, or a single column for the multiclass ANOVA case).\n\n \"\"\"\n if not np.isin(control, np.array(y)):\n raise ValueError('control not found in the y array.')\n\n if comparison_type=='multiclass' and np.unique(y).shape[0]>2:\n if test.startswith('Mann') or test == 't-test':\n raise ValueError(\n \"\"\"\n The Mann-Whitney test cannot be used to compare between\n more than two groups. Use a different test or the\n binary_each_group comparison_type instead.\n \"\"\")\n\n if not isinstance(X, pd.DataFrame):\n X = pd.DataFrame(X)\n\n groups = np.unique(y[y!=control])\n\n # For each group compute the effect size for every feature\n if test=='ANOVA' and comparison_type=='multiclass':\n effect = pd.Series(index=X.columns)\n samples = [x for ix,x in X.groupby(by=y)]\n for ft in X.columns:\n effect[ft] = eta_squared_ANOVA(*[s.loc[~s[ft].isna(), ft] for s in samples])\n effect = pd.DataFrame(effect, columns=['_'.join([test,'effect_size'])])\n else:\n if test=='Mann-Whitney' or test=='Kruskal-Wallis':\n func = cliffs_delta\n elif test=='ANOVA':\n func = eta_squared_ANOVA\n elif test=='t-test':\n func = cohen_d\n\n effect = pd.DataFrame(index=X.columns, columns=groups)\n for igrp, grp in enumerate(groups):\n mask = np.isin(y,[control, grp])\n samples = [x for ix,x in X[mask].groupby(by=y)]\n for ft in X.columns:\n effect.loc[ft, grp] = func(*[s.loc[~s[ft].isna(), ft] for s in samples])\n\n return effect\n\n#%% Effect size functions\ndef cohen_d(x,y):\n \"\"\" Return the Cohen's d effect size for the t-test\n\n \"\"\"\n from numpy import nanstd, nanmean, sqrt\n\n nx = len(x)\n ny = len(y)\n dof = nx + ny - 2\n return (nanmean(x) - nanmean(y)) / sqrt(((nx-1)*nanstd(x, ddof=1) ** 2 + (ny-1)*nanstd(y, ddof=1) ** 2) / dof)\n\n\ndef eta_squared_ANOVA( *args):\n \"\"\" Return the eta squared as the effect size for ANOVA\n\n \"\"\"\n return( float( __ss_between_( *args) / __ss_total_( *args)))\n\ndef cliffs_delta(lst1, lst2):\n\n \"\"\"Return Cliff's delta: the fraction of pairs where lst1 exceeds lst2 minus the fraction where it is smaller, in [-1, 1]\"\"\"\n m, n = len(lst1), len(lst2)\n lst2 = sorted(lst2)\n j = more = less = 0\n for repeats, x in _runs(sorted(lst1)):\n while j <= (n - 1) and lst2[j] < x:\n j += 1\n more += j*repeats\n while j <= (n - 1) and lst2[j] == x:\n j += 1\n less += (n - j)*repeats\n d = (more - less) / (m*n)\n return d\n\ndef __concentrate_( *args):\n \"\"\" Concatenate the input list-like arrays into a single 1D vector\n\n \"\"\"\n v = list( map( np.asarray, args))\n vec = np.hstack( np.concatenate( v))\n return( vec)\n\ndef __ss_total_( *args):\n \"\"\" Return the total sum of squares\n\n \"\"\"\n vec = __concentrate_( *args)\n ss_total = sum( (vec - np.mean( vec)) **2)\n return( ss_total)\n\ndef __ss_between_( *args):\n \"\"\" Return the between-group sum of squares\n\n \"\"\"\n # grand mean\n grand_mean = np.mean( __concentrate_( *args))\n\n ss_btwn = 0\n for a in args:\n ss_btwn += ( len(a) * ( np.mean( a) - grand_mean) **2)\n\n return( ss_btwn)\n\ndef _runs(lst):\n \"\"\"Iterator, chunks repeated values\"\"\"\n for j, two in enumerate(lst):\n if j == 0:\n one, i = two, 0\n if one != two:\n yield j - i, one\n i = j\n one = two\n yield j - i + 1, two\n
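\n# Quick sketch of the effect-size helpers on hypothetical samples:\n#\n# a = np.random.normal(0.0, 1.0, size=100)\n# b = np.random.normal(0.5, 1.0, size=100)\n# cohen_d(a, b) # standardised mean difference (about -0.5 here)\n# cliffs_delta(a, b) # ordinal effect size in [-1, 1]\n# eta_squared_ANOVA(a, b) # fraction of between-group variance, in [0, 1]\n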
\n#%% Correct for multiple comparisons\n\ndef _multitest_correct(pvals, multitest_method, fdr):\n \"\"\"\n Multiple comparisons correction of pvalues from univariate tests.\n Ignores nan values.\n Deals with two options:\n - 1D array of pvalues (one comparison per feature)\n - 2D array of pvalues (multiple comparisons per feature)\n\n Parameters\n ----------\n pvals : pandas series shape=(n_features,) or\n pandas dataframe shape=(n_features, n_doses)\n The pandas structure containing the pvalues from all the statistical\n tests done for a single drug.\n multitest_method : string\n The method to use in the statsmodels.stats.multitest.multipletests function.\n fdr : float\n False discovery rate.\n\n Returns\n -------\n c_reject : pandas series shape=(n_features)\n Flags indicating rejected null hypothesis after the correction for\n multiple comparisons. The null hypothesis for each feature is that the\n feature is not affected by the compound.\n c_pvals : pandas series shape=(n_features)\n The corrected pvalues for each feature. When each dose was tested\n separately, the min pvalue is stored in this output.\n\n \"\"\"\n from statsmodels.stats.multitest import multipletests\n\n if multitest_method is None:\n return pvals[0-9a-zA-Z-]+)\n \"\"\"\n\n _TESTS = [\n {\n \"url\": \"http://www.youmaker.com/v/71b5d2c5-31b6-43b8-8475-1dcb5e10dfb0\",\n \"info_dict\": {\n \"id\": \"71b5d2c5-31b6-43b8-8475-1dcb5e10dfb0\",\n \"ext\": \"mp4\",\n \"title\": \"Как сшить шапочку из трикотажа. Плоский шов двойной иглой.\",\n \"description\": r\"re:(?s)^Привет друзья!\\n\\nВ этом видео я .* представлена www\\.iksonmusic\\.com$\",\n \"thumbnail\": r\"re:^https?://.*\\.(?:jpg|png)$\",\n \"duration\": 358,\n \"upload_date\": \"20190614\",\n \"uploader\": \"user_318f21e00e1f8a6b414f20a654d0f4fc7d2053bc\",\n \"timestamp\": 1560554895,\n \"channel\": \"Sewing Ideas\",\n \"channel_id\": \"40ca79f7-8b21-477f-adba-7d0f81e5b5fd\",\n \"channel_url\": r\"re:https?://www.youmaker.com/channel/40ca79f7-8b21-477f-adba-7d0f81e5b5fd\",\n \"tags\": [\n \"как сшить детскую шапочку из трикотажа\",\n \"как шить двойной иглой трикотаж\",\n ],\n \"categories\": [\"Life\", \"How-to & DIY\"],\n },\n \"params\": {\n \"skip_download\": True,\n },\n },\n {\n # all videos from channel\n \"url\": \"http://www.youmaker.com/channel/f06b2e8d-219e-4069-9003-df343ac5fcf3\",\n \"info_dict\": {\n \"id\": \"f06b2e8d-219e-4069-9003-df343ac5fcf3\",\n \"title\": \"YoYo Cello\",\n \"description\": \"Connect the World Through Music. \\nConnect Our Hearts with Music.\",\n },\n \"playlist_mincount\": 30,\n \"params\": {\n \"nocheckcertificate\": True,\n },\n },\n {\n # all videos from channel playlist\n \"url\": \"https://www.youmaker.com/channel/f8d585f8-2ff7-4c3c-b1ea-a78d77640d54/\"\n \"playlists/f99a120c-7a5e-47b2-9235-3817d1c12662\",\n \"info_dict\": {\n \"id\": \"f99a120c-7a5e-47b2-9235-3817d1c12662\",\n \"title\": \"Mini Cakes\",\n },\n \"playlist_mincount\": 9,\n \"params\": {\n \"nocheckcertificate\": True,\n },\n },\n ]\n REQUEST_LIMIT = 50\n\n def __init__(self, downloader=None):\n \"\"\"Constructor. 
Receives an optional downloader.\"\"\"\n super(YoumakerIE, self).__init__(downloader=downloader)\n self._protocol = \"https\"\n self._category_map = None\n self._cache = {}\n\n @staticmethod\n def _extract_url(webpage):\n match = re.search(\n r'<iframe[^>]+src=\"(?P<url>https?://(?:www\\.)?youmaker\\.com/embed/[0-9a-zA-Z-]+)[^\"]*\"',\n webpage,\n )\n return match.group(\"url\") if match else None\n\n def _fix_url(self, url):\n if url.startswith(\"//\"):\n return \"%s:%s\" % (self._protocol, url)\n return url\n\n @property\n def _base_url(self):\n return self._fix_url(\"//www.youmaker.com\")\n\n @property\n def _asset_url(self):\n # as this url might change in the future\n # it needs to be extracted from some js magic...\n return self._fix_url(\"//vs.youmaker.com/assets\")\n\n def _live_url(self, video_id):\n return self._fix_url(\"//live.youmaker.com/%s/playlist.m3u8\" % video_id)\n\n def _call_api(self, uid, path, what=\"JSON metadata\", fatal=True, **kwargs):\n \"\"\"\n call the YouMaker JSON API and return a valid data object\n\n path: API endpoint\n what: query description\n fatal: if True might raise ExtractorError otherwise warn and return None\n **kwargs: parameters passed to _download_json()\n \"\"\"\n url = \"%s/v1/api/%s\" % (self._base_url, path)\n kwargs.setdefault(\"note\", \"Downloading %s\" % what)\n kwargs.setdefault(\"errnote\", \"Failed to download %s\" % what)\n info = self._download_json(url, uid, fatal=fatal, **kwargs)\n\n # soft error already reported\n if info is False:\n return None\n\n status = try_get(info, itemgetter(\"status\"), compat_str)\n data = try_get(info, itemgetter(\"data\"), (list, dict))\n\n if status != \"ok\":\n msg = \"%s - %s\" % (what, status or \"Bad JSON response\")\n if fatal or status is None:\n raise ExtractorError(\n msg, video_id=uid, expected=isinstance(status, compat_str)\n )\n self.report_warning(msg, video_id=uid)\n\n return data\n\n @property\n def _categories(self):\n if self._category_map is None:\n category_list = (\n self._call_api(\n None,\n \"video/category/list\",\n what=\"categories\",\n fatal=False,\n )\n or ()\n )\n self._category_map = {item[\"category_id\"]: item for item in category_list}\n return self._category_map\n\n def _categories_by_id(self, cid):\n categories = []\n if cid is None:\n return categories\n\n while True:\n item = self._categories.get(cid)\n if item is None or item[\"category_name\"] in categories:\n break\n categories.insert(0, item[\"category_name\"])\n cid = item[\"parent_category_id\"]\n\n return categories\n\n def _get_subtitles(self, system_id):\n if system_id is None:\n return {}\n\n subs_list = (\n self._call_api(\n system_id,\n \"video/subtitle\",\n what=\"subtitle info\",\n query={\"systemid\": system_id},\n fatal=False,\n )\n or ()\n )\n\n subtitles = {}\n for item in subs_list:\n subtitles.setdefault(item[\"language_code\"], []).append(\n {\"url\": \"%s/%s\" % (self._asset_url, item[\"url\"])}\n )\n\n return subtitles\n\n def _video_entry_by_metadata(self, info):\n # check some dictionary keys so it's safe to use them\n mandatory_keys = {\"video_uid\", \"title\", \"data\"}\n missing_keys = mandatory_keys - set(info.keys())\n if missing_keys:\n raise ExtractorError(\n \"Missing video metadata: %s\" % \", \".join(missing_keys),\n video_id=self.ie_key(),\n )\n\n video_uid = info[\"video_uid\"]\n tag_str = info.get(\"tag\")\n if tag_str:\n tags = [tag.strip() for tag in tag_str.strip(\"[]\").split(\",\")]\n else:\n tags = None\n\n channel_url = (\n \"%s/channel/%s\" % (self._base_url, info[\"channel_uid\"])\n 
if \"channel_uid\" in info\n else None\n )\n\n video_info = info[\"data\"] # asserted before\n duration = video_info.get(\"duration\")\n formats = []\n\n if info.get(\"live\") and info.get(\"live_status\") == \"start\":\n is_live = True\n playlist = self._live_url(video_uid)\n else:\n is_live = False\n playlist = video_info.get(\"videoAssets\", {}).get(\"Stream\")\n\n if playlist:\n playlist_name = os.path.basename(playlist)\n formats.extend(\n self._extract_m3u8_formats(\n self._fix_url(playlist),\n video_uid,\n ext=\"mp4\",\n entry_protocol=\"m3u8\" if is_live else \"m3u8_native\",\n note=\"Downloading %s\" % playlist_name,\n errnote=\"%s (%s)\" % (video_uid, playlist_name),\n fatal=False,\n )\n )\n\n if not formats:\n # as there are some videos on the platform with missing playlist\n # expected is set True\n raise ExtractorError(\n \"No video formats found!\", video_id=video_uid, expected=True\n )\n\n self._sort_formats(formats)\n for item in formats:\n height = try_get(item, itemgetter(\"height\"), int)\n if height:\n item[\"format_id\"] = \"%dp\" % item[\"height\"]\n tbr = try_get(item, itemgetter(\"tbr\"), (int, float))\n if duration and tbr:\n item[\"filesize_approx\"] = 128 * tbr * duration\n\n return {\n \"id\": video_uid,\n \"title\": info[\"title\"], # asserted before\n \"description\": info.get(\"description\"),\n \"formats\": formats,\n \"is_live\": is_live,\n \"timestamp\": parse_iso8601(info.get(\"uploaded_at\")),\n \"uploader\": info.get(\"uploaded_by\"),\n \"duration\": duration,\n \"categories\": self._categories_by_id(info.get(\"category_id\")),\n \"tags\": tags,\n \"channel\": info.get(\"channel_name\"),\n \"channel_id\": info.get(\"channel_uid\"),\n \"channel_url\": channel_url,\n \"thumbnail\": info.get(\"thumbmail_path\"),\n \"view_count\": info.get(\"click\"),\n \"subtitles\": self.extract_subtitles(info.get(\"system_id\")),\n }\n\n def _video_entry_by_id(self, uid):\n info = self._cache.get(uid) or self._call_api(\n uid,\n \"video/metadata/%s\" % uid,\n what=\"video metadata\",\n )\n\n return self._video_entry_by_metadata(info)\n\n def _paged_playlist_entries(self, uid, page_size=REQUEST_LIMIT):\n def fetch_page(page_number):\n offset = page_number * page_size\n info = self._call_api(\n uid,\n path=\"playlist/video\",\n what=\"playlist entries %d-%d\" % (offset + 1, offset + page_size),\n query={\"playlist_uid\": uid, \"offset\": offset, \"limit\": page_size},\n )\n if not isinstance(info, Sequence):\n raise ExtractorError(\"Unexpected playlist entries\", uid, expected=False)\n\n for item in info:\n yield self.url_result(\n \"%s/video/%s\" % (self._base_url, item[\"video_uid\"]),\n ie=self.ie_key(),\n video_id=item[\"video_uid\"],\n video_title=item[\"video_title\"],\n )\n\n _ = self._categories # preload categories\n return OnDemandPagedList(fetch_page, page_size)\n\n def _paged_channel_entries(self, uid, page_size=REQUEST_LIMIT):\n def fetch_page(page_number):\n offset = page_number * page_size\n info = self._call_api(\n uid,\n path=\"video/channel/%s\" % uid,\n what=\"channel entries %d-%d\" % (offset + 1, offset + page_size),\n query={\"offset\": offset, \"limit\": page_size},\n )\n if not isinstance(info, Sequence):\n raise ExtractorError(\"Unexpected channel entries\", uid, expected=False)\n\n for item in info:\n self._cache[item[\"video_uid\"]] = item\n yield self.url_result(\n \"%s/video/%s\" % (self._base_url, item[\"video_uid\"]),\n ie=self.ie_key(),\n video_id=item[\"video_uid\"],\n video_title=item[\"title\"],\n )\n\n _ = self._categories # preload 
categories\n return OnDemandPagedList(fetch_page, page_size)\n\n def _playlist_entries_by_id(self, uid):\n _ = self._categories # preload categories\n info = self._call_api(\n uid,\n \"playlist/%s\" % uid,\n what=\"playlist metadata\",\n )\n return self.playlist_result(\n self._paged_playlist_entries(\n info[\"playlist_uid\"],\n ),\n playlist_id=info[\"playlist_uid\"],\n playlist_title=info.get(\"name\"),\n playlist_description=None,\n )\n\n def _channel_entries_by_id(self, uid):\n _ = self._categories # preload categories\n info = self._call_api(\n uid,\n path=\"video/channel/metadata/%s\" % uid,\n what=\"channel metadata\",\n )\n return self.playlist_result(\n self._paged_channel_entries(\n info[\"channel_uid\"],\n ),\n playlist_id=info[\"channel_uid\"],\n playlist_title=info.get(\"name\"),\n playlist_description=info.get(\"description\"),\n )\n\n def _real_extract(self, url):\n parsed_url = ParsedURL(url)\n self._protocol = parsed_url.scheme\n\n dispatch = (\n (r\"/(?:v|video|embed)/(?P<uid>[a-zA-Z0-9-]+)\", self._video_entry_by_id),\n (\n r\"(/channel/[a-zA-Z0-9-]+)?/playlists?/(?P<uid>[a-zA-Z0-9-]+)\",\n self._playlist_entries_by_id,\n ),\n (r\"/channel/(?P<uid>[a-zA-Z0-9-]+)/?$\", self._channel_entries_by_id),\n )\n\n for regex, func in dispatch:\n match = re.match(regex, parsed_url.path)\n if not match:\n continue\n return func(**match.groupdict())\n else:\n raise ExtractorError(\"unsupported %s url\" % self.ie_key(), expected=True)\n", "sub_path": "youtube_dl/extractor/youmaker.py", "file_name": "youmaker.py", "file_ext": "py", "file_size_in_byte": 15038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "compat.compat_urllib_parse_urlparse", "line_number": 28, "usage_type": "call"}, {"api_name": "compat.compat_urlparse.parse_qsl", "line_number": 30, "usage_type": "call"}, {"api_name": "compat.compat_urlparse", "line_number": 30, "usage_type": "name"}, {"api_name": "re.match", "line_number": 34, "usage_type": "call"}, {"api_name": "common.InfoExtractor", "line_number": 60, "usage_type": "name"}, {"api_name": "re.search", "line_number": 131, "usage_type": "call"}, {"api_name": "utils.try_get", "line_number": 173, "usage_type": "call"}, {"api_name": "compat.compat_str", "line_number": 173, "usage_type": "argument"}, {"api_name": "operator.itemgetter", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.try_get", "line_number": 174, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.ExtractorError", "line_number": 179, "usage_type": "call"}, {"api_name": "compat.compat_str", "line_number": 180, "usage_type": "argument"}, {"api_name": "utils.ExtractorError", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 273, "usage_type": "name"}, {"api_name": "utils.ExtractorError", "line_number": 289, "usage_type": "call"}, {"api_name": "utils.try_get", "line_number": 295, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 295, "usage_type": "call"}, {"api_name": "utils.try_get", "line_number": 298, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 298, "usage_type": "call"}, {"api_name": "utils.parse_iso8601", "line_number": 308, "usage_type": "call"}, {"api_name": "collections.Sequence", "line_number": 339, "usage_type": 
"argument"}, {"api_name": "utils.ExtractorError", "line_number": 340, "usage_type": "call"}, {"api_name": "utils.OnDemandPagedList", "line_number": 351, "usage_type": "call"}, {"api_name": "collections.Sequence", "line_number": 362, "usage_type": "argument"}, {"api_name": "utils.ExtractorError", "line_number": 363, "usage_type": "call"}, {"api_name": "utils.OnDemandPagedList", "line_number": 375, "usage_type": "call"}, {"api_name": "re.match", "line_number": 423, "usage_type": "call"}, {"api_name": "utils.ExtractorError", "line_number": 428, "usage_type": "call"}]} +{"seq_id": "209474267", "text": "import logging\n\nfrom roomserver.media.element import MediaElement\nfrom roomserver.media.pipeline import MediaPipeline\nfrom roomserver.media.session import KurentoSession\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebRTCEndPoint(MediaElement):\n def __init__(self, pipeline: MediaPipeline, session: KurentoSession):\n super().__init__(pipeline, session)\n\n async def create(self):\n try:\n a_response = await self.session.send_request(method=\"create\", params={\n \"type\": \"WebRtcEndpoint\",\n \"constructorParams\": {\n \"mediaPipeline\": self.pipeline.pipeline_id\n },\n \"properties\": {},\n })\n result = await a_response\n except Exception:\n logger.exception('COMMAND: ')\n return\n else:\n pipeline_id, element_id = result['value'].split('/')\n assert self.pipeline.pipeline_id == pipeline_id\n self.element_id = element_id\n logger.info(\"WebRtcEndpoint created: %s\", self.element_id)\n", "sub_path": "roomserver/media/web_rtc_endpoint.py", "file_name": "web_rtc_endpoint.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "roomserver.media.element.MediaElement", "line_number": 10, "usage_type": "name"}, {"api_name": "roomserver.media.pipeline.MediaPipeline", "line_number": 11, "usage_type": "name"}, {"api_name": "roomserver.media.session.KurentoSession", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "495429195", "text": "import os.path\nimport re\nimport collections\nimport logging\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\ntry:\n from utils.jobs.runners import runner\n from utils.files import copy_always, encode_lines_to_file, decode_lines_from_file\n from utils.errors import ProcessingError\n from utils.config import lazy_conf\n from utils.serialization import ingest_yaml\nexcept ImportError:\n from jobs.runners import runner\n from files import copy_always, encode_lines_to_file, decode_lines_from_file\n from errors import ProcessingError\n from serialization import ingest_yaml\n from config import lazy_conf\n\ndef munge_page(fn, regex, out_fn=None, tag='build'):\n if out_fn is None:\n out_fn = fn\n\n page_lines = [ munge_content(ln, regex) for ln in decode_lines_from_file(fn)\n if ln is not None ]\n\n if len(page_lines) > 0:\n encode_lines_to_file(out_fn, page_lines)\n logger.info('{0}: processed {1}'.format(tag, fn))\n else:\n logger.warning('{0}: did not write {1}'.format(tag, out_fn))\n\ndef munge_content(content, regex):\n if isinstance(regex, list):\n for cregex, subst in regex:\n content = cregex.sub(subst, content)\n return content\n else:\n return regex[0].sub(regex[1], content)\n\n\ndef process_page(fn, output_fn, regex, builder='processor'):\n tmp_fn = fn + '~'\n\n jobs = [\n {\n 'target': tmp_fn,\n 'dependency': fn,\n 'job': munge_page,\n 'args': 
dict(fn=fn, out_fn=tmp_fn, regex=regex),\n },\n {\n 'target': output_fn,\n 'dependency': tmp_fn,\n 'job': copy_always,\n 'args': dict(source_file=tmp_fn,\n target_file=output_fn,\n name=builder),\n }\n ]\n\n runner(jobs, pool=1, parallel=False, force=False)\n\ndef post_process_jobs(source_fn=None, tasks=None, conf=None):\n \"\"\"\n input documents should be:\n\n {\n 'transform': {\n 'regex': str,\n 'replace': str\n }\n 'type': <str>\n 'file': <str>\n }\n\n ``transform`` can be either a document or a list of documents.\n \"\"\"\n\n if tasks is None:\n conf = lazy_conf(conf)\n\n if source_fn is None:\n source_fn = os.path.join(conf.paths.project.root,\n conf.paths.builddata,\n 'processing.yaml')\n tasks = ingest_yaml(source_fn)\n elif not isinstance(tasks, collections.Iterable):\n raise ProcessingError('[ERROR]: cannot parse post processing specification.')\n\n def rjob(fn, regex, type):\n return {\n 'target': fn,\n 'dependency': None,\n 'job': process_page,\n 'args': dict(fn=fn, output_fn=fn, regex=regex, builder=type)\n }\n\n for job in tasks:\n if not isinstance(job, dict):\n raise ProcessingError('[ERROR]: invalid replacement specification.')\n elif not 'file' in job and not 'transform' in job:\n raise ProcessingError('[ERROR]: replacement specification incomplete.')\n\n if 'type' not in job:\n job['type'] = 'processor'\n\n if isinstance(job['transform'], list):\n regex = [ (re.compile(rs['regex']), rs['replace'])\n for rs in job['transform'] ]\n else:\n regex = (re.compile(job['transform']['regex']), job['transform']['replace'])\n\n if isinstance(job['file'], list):\n for fn in job['file']:\n yield rjob(fn, regex, job['type'])\n else:\n yield rjob(job['file'], regex, job['type'])\n\ndef truncate_file(fn, start_after=None, end_before=None):\n with open(fn, 'r') as f:\n source_lines = f.readlines()\n\n start_idx = 0\n end_idx = len(source_lines) - 1\n\n for idx, ln in enumerate(source_lines):\n if start_after is not None:\n if start_idx == 0 and ln.startswith(start_after):\n start_idx = idx - 1\n start_after = None\n\n if end_before is not None:\n if ln.startswith(end_before):\n end_idx = idx\n break\n\n with open(fn, 'w') as f:\n f.writelines(source_lines[start_idx:end_idx])\n\ndef append_to_file(fn, text):\n with open(fn, 'a') as f:\n f.write('\\n')\n f.write(text)\n", "sub_path": "utils/transformations.py", "file_name": "transformations.py", "file_ext": "py", "file_size_in_byte": 4510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 6, "usage_type": "name"}, {"api_name": "files.decode_lines_from_file", "line_number": 25, "usage_type": "call"}, {"api_name": "files.encode_lines_to_file", "line_number": 29, "usage_type": "call"}, {"api_name": "jobs.runners", "line_number": 46, "usage_type": "name"}, {"api_name": "files.copy_always", "line_number": 56, "usage_type": "name"}, {"api_name": "jobs.runners.runner", "line_number": 63, "usage_type": "call"}, {"api_name": "jobs.runners", "line_number": 63, "usage_type": "argument"}, {"api_name": "config.lazy_conf", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path", 
"line_number": 85, "usage_type": "name"}, {"api_name": "serialization.ingest_yaml", "line_number": 88, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 89, "usage_type": "attribute"}, {"api_name": "errors.ProcessingError", "line_number": 90, "usage_type": "call"}, {"api_name": "errors.ProcessingError", "line_number": 102, "usage_type": "call"}, {"api_name": "errors.ProcessingError", "line_number": 104, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 110, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "603695197", "text": "import numpy as np\nimport math\nimport cmath\nfrom scipy import signal\n\nclass lms(object):\n def __init__(self,N):\n self.N=N\n self.w=np.random.rand(N)*0.01\n self.x=np.zeros(N)\n self.y=0\n def update(self,x,d,eta):\n self.x=self.x[:-1]\n self.x=np.insert(self.x,0,x)\n y=np.dot(self.x,self.w)\n self.w=self.w+2*eta*(d-y)*self.x\n self.y=y\n return y\n def coef(self):\n return self.w\n\nclass filtroTP(object):\n def __init__(self,ro,alpha,radio,periodo):\n self.ro=ro\n self.alpha=alpha\n self.periodo=periodo\n self.iteracion=0\n self.radio=radio\n self.y=np.zeros(4) #Arreglo para guardar y(n), y(n-1) e y(n-2)\n self.polo1=self.ro*cmath.exp(1.j*self.alpha)+self.radio*cmath.exp(1.j*(self.iteracion*2*math.pi/self.periodo+self.alpha))\n self.polo2=np.conjugate(self.polo1)\n self.a=signal.convolve([1.,-1],[1.,-2*np.absolute(self.polo1)*math.cos(np.angle(self.polo1)),np.absolute(self.polo1)**2])\n self.x=0\n def run(self,x):\n self.x=x\n self.polo1=self.ro*cmath.exp(1.j*self.alpha)+self.radio*cmath.exp(1.j*(self.iteracion*2*math.pi/self.periodo+self.alpha))\n self.polo2=np.conjugate(self.polo1)\n self.a=signal.convolve([1.,-1.],[1.,-2*np.absolute(self.polo1)*math.cos(np.angle(self.polo1)),np.absolute(self.polo1)**2])\n y=self.x-self.y[0]*self.a[1]-self.y[1]*self.a[2]-self.y[2]*self.a[3]\n self.y=self.y[:-1]\n self.y=np.insert(self.y,0,y)\n self.iteracion+=1\n if self.iteracion==self.periodo:\n self.iteracion=0\n return y\n", "sub_path": "pasa.py", "file_name": "pasa.py", "file_ext": "py", "file_size_in_byte": 1733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.random.rand", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "cmath.exp", "line_number": 30, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.conjugate", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.signal.convolve", "line_number": 32, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.absolute", "line_number": 32, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 32, "usage_type": "call"}, {"api_name": "cmath.exp", "line_number": 36, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.conjugate", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.signal.convolve", "line_number": 38, 
"usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.absolute", "line_number": 38, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "476996928", "text": "# generic code used from flask documentation for application factory functionality\n# source link: https://flask.palletsprojects.com/en/1.1.x/tutorial/factory/\nimport os\n\nfrom flask import Flask\nfrom . import db\nimport app.api as api\nfrom flask_cors import CORS\n\ndef create_app(test_config=None):\n\t# create and configure the app\n\tapp = Flask(__name__, instance_relative_config=True)\n\tapp.config.from_mapping(\n\t\tSECRET_KEY='dev',\n\t\tDATABASE=os.path.join(app.instance_path, 'game_db.sqlite'),\n\t)\n\n\tif test_config is None:\n\t\t# load the instance config, if it exists, when not testing\n\t\tapp.config.from_pyfile('config.py', silent=True)\n\telse:\n\t\t# load the test config if passed in\n\t\tapp.config.from_mapping(test_config)\n\n\t# ensure the instance folder exists\n\ttry:\n\t\tos.makedirs(app.instance_path)\n\texcept OSError:\n\t\tpass\n\n\tdb.init_app(app)\t# registers database with app\n\n\tapp.register_blueprint(api.endpoints.bp)\n\tCORS(app)\n\treturn app\n", "sub_path": "app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "app.api", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "app.api.config.from_mapping", "line_number": 13, "usage_type": "call"}, {"api_name": "app.api.config", "line_number": 13, "usage_type": "attribute"}, {"api_name": "app.api", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.api.instance_path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.api", "line_number": 15, "usage_type": "name"}, {"api_name": "app.api.config.from_pyfile", "line_number": 20, "usage_type": "call"}, {"api_name": "app.api.config", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.api", "line_number": 20, "usage_type": "name"}, {"api_name": "app.api.config.from_mapping", "line_number": 23, "usage_type": "call"}, {"api_name": "app.api.config", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.api", "line_number": 23, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "app.api.instance_path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.api", "line_number": 27, "usage_type": "name"}, {"api_name": "app.api", "line_number": 31, "usage_type": "argument"}, {"api_name": "app.api.register_blueprint", "line_number": 33, "usage_type": "call"}, {"api_name": "app.api", "line_number": 33, "usage_type": "name"}, {"api_name": "app.api.endpoints", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask_cors.CORS", "line_number": 34, "usage_type": "call"}, {"api_name": "app.api", "line_number": 34, "usage_type": "argument"}, {"api_name": "app.api", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "509924868", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\n__author__ = 'greboreda'\n\nimport tkRAD\nfrom countries import CountriesBox\nfrom projects import ProjectsBox\nfrom databases import DatabasesBox\nfrom extra_data import ExtraData\nfrom data import Commands\nfrom data import Ids\nimport resources\n\n\nclass MainWindow(tkRAD.RADXMLMainWindow):\n\n projects_box = None\n countries_box = None\n databases_box = None\n extra_data_box = None\n\n def init_widget(self, **kw):\n\n self.xml_build(resources.main_view_name)\n\n self.events.connect_dict({\n Commands.on_select_something: self.on_select_something,\n Commands.on_click_apply: self.on_click_apply\n })\n\n self.projects_box = ProjectsBox(self.mainframe)\n self.countries_box = CountriesBox(self.mainframe)\n self.databases_box = DatabasesBox(self.mainframe)\n self.extra_data_box = ExtraData(self.mainframe)\n\n def on_click_apply(self, *args, **kw):\n print(self.countries_box.selected_country)\n print(self.projects_box.selected_projects)\n print(self.databases_box.selected_database)\n\n def on_select_something(self, *args, **kw):\n projects = self.projects_box.selected_projects\n country = self.countries_box.selected_country\n database = self.databases_box.selected_database\n\n apply_button = self.mainframe.get_object_by_id(Ids.apply_button)\n\n can_apply = projects and country and database\n if self.extra_data_box.is_custom_hostname_checked:\n hostname = self.extra_data_box.hostname\n can_apply = can_apply and ExtraData.is_valid_hostname(hostname)\n\n state = \"normal\" if can_apply else \"disabled\"\n apply_button.config(state=state)\n\nif __name__ == \"__main__\":\n\n main = MainWindow()\n # main.maxsize(800, 500)\n main.minsize(325, 350)\n main.title(resources.app_name)\n main.run()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "tkRAD.RADXMLMainWindow", "line_number": 16, "usage_type": "attribute"}, {"api_name": "resources.main_view_name", "line_number": 25, "usage_type": "attribute"}, {"api_name": "data.Commands.on_select_something", "line_number": 28, "usage_type": "attribute"}, {"api_name": "data.Commands", "line_number": 28, "usage_type": "name"}, {"api_name": "data.Commands.on_click_apply", "line_number": 29, "usage_type": "attribute"}, {"api_name": "data.Commands", "line_number": 29, "usage_type": "name"}, {"api_name": "projects.ProjectsBox", "line_number": 32, "usage_type": "call"}, {"api_name": "countries.CountriesBox", "line_number": 33, "usage_type": "call"}, {"api_name": "databases.DatabasesBox", "line_number": 34, "usage_type": "call"}, {"api_name": "extra_data.ExtraData", "line_number": 35, "usage_type": "call"}, {"api_name": "data.Ids.apply_button", "line_number": 47, "usage_type": "attribute"}, {"api_name": "data.Ids", "line_number": 47, "usage_type": "name"}, {"api_name": "extra_data.ExtraData.is_valid_hostname", "line_number": 52, "usage_type": "call"}, {"api_name": "extra_data.ExtraData", "line_number": 52, "usage_type": "name"}, {"api_name": "resources.app_name", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "78292174", "text": "from flask import Flask, render_template, request, session, redirect, url_for\nfrom forms import SupaPlayaMaka\nimport requests, json, os\nfrom ftplib import FTP\n\n\napp = Flask(__name__)\napp.secret_key = \"TEST\"\n\n\n@app.route('/', methods=['GET', 'POST'] )\ndef index():\n return render_template('index.html')\n\n\n\n###########\n#controller gets the number of files you 
want to enter, before going to type in the file names\n@app.route('/howmany', methods=['GET', 'POST'] )\ndef howmany():\n form = SupaPlayaMaka()\n return render_template('howmany.html', form=form)\n\n\n\n###########\n#controller gets the number of files you want to enter, before going to type in the file names\n@app.route('/howmany_AD', methods=['GET', 'POST'] )\ndef howmany_AD():\n form = SupaPlayaMaka()\n return render_template('howmany_AD.html', form=form)\n\n\n\n###########\n#controller makes text inputs based on the number entered in howmany\n@app.route('/enterfiles', methods=['GET', 'POST'] )\ndef enterfiles():\n form = SupaPlayaMaka()\n numfiles = request.form.get('amount')\n numfilesint = int(numfiles)\n session['x'] = 1\n return render_template('enterfiles.html',form=form, numfilesint=numfilesint)\n\n\n###########\n#controller makes text inputs based on the number entered in howmany\n@app.route('/enterfiles_AD', methods=['GET', 'POST'] )\ndef enterfiles_AD():\n form = SupaPlayaMaka()\n numfiles = request.form.get('amount')\n numfilesint = int(numfiles)\n session['x'] = 1\n return render_template('enterfiles_AD.html',form=form, numfilesint=numfilesint)\n\n\n\n###########\n#controller allows files to be input for video names to playeropts\n@app.route('/selectfiles', methods=['GET', 'POST'] )\ndef selectfiles():\n form = SupaPlayaMaka()\n playa_option = request.form['playerchoice']\n session['player_option'] = playa_option\n session['x'] = 0\n return render_template('selectfiles.html', form=form)\n\n\n\n###########\n#controller makes form for selecting player type\n@app.route('/playeropts1', methods=['GET', 'POST'] )\ndef playeropts1():\n form = SupaPlayaMaka()\n return render_template('playeropts1.html', form=form)\n\n\n\n###########\n#controller takes file names from selectfiles or enterfiles and goes to entry ids\n@app.route('/entryids', methods=['GET', 'POST'] )\ndef entryids():\n form = SupaPlayaMaka()\n x = session.get('x', None)\n if x == 0:\n files = request.form.getlist('videofile')\n newfiles = []\n for f in files:\n newf = os.path.splitext(f)[0]\n newfiles.append(newf)\n session['file_var'] = newfiles\n return render_template('entryids.html', form=form, newfiles=newfiles)\n else:\n files = request.form.getlist('filename')\n newfiles = []\n for f in files:\n newfiles.append(f)\n session['file_var'] = newfiles\n return render_template('entryids.html', form=form, newfiles=newfiles)\n\n\n\n\n###########\n#controller takes file names from selectfiles or enterfiles and goes to entry ids\n@app.route('/entryids_AD', methods=['GET', 'POST'] )\ndef entryids_AD():\n form = SupaPlayaMaka()\n x = session.get('x', None)\n if x == 0:\n files = request.form.getlist('videofile')\n newfiles = []\n for f in files:\n newf = os.path.splitext(f)[0]\n newfiles.append(newf)\n session['file_var'] = newfiles\n return render_template('entryids_AD.html', form=form, newfiles=newfiles)\n else:\n files = request.form.getlist('filename')\n newfiles = []\n for f in files:\n newfiles.append(f)\n session['file_var'] = newfiles\n return render_template('entryids_AD.html', form=form, newfiles=newfiles)\n\n\n\n###########\n#controller takes file names, entry ids, and player option and outputs code\n@app.route('/codeout', methods=['GET', 'POST'] )\ndef codeout():\n download_option = request.form['downloadopts']\n # session['download_option'] = download_option\n # session['x'] = 0\n player_option = session.get('player_option', None)\n files = session.get('file_var', None)\n length = len(files)\n entryids = 
request.form.getlist('entryid')\n if player_option == 'standardplayer':\n return render_template('standardplayer.html', files=files, entryids=entryids, length=length, download_option=download_option)\n elif player_option == 'audioplayer':\n return render_template('audioplayer.html', files=files, entryids=entryids, length=length, download_option=download_option)\n else:\n return render_template('chapterplayer.html', files=files, entryids=entryids, length=length, download_option=download_option)\n\n\n\ndef selectfiles():\n form = SupaPlayaMaka()\n download_option = request.form['downloadopts']\n session['download_option'] = download_option\n session['x'] = 0\n return render_template('selectfiles.html', form=form)\n\n\n###########\n#controller takes file names, entry ids, and player option and outputs code\n@app.route('/codeout_AD', methods=['GET', 'POST'] )\ndef codeout_AD():\n files = session.get('file_var', None)\n length = len(files)\n entryids = request.form.getlist('entryid')\n file_idlist = []\n for f in files:\n caption_filename = f\n url_g = 'http://api.3playmedia.com/files?apikey=qPkBhpMQzvvZFJqbAw5MgaWwVMmUZtRX&q=name=%s' % caption_filename\n g = requests.get(url_g)\n response = g.text\n listofdicts = json.loads(response)\n dictchoice = listofdicts['files']\n finaldict = dictchoice[0]\n\n for key, value in finaldict.items():\n if key == 'id':\n file_id = value\n break\n else:\n continue\n\n file_idlist.append(file_id)\n return render_template('audio_description.html', files=files, entryids=entryids, length=length, file_idlist=file_idlist, listofdicts=listofdicts, dictchoice=dictchoice, finaldict=finaldict)\n\n\n\n###########\n#controllers fill video id field in 3play for captions\n@app.route('/captions1', methods=['GET', 'POST'] )\ndef captions1():\n form = SupaPlayaMaka()\n if request.method == 'POST':\n return redirect(url_for('captions2'))\n elif request.method == 'GET':\n return render_template('captions1.html', form=form)\n\n@app.route('/captions2', methods=['GET', 'POST'] )\ndef captions2():\n form = SupaPlayaMaka()\n # gets files from form\n files = request.form.getlist('captionfile')\n # strips the file extension from the file input and makes new list\n newfiles = []\n for f in files:\n newf = os.path.splitext(f)[0]\n newfiles.append(newf)\n\n # issues get request to 3play api to get the id# of the named file\n file_idlist = []\n for f in newfiles:\n caption_filename = f\n url_g = 'http://api.3playmedia.com/files?apikey=qPkBhpMQzvvZFJqbAw5MgaWwVMmUZtRX&q=name=%s' % caption_filename\n g = requests.get(url_g)\n response = g.text\n listofdicts = json.loads(response)\n dictchoice = listofdicts['files']\n finaldict = dictchoice[0]\n\n for key, value in finaldict.items():\n if key == 'id':\n file_id = value\n break\n else:\n continue\n\n file_idlist.append(file_id)\n\n # issues the put request, using the id# to populate the video id\n i = 0\n for f in newfiles:\n caption_filename = f\n file_id = file_idlist[i]\n url_p = 'http://api.3playmedia.com/files/%s' % file_id\n params_p = {'apikey':'qPkBhpMQzvvZFJqbAw5MgaWwVMmUZtRX', 'api_secret_key':'dMkGa_CVlIjL8clh3I3bPfH0EQrgp_w7', '_method':'PUT', 'video_id':'%s' % caption_filename}\n p = requests.put(url_p, params=params_p)\n i += 1\n\n return render_template('captions2.html', newfiles=newfiles)\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=False)\n", "sub_path": "routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 7795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.form.getlist", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 99, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.form", 
"line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.request.form.getlist", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 144, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 151, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 152, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 162, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 162, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 167, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 181, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 190, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 190, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 192, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 192, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 193, "usage_type": "call"}, {"api_name": "forms.SupaPlayaMaka", "line_number": 197, "usage_type": "call"}, {"api_name": "flask.request.form.getlist", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 199, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 211, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 213, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 233, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 236, "usage_type": "call"}]} +{"seq_id": "632311924", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nTI CC2650 SensorTag\n-------------------\n\nAdapted by Ashwin from the following sources:\n - https://github.com/IanHarvey/bluepy/blob/a7f5db1a31dba50f77454e036b5ee05c3b7e2d6e/bluepy/sensortag.py\n - https://github.com/hbldh/bleak/blob/develop/examples/sensortag.py\n\n\"\"\"\nimport os\nimport asyncio\nimport platform\nimport struct\nfrom client import setup, send_data\n\nfrom aioconsole import ainput\nfrom bleak import BleakClient\n\nNAME = \"TNG\"\nDURATION = 20\nADDRESS = \"54:6C:0E:B7:82:82\"\nTOPIC = \"aegis/gesture\"\n\nGESTURE_DELAY = 1\n\n\nclass Service:\n \"\"\"\n Here is a good documentation about the concepts in ble;\n https://learn.adafruit.com/introduction-to-bluetooth-low-energy/gatt\n\n In TI SensorTag there is a control characteristic and a data characteristic which define a service or sensor\n like the Light Sensor, Humidity Sensor etc\n\n Please take a look at the official TI user guide as well at\n https://processors.wiki.ti.com/index.php/CC2650_SensorTag_User's_Guide\n \"\"\"\n\n def __init__(self):\n self.data_uuid = None\n self.ctrl_uuid = None\n self.period_uuid = None\n\n async def read(self, client):\n raise NotImplementedError()\n\n\nclass Sensor(Service):\n\n def callback(self, sender: int, data: bytearray):\n raise NotImplementedError()\n\n async def enable(self, client, *args):\n # start the sensor on the device\n write_value = bytearray([0x01])\n await client.write_gatt_char(self.ctrl_uuid, write_value)\n write_value = bytearray([0x0A]) # check the sensor period applicable values in the sensor tag guide mentioned above\n await client.write_gatt_char(self.period_uuid, write_value)\n\n return self\n\n async def read(self, client):\n val = await client.read_gatt_char(self.data_uuid)\n return self.callback(1, val)\n\n\n\nclass BatteryService(Service):\n def __init__(self):\n super().__init__()\n self.data_uuid = \"00002a19-0000-1000-8000-00x805f9b34fb\"\n\n async def read(self, client):\n val = await client.read_gatt_char(self.data_uuid)\n return int(val[0])\n \n \nclass MovementSensorMPU9250SubService:\n\n def __init__(self):\n self.bits = 0\n\n def enable_bits(self):\n return self.bits\n\n def cb_sensor(self, data):\n raise NotImplementedError\n \n\nclass MovementSensorMPU9250(Sensor):\n GYRO_XYZ = 7\n ACCEL_XYZ = 7 << 3\n MAG_XYZ = 1 << 6\n ACCEL_RANGE_2G = 0 << 8\n ACCEL_RANGE_4G = 1 << 8\n ACCEL_RANGE_8G = 2 << 8\n ACCEL_RANGE_16G = 3 << 8\n\n def __init__(self):\n super().__init__()\n self.data_uuid = \"f000aa81-0451-4000-b000-000000000000\"\n self.ctrl_uuid = \"f000aa82-0451-4000-b000-000000000000\"\n self.ctrlBits = 0\n\n self.sub_callbacks = []\n\n def register(self, cls_obj: MovementSensorMPU9250SubService):\n self.ctrlBits |= cls_obj.enable_bits()\n self.sub_callbacks.append(cls_obj.cb_sensor)\n\n async def start_listener(self, client, *args):\n # start the sensor on the device\n await client.write_gatt_char(self.ctrl_uuid, struct.pack(\" self.obj_b.x and y < 
\n async def start_listener(self, client, *args):\n # start the sensor on the device\n await client.write_gatt_char(self.ctrl_uuid, struct.pack(\" self.obj_b.x and y < (self.obj_b.y + self.obj_b.height) and y > self.obj_b.y):\n return True\n return False\n\n def on_mouse_press(self, x, y, button, modifiers):\n if self.mouse_on_sprite(x, y):\n if self.name == 'acid' and barr[self.name] == 1:\n if inv['scarf'] == 1:\n self.delete_from_screen()\n self.new_sprite()\n barr[self.name] = 0\n self.add(MessageBox(\"acid_scrf\", 40, 400, 120))\n else:\n self.add(MessageBox(\"no_scarf_warning\", 40, 200, 120))\n if self.name == 'safe' and barr[self.name] == 1:\n if inv['paper'] == 1:\n self.delete_from_screen()\n self.new_sprite()\n barr[self.name] = 0\n barr['key'] = 0\n self.add(MessageBox(\"safe_open\", 40, 400, 120))\n self.scene.add(ItemInv(1490, 200, \"Resources/key.png\", \"key\", 0.15))\n else:\n self.add(MessageBox(\"safe\", 40, 400, 120))\n\n def delete_from_screen(self):\n \"\"\"Fade the image out (remove it from the screen)\"\"\"\n hide = cocos.actions.FadeOut(3)\n self.obj_b.do(hide)\n\n def new_sprite(self):\n \"\"\"Fade the new image in\"\"\"\n show = cocos.actions.FadeIn(1)\n self.obj_g.do(show)\n\nclass MessageAcionLayer(cocos.layer.Layer):\n \"\"\"\n Object for moving on to the next level\n Direction: right\n \"\"\"\n\n is_event_handler = True\n def __init__(self, x, y, texture, message, pos_x, pos_y, font_size):\n self.w = x\n self.h = y\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.texture = texture\n self.message = message\n self.font_size = font_size\n self.flag = True\n super().__init__()\n\n # Load and position the image\n self.obj = cocos.sprite.Sprite(self.texture, anchor = (0,0))\n self.obj.position = self.w, self.h\n self.add(self.obj)\n\n def mouse_on_sprite(self, x, y):\n \"\"\"Check whether the cursor hits the object\"\"\"\n if (x < (self.obj.x + self.obj.width) and x > self.obj.x and y < (self.obj.y + self.obj.height) and y > self.obj.y):\n return True\n return False\n\n def on_mouse_press(self, x, y, button, modifiers):\n \"\"\"Perform the transition to the selected scene\"\"\"\n if button & mouse.LEFT:\n if self.mouse_on_sprite(x,y):\n self.add(MessageBox(self.message, self.font_size, self.pos_x, self.pos_y))\n", "sub_path": "texture_tools.py", "file_name": "texture_tools.py", "file_ext": "py", "file_size_in_byte": 4644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cocos.sprite", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cocos.layer", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cocos.sprite.Sprite", "line_number": 38, "usage_type": "call"}, {"api_name": "cocos.sprite", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cocos.sprite.Sprite", "line_number": 43, "usage_type": "call"}, {"api_name": "cocos.sprite", "line_number": 43, "usage_type": "attribute"}, {"api_name": "inventory.inv", "line_number": 62, "usage_type": "name"}, {"api_name": "inventory.MessageBox", "line_number": 66, "usage_type": "call"}, {"api_name": "inventory.MessageBox", "line_number": 68, "usage_type": "call"}, {"api_name": "inventory.inv", "line_number": 70, "usage_type": "name"}, {"api_name": "inventory.MessageBox", "line_number": 75, "usage_type": "call"}, {"api_name": "inventory.ItemInv", "line_number": 76, "usage_type": "call"}, {"api_name": "inventory.MessageBox", "line_number": 78, "usage_type": "call"}, {"api_name": "cocos.actions.FadeOut", "line_number": 82, "usage_type": "call"}, {"api_name": "cocos.actions", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cocos.actions.FadeIn", "line_number": 87, "usage_type": "call"}, 
{"api_name": "cocos.actions", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cocos.layer", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cocos.sprite.Sprite", "line_number": 109, "usage_type": "call"}, {"api_name": "cocos.sprite", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pyglet.window.mouse.LEFT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pyglet.window.mouse", "line_number": 121, "usage_type": "name"}, {"api_name": "inventory.MessageBox", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "616318136", "text": "#!/usr/bin/env python\n\nimport http.client, json, threading\nfrom html.parser import HTMLParser\n\nind_lock = threading.Lock()\n\nclass OBDCodeParser(HTMLParser):\n def __init__(self, code_c):\n self.codes = []\n self.file = code_c + \"_codes.json\"\n self.scans = 0\n\n super().__init__()\n\n def save_file(self):\n with open(self.file, \"w\") as f:\n json.dump(self.codes, f, indent=2)\n\n def close(self):\n self.save_file()\n super().close()\n\n def feed(self, data):\n global codes\n global ind_lock\n\n self.status = 0\n self.parse = False\n self.list_type = 0\n self.scans += 1\n\n if self.scans % 100 == 0:\n self.save_file()\n\n with ind_lock:\n self.ind = len(self.codes)\n self.codes.append({})\n\n super().feed(data)\n\n def handle_starttag(self, tag, attr):\n # found code number / name\n if self.status == 0 and (\"class\", \"code\") in attr:\n self.status = 1\n self.parse = True\n # found either causes or symptoms\n elif self.status == 1 and tag == \"h2\":\n self.status = 2\n # causes/symptoms data elements\n elif self.status == 2 and tag == \"li\":\n self.parse = True\n # description data\n elif self.status >= 1 and tag == \"main\":\n self.status = 3\n self.parse = True\n\n def handle_comment(self, data):\n # all data has been processed\n if \"Content Ends\" in data:\n self.status = -1\n\n def handle_endtag(self, tag):\n # close description data\n if self.status == 3 and tag == \"main\":\n self.status = -1\n self.parse = False\n return\n\n def handle_data(self, data):\n if self.parse:\n # strip newlines and tabs but leave space characters\n data = data.strip(\"\\n\\t\\r\")\n if not data:\n return\n\n if self.status == 1:\n # if code doesn't exist\n if data == \" - \":\n self.codes.pop(self.ind)\n self.status = -1\n return\n\n\n self.codes[self.ind][\"Code\"], self.codes[self.ind][\"Name\"] = data.split(\" - \", 1)\n self.parse = False\n elif self.status == 2:\n if self.list_type == 1:\n if not \"Causes\" in self.codes[self.ind]:\n self.codes[self.ind][\"Causes\"] = []\n\n self.codes[self.ind][\"Causes\"].append(data)\n elif self.list_type == 2:\n if not \"Symptoms\" in self.codes[self.ind]:\n self.codes[self.ind][\"Symptoms\"] = []\n\n self.codes[self.ind][\"Symptoms\"].append(data)\n self.parse = False\n elif self.status == 3:\n if not \"Description\" in self.codes[self.ind]:\n self.codes[self.ind][\"Description\"] = \"\"\n\n self.codes[self.ind][\"Description\"] += data\n elif self.status == 2:\n # check whether the found element is causes or symptoms\n if \"causes\" in data:\n self.list_type = 1\n elif \"symptoms\" in data:\n self.list_type = 2\n elif \"makes\" in data:\n self.list_type = 0\n\ndef scrape_domain(domain):\n p_code_range = [(0x0000, 0x1000), (0x2000, 0x3000), (0x3400, 0x3A00)]\n c_code_range = [(0x0000, 0x1000), (0x3000, 0x4000)]\n b_code_range = c_code_range\n u_code_range = c_code_range\n\n # p_code_ns_range = [(0x1000, 0x2000), (0x3000, 0x3400)]\n # c_code_ns_range = [(0x1000, 
0x3000)]\n # b_code_ns_range = c_code_ns_range\n # u_code_ns_range = c_code_ns_range\n\n # setup threading\n threads = []\n code_cs = \"PCBU\"\n range_cs = [p_code_range, c_code_range, b_code_range, u_code_range]\n\n for i in range(len(code_cs)):\n threads.append(threading.Thread(\n target=scrape_codes, args=(domain, code_cs[i], range_cs[i])))\n threads[i].start()\n\n for t in threads:\n t.join()\n\ndef scrape_codes(domain, code_c, code_rs):\n conn = http.client.HTTPSConnection(domain)\n parser = OBDCodeParser(code_c)\n\n for r in code_rs:\n for x in range(r[0], r[1]):\n parse_page(conn, parser, \"{}{:04x}\".format(code_c.lower(), x))\n print(\"Parsing code {}{:04X}\".format(code_c, x))\n\n parser.close()\n conn.close()\n\ndef parse_page(conn, parser, code):\n conn.request(\"GET\", \"/\" + code + \".html\")\n\n resp = conn.getresponse()\n # if response isn't okay then don't try to parse the data\n if resp.status != 200:\n print(\"Bad response at code \", code)\n return\n\n try:\n parser.feed(resp.read().decode())\n except UnicodeDecodeError as e:\n print(\"Decode error at code \", code)\n print(e)\n\ndef main():\n scrape_domain(\"www.autocodes.com\")\n\nif __name__ == '__main__':\n main()\n", "sub_path": "obdcodes.py", "file_name": "obdcodes.py", "file_ext": "py", "file_size_in_byte": 4989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "threading.Lock", "line_number": 6, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 8, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 18, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 130, "usage_type": "call"}, {"api_name": "http.client.client.HTTPSConnection", "line_number": 138, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 138, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "488265656", "text": "import pathlib\nfrom textwrap import dedent\nimport os\nimport shutil\nimport tempfile\nimport pytest\nfrom click.testing import CliRunner\nfrom git import Repo, Actor\n\nimport wily.__main__ as main\n\n\n@pytest.fixture\ndef gitdir(tmpdir):\n \"\"\" Create a project and add code to it \"\"\"\n repo = Repo.init(path=tmpdir)\n tmppath = pathlib.Path(tmpdir)\n testpath = tmppath / \"src\" / \"test.py\"\n (tmppath / \"src\").mkdir()\n # Write a test file to the repo\n with open(testpath, \"w\") as test_txt:\n test_txt.write(\"import abc\")\n\n index = repo.index\n index.add([str(testpath)])\n\n author = Actor(\"An author\", \"author@example.com\")\n committer = Actor(\"A committer\", \"committer@example.com\")\n\n index.commit(\"basic test\", author=author, committer=committer)\n\n first_test = \"\"\"\n import abc\n foo = 1\n def function1():\n a = 1 + 1\n \n class Class1(object):\n def method(self):\n b = 1 + 5\n \"\"\"\n with open(testpath, \"w\") as test_txt:\n test_txt.write(dedent(first_test))\n\n index.add([str(testpath)])\n index.commit(\"add line\", author=author, committer=committer)\n\n second_test = \"\"\"\n import abc\n foo = 1\n def function1():\n a = 1 + 1\n class Class1(object):\n def method(self):\n b = 1 + 5\n if b == 6:\n return 'banana'\n \"\"\"\n\n with open(testpath, \"w\") as test_txt:\n test_txt.write(dedent(second_test))\n\n index.add([str(testpath)])\n index.commit(\"remove line\", author=author, committer=committer)\n\n yield tmpdir\n repo.close()\n\n\n@pytest.fixture\ndef builddir(gitdir):\n \"\"\"\n A 
directory with a wily cache\n \"\"\"\n tmppath = pathlib.Path(gitdir)\n runner = CliRunner()\n result1 = runner.invoke(\n main.cli, [\"--debug\", \"--path\", gitdir, \"build\", str(tmppath / \"src\")]\n )\n assert result1.exit_code == 0, result1.stdout\n\n result2 = runner.invoke(main.cli, [\"--debug\", \"--path\", gitdir, \"index\"])\n assert result2.exit_code == 0, result2.stdout\n\n return gitdir\n\n\n@pytest.fixture(autouse=True)\ndef cache_path(monkeypatch):\n \"\"\"\n Configure wily cache and home path, clean up cache afterward\n \"\"\"\n tmp = tempfile.mkdtemp()\n monkeypatch.setenv(\"HOME\", tmp)\n yield tmp\n shutil.rmtree(tmp)\n", "sub_path": "test/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "git.Repo.init", "line_number": 16, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 16, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "git.Actor", "line_number": 27, "usage_type": "call"}, {"api_name": "git.Actor", "line_number": 28, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 43, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 61, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 75, "usage_type": "call"}, {"api_name": "click.testing.CliRunner", "line_number": 76, "usage_type": "call"}, {"api_name": "wily.__main__.cli", "line_number": 78, "usage_type": "attribute"}, {"api_name": "wily.__main__", "line_number": 78, "usage_type": "name"}, {"api_name": "wily.__main__.cli", "line_number": 82, "usage_type": "attribute"}, {"api_name": "wily.__main__", "line_number": 82, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 93, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 96, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "206270646", "text": "import matplotlib.pyplot as plt\n\ndef segment(xa, ya, xb, yb):\n return ((xa, ya), (xb, yb))\n\ndef plot_segments(plot, segments):\n for s in segments:\n a, b = s\n xa, ya = a\n xb, yb = b\n plot.plot([xa, xb], [ya, yb], c=\"g\")\n\ndef read_from(filename):\n segments = []\n with open(filename, \"r\") as f:\n while True:\n numbers = list(map(float, f.readline().split()))\n if numbers == []:\n break\n segments.append(segment(*numbers))\n\n return segments\n\n_, plts = plt.subplots(1, 3, sharex=True)\n\nplot_segments(plts[0], read_from(\"data/input\"))\nplot_segments(plts[1], read_from(\"data/func\"))\nplot_segments(plts[2], read_from(\"data/output\"))\nplt.show()\n", "sub_path": "20201/decision-support-systems/refs/Cuoi ky/Code/example2/draw.py", "file_name": "draw.py", "file_ext": "py", "file_size_in_byte": 732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "382320276", "text": "import httplib\nimport re\n\nfrom 
django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# we accept the long URLs shown in the location bar or the short versions\n# produced by the Share button\nYOUTUBE_URL_RE = re.compile(r'^https?://(?:www.youtube.com/watch\\?v=|youtu.be/)(?P<video_id>[-\\w]{11}).*$')\nYOUTUBE_EMBED_CODE_RE = re.compile(r'<iframe.*src=\"https?://www.youtube.com/embed/(?P<video_id>[-\\w]{11})\".*$')\nYOUTUBE_EMBEDDING_TEMPLATE = (\n '<iframe src=\"https://www.youtube.com/embed/%s\" frameborder=\"0\" allowfullscreen></iframe>'\n)\n\nVIMEO_URL_RE = re.compile(r'^https?://(?:(?:player|www)\\.)?vimeo.com/(?:video/)?(?P<video_id>\\d+)/?$')\nVIMEO_EMBED_CODE_RE = re.compile(r'<iframe.*src=\"https?://player.vimeo.com/video/(?P<video_id>\\d+)\".*$')\nVIMEO_EMBEDDING_TEMPLATE = (\n '<iframe src=\"https://player.vimeo.com/video/%s\" frameborder=\"0\" allowfullscreen></iframe>'\n)\n\ndef get_video_embedding_code(url):\n \"\"\"\n Returns valid code to embed a Vimeo or YouTube video, or a placeholder.\n \"\"\"\n output = mark_safe('<p>
%s</p>
' % unicode(_('This video is not available.')))\n\n video_id = None\n try:\n video_id = get_vimeo_video_id(url)\n output = VIMEO_EMBEDDING_TEMPLATE % video_id\n except ValueError:\n log.debug(\"NOT VIMEO URL: %s\" % url)\n try:\n video_id = get_youtube_video_id(url)\n output = YOUTUBE_EMBEDDING_TEMPLATE % video_id\n except ValueError:\n pass\n\n return output\n\ndef get_vimeo_video_id(value):\n valid_url = VIMEO_URL_RE.search(value)\n if valid_url:\n return valid_url.groupdict()['video_id']\n\n valid_embed_code = VIMEO_EMBED_CODE_RE.search(value)\n if valid_embed_code:\n return valid_embed_code.groupdict()['video_id']\n\n raise ValueError('Supplied URL is not valid Vimeo embedding code, or the video ID is incorrect.')\n\ndef get_youtube_video_id(value):\n valid_url = YOUTUBE_URL_RE.search(value)\n if valid_url:\n return valid_url.groupdict()['video_id']\n\n valid_embed_code = YOUTUBE_EMBED_CODE_RE.search(value)\n if valid_embed_code:\n return valid_embed_code.groupdict()['video_id']\n\n raise ValueError('Supplied URL is not valid YouTube embedding code, or the video ID is incorrect.')\n\ndef is_valid_youtube_video(video_id):\n \"\"\"\n Try to verify that a given URL actually points to a valid video on YouTube.\n \"\"\"\n log.debug(\"validating youtube url: %s\" % video_id)\n c = httplib.HTTPConnection('gdata.youtube.com')\n c.request('HEAD', '/feeds/api/videos/' + video_id)\n r = c.getresponse()\n if r.status == 200:\n return True\n return False\n", "sub_path": "src/cpi/apps/attachments/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 2968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 42, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 42, "usage_type": "call"}, {"api_name": "httplib.HTTPConnection", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "305791096", "text": "import cv2\r\nimport numpy as np\r\nfrom time import perf_counter\r\ncap = cv2.VideoCapture(0)\r\nfps = cap.get(cv2.CAP_PROP_FPS)\r\nheight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\r\nscale_fact = 1;\r\n\r\nsegment_count = fps*3\r\n\r\nsegment_height = int(height*scale_fact/segment_count)\r\n\r\nprint(\"segment count:\", segment_count, \"\\nscaling factor:\", scale_fact, \"\\nsegment_height\",segment_height)\r\nframes = []\r\nt1 = perf_counter()\r\nwhile(cap.isOpened()):\r\n ret, new_frame = cap.read()\r\n if new_frame is None:\r\n break\r\n \r\n if scale_fact != 1:\r\n new_frame = cv2.resize(new_frame,\r\n (int(new_frame.shape[1]*scale_fact),\r\n int(new_frame.shape[0]*scale_fact)))\r\n frames.append(new_frame)\r\n if len(frames) >= segment_count: \r\n segments = []\r\n for i,frame in enumerate(frames):\r\n segments.append(frame[i*segment_height:(i+1)*segment_height])\r\n\r\n noodled_frame = np.concatenate(segments, axis=0)\r\n\r\n frames.pop(0)\r\n cv2.imshow('frame', noodled_frame)\r\n t2 = perf_counter()\r\n delay = int(1000/fps - (t2-t1)*1000)\r\n delay = delay if delay >1 else 1\r\n if cv2.waitKey(delay) & 0xFF == ord('q'):\r\n break\r\n t1 = perf_counter()\r\n 
\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n", "sub_path": "noodle_dance.py", "file_name": "noodle_dance.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 6, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "475561533", "text": "#!/usr/bin/env python3\n#multicolored_lines.py\n#Tim Tyree\n#5.10.2021\n# forked from https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/multicolored_line.html\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import ListedColormap, BoundaryNorm\n\nfrom ..measure._utils_find_contours import split_contour_into_contiguous\n\ndef plotMulticoloredLine(fig,ax,x_values,y_values,c_values,cmap='coolwarm',use_colorbar=True,vmin=None,vmax=None,alpha=1.,lw=2):\n '''x_values,y_values,c_values are each 1-by-N numpy arrays.'''\n #define the relevant segments\n points = np.array([x_values, y_values]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n # Create a continuous norm to map from data points to colors\n if vmin is None:\n vmin=c_values.min()\n if vmax is None:\n vmax=c_values.max()\n norm = plt.Normalize(vmin, vmax)\n lc = LineCollection(segments, cmap=cmap, norm=norm, alpha=alpha)\n # lc = LineCollection(segments, cmap='hot', norm=norm)\n # Set the values used for colormapping\n lc.set_array(c_values)\n lc.set_linewidth(lw)\n line = ax.add_collection(lc)\n if use_colorbar:\n fig.colorbar(line, ax=ax)\n\n # # Use a boundary norm instead\n # cmap = ListedColormap(['r', 'k', 'b'])\n # norm = BoundaryNorm([-1, -0.5, 0.5, 1], cmap.N)\n # lc = LineCollection(segments, cmap=cmap, norm=norm)\n # lc.set_array(dydx)\n # lc.set_linewidth(2)\n # line = ax.add_collection(lc)\n # if use_colorbar:\n # fig.colorbar(line, ax=ax)\n return None\n\ndef plotColoredContour(fig,ax,xy_values_lst,c_values_lst,\n cmap='hot',use_colorbar=False,\n vmin=0.,vmax=10.,lw=3,navg=20,alpha=1.):\n for i in range(len(c_values_lst)):\n c_values=np.abs(c_values_lst[i])#.copy()\n #compute moving average of c_values\n # for k in range(navg):\n # c_values[1:]=(c_values[1:]+c_values[:-1])/2.\n c_lst=[]\n for j in range(c_values.shape[0]):\n c_lst.append(np.mean(c_values[j:j+navg]))\n c_values=np.array(c_lst)\n xy_values=xy_values_lst[i]\n contour_lst = split_contour_into_contiguous(xy_values)\n for contour in contour_lst:\n x_values=contour[:,0]\n y_values=contour[:,1]\n plotMulticoloredLine(fig,ax,\n x_values,\n y_values,\n c_values,\n cmap=cmap,\n use_colorbar=use_colorbar,\n vmin=vmin,vmax=vmax,lw=lw,alpha=alpha)\n return None\n\nif __name__=='__main__':\n x = np.linspace(0, 3 * np.pi, 500)\n y 
= np.sin(x)\n dydx = np.cos(0.5 * (x[:-1] + x[1:])) # first derivative\n x_values=np.sin(x)\n y_values=np.cos(x)\n c_values=dydx\n\n # Create a set of line segments so that we can color them individually\n # This creates the points as a N x 1 x 2 array so that we can stack points\n # together easily to get the segments. The segments array for line collection\n # needs to be (numlines) x (points per line) x 2 (for x and y)\n\n fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)\n plotMulticoloredLine(fig,ax,x_values,y_values,c_values,cmap='jet',use_colorbar=True)\n# ax.set_xlim([0,width])\n# ax.set_ylim([0,height])\n ax.set_xlim(x_values.min(), x_values.max())\n ax.set_ylim(y_values.min(), y_values.max())\n plt.show()\n", "sub_path": "notebooks/lib/viewer/multicolored_lines.py", "file_name": "multicolored_lines.py", "file_ext": "py", "file_size_in_byte": 3566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "measure._utils_find_contours.split_contour_into_contiguous", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "607126115", "text": "import multiprocessing\nimport pandas as pd\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport time\nfrom tabulate import tabulate\nfrom sklearn.decomposition import TruncatedSVD\nfrom KNN import k_nn\n\n\ndef knn(train_X, train_y, test_X, distance, p=0):\n return k_nn(train_X, train_y, test_X, distance, p)\n\n\ndef run_parallel_knn(train_data_x, train_data_y, validation_x):\n n_cores = multiprocessing.cpu_count()\n return Parallel(n_jobs=n_cores)(delayed(knn)(train_data_x, train_data_y, test_point)\n for _, test_point in enumerate(validation_x))\n\n\ndef get_accuracy(predicted, actual):\n \"\"\"Returns the accuracy of the predicted values, compared to the actual ones.\n The accuracy is in the range [0,1]. 
A value of 0 indicates \"no values of the predicted list are equal to those of\n actual\", and 1 indicates \"all values are equal\".\n \"\"\"\n\n diff = 0\n for i in range(len(predicted)):\n diff += (predicted[i] != actual[i])\n\n return 1 - diff / len(predicted)\n\n\ndef exercise_a():\n \"\"\"\n Fetches the small training set and uses the k-NN algorithm to predict\n the test data set. The k parameter is increased from 1 to 20. Finally\n a table with the results is printed.\n :return: none\n \"\"\"\n result = []\n highest_index = -1\n predicted = run_parallel_knn(train_data_x, train_data_y, test_data_x)\n\n # run through all ks from 1 to 20, compare the output to calculate the accuracy\n # and put it in the result list.\n for k in range(1, 21):\n accuracy = get_accuracy(list(map(lambda x: x.get_prediction(k), predicted)), test_data_y)\n\n if highest_index == -1 or result[highest_index][1] < accuracy:\n highest_index = k - 1\n\n result.append(\n # k | accuracy | dummy value\n [k, accuracy, 0])\n\n # print a tabulate with the fetched information\n result[highest_index][2] = 1 # set the dummy value to 1, indicating this index is the highest\n print(tabulate(result, headers=['K', 'Accuracy', 'Is Highest']))\n\n\ndef exercise_b():\n correct = [0 for _ in range(20)]\n\n # combine the test and training set\n joined_train_x = np.concatenate((train_data_x, test_data_x), axis=0)\n joined_train_y = np.concatenate((train_data_y, test_data_y), axis=0)\n\n # now, for every item of this combined set, verify if the k-NN algorithm predicts a good label\n for i in range(len(joined_train_x)):\n\n # save the to-be removed value in dummy variables\n dummy_x = joined_train_x[i]\n dummy_y = joined_train_y[i]\n\n # remove the dummy variables\n train_x = np.delete(joined_train_x, i, 0)\n train_y = np.delete(joined_train_y, i)\n\n # runs the knn algorithm\n predicted = knn(train_x, train_y, dummy_x, 'euclidean')\n\n # count for every k setting if the predicted label is equal to the real one\n for k in range(1, 21):\n correct[k-1] += predicted.get_prediction(k) == dummy_y\n\n max_correct_value = max(correct)\n print(tabulate(\n [[k, correct[k-1] / joined_train_x.shape[0], int(correct[k-1] == max_correct_value)] for k in range(1,21)],\n headers=['K', 'Accuracy', 'Is Highest']))\n \n \n#not sure whether the cross validation should be done on only the training set \ndef exercise_b_without_test_set():\n correct = [0 for _ in range(20)]\n\n # now, for every item of the training set, verify if the k-NN algorithm predicts a good label\n for i in range(len(train_data_x)):\n\n # save the to-be removed value in dummy variables\n dummy_x = train_data_x[i]\n dummy_y = train_data_y[i]\n\n # remove the dummy variables\n train_x = np.delete(train_data_x, i, 0)\n train_y = np.delete(train_data_y, i)\n\n # runs the knn algorithm\n predicted = knn(train_x, train_y, dummy_x, 'euclidean')\n\n # count for every k setting if the predicted label is equal to the real one\n for k in range(1, 21):\n correct[k-1] += predicted.get_prediction(k) == dummy_y\n\n max_correct_value = max(correct)\n print(tabulate(\n [[k, correct[k-1] / train_data_x.shape[0], int(correct[k-1] == max_correct_value)] for k in range(1,21)],\n headers=['K', 'Accuracy', 'Is Highest']))\n \n\ndef exercise_c():\n optimal_p, optimal_k, highest_accuracy = 0, 0, 0\n for p in range(15):\n correct = [0 for _ in range(20)]\n\n # now, for every item of the training set, verify if the k-NN algorithm predicts a good label\n for i in range(len(train_data_x)):\n\n # 
save the to-be removed value in dummy variables\n dummy_x = train_data_x[i]\n dummy_y = train_data_y[i]\n\n # remove the dummy variables\n train_x = np.delete(train_data_x, i, 0)\n train_y = np.delete(train_data_y, i)\n\n # runs the knn algorithm\n predicted = knn(train_x, train_y, dummy_x, 'minkowski', p+1)\n\n # count for every k setting if the predicted label is equal to the real one\n for k in range(1, 21):\n correct[k-1] += predicted.get_prediction(k) == dummy_y\n \n max_correct_value = max(correct)\n \n current_accuracy = correct[k-1] / train_data_x.shape[0]\n \n if(current_accuracy > highest_accuracy):\n optimal_p = p + 1\n optimal_k = k\n highest_accuracy = current_accuracy\n \n print(('------p={p_value}------').format(p_value=p+1))\n print(tabulate(\n [[k, correct[k-1] / train_data_x.shape[0], int(correct[k-1] == max_correct_value)] for k in range(1,21)],\n headers=['K', 'Accuracy', 'Is Highest']))\n print('------------------------\\n')\n \n print('OPTIMAL VALUES FOR P AND K: {opt_p} and {opt_k} with an accuracy of {accu}'.format(opt_p=optimal_p, opt_k=optimal_k, accu=highest_accuracy))\n\n\ndef exercise_g(n_components=18):\n global train_data_x\n global test_data_x\n\n svd = TruncatedSVD(n_components=n_components)\n svd.fit(train_data_x)\n train_data_x = svd.transform(train_data_x)\n test_data_x = svd.transform(test_data_x)\n\n \nif __name__ == '__main__':\n train_data_x = np.repeat(pd.read_csv(\"MNIST_train_small.csv\").to_numpy()[:, 1:], repeats=1, axis=0)\n train_data_y = np.repeat(pd.read_csv(\"MNIST_train_small.csv\").to_numpy()[:, 0], repeats=1, axis=0)\n test_data_x = np.repeat(pd.read_csv(\"MNIST_test_small.csv\").to_numpy()[:, 1:], repeats=1, axis=0)\n test_data_y = np.repeat(pd.read_csv(\"MNIST_test_small.csv\").to_numpy()[:, 0], repeats=1, axis=0)\n t0 = time.time()\n\n exercise_c()\n print(time.time() - t0)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "KNN.k_nn", "line_number": 12, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 16, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 17, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 17, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 78, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 106, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 135, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 173, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 174, 
"usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 176, "usage_type": "call"}, {"api_name": "time.time", "line_number": 177, "usage_type": "call"}, {"api_name": "time.time", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "527575930", "text": "from locust import HttpUser, task, TaskSet, events, constant\nimport time, sys\nimport os\n\nclass UserBehavior(HttpUser):\n\n @task(1)\n def test_get(self):\n data = {\n \"requestId\": \"303fe1ca-9d76-45e3-ac3e-41f6aa35a994\",\n \"createTime\": 0,\n \"generator\": 1,\n \"type\": 1,\n \"fromUser\": {\n \"nickName\": \"shilidia122222\",\n \"role\": 1,\n \"openId\": \"nntOEKMDKwm1Qk6Tevy8GHgGa2ZtWg5t12f0irch1121\"\n },\n \"toUser\": {\n \"nickName\": \"wufei\",\n \"role\": 2\n },\n # \"transferList\": None,\n \"context\": {\n \"sellerId\": 1901625824,\n \"itemId\": 0,\n \"orderId\": 0,\n \"orderStatus\": 0,\n \"assistantOnlineStatus\": 0\n },\n \"message\": {\n \"contentType\": 1,\n \"content\": \"{\\\"text\\\":\\\"包装太差\\\"}\"\n }\n }\n print(111111111111111111)\n url = \"https://wangcai-test-ks.xiaoduoai.com/spi/chatbotevent\"\n res = self.client.post(url=url, json=data)\n print(res.json())\n\n\nclass WebUser(TaskSet):\n \"\"\"性能测试配置 换算配置\"\"\"\n host = \"\"\n tasks = [UserBehavior]\n wait_time = constant(1)\n\n\nif __name__ == '__main__':\n os.system('locust -f ./test_4.py -u 20 -r 1 --host https://wangcai-test-ks.xiaoduoai.com/ --web-host=127.0.0.1') # 试试\n", "sub_path": "test_4locust.py", "file_name": "test_4locust.py", "file_ext": "py", "file_size_in_byte": 1474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "locust.HttpUser", "line_number": 5, "usage_type": "name"}, {"api_name": "locust.task", "line_number": 7, "usage_type": "call"}, {"api_name": "locust.TaskSet", "line_number": 42, "usage_type": "name"}, {"api_name": "locust.constant", "line_number": 46, "usage_type": "call"}, {"api_name": "os.system", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "145270068", "text": "import os\n\nfrom telebot import types\nimport flask\n\nfrom app.bot_handlers import bot\nfrom app.config import settings\n\nserver = flask.Flask(__name__)\n\n\n@server.route('/' + settings.TOKEN, methods=['POST'])\ndef get_message():\n bot.process_new_updates(\n [types.Update.de_json(flask.request.stream.read().decode('utf-8'))]\n )\n return '!', 200\n\n\n@server.route('/', methods=['GET'])\ndef index():\n bot.remove_webhook()\n bot.set_webhook(\n url='https://{}.herokuapp.com/{}'.format(settings.APP_NAME, settings.TOKEN)\n )\n return 'Hello from Heroku!', 200\n\n\nif __name__ == '__main__':\n bot.remove_webhook()\n # bot.polling(none_stop=True) use to local run\n server.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))\n", "sub_path": "run_server.py", "file_name": "run_server.py", "file_ext": "py", "file_size_in_byte": 754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "app.bot_handlers.bot.process_new_updates", "line_number": 14, "usage_type": "call"}, {"api_name": "app.bot_handlers.bot", "line_number": 14, "usage_type": 
"name"}, {"api_name": "telebot.types.Update.de_json", "line_number": 15, "usage_type": "call"}, {"api_name": "telebot.types.Update", "line_number": 15, "usage_type": "attribute"}, {"api_name": "telebot.types", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.stream.read", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.config.settings.TOKEN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "app.bot_handlers.bot.remove_webhook", "line_number": 22, "usage_type": "call"}, {"api_name": "app.bot_handlers.bot", "line_number": 22, "usage_type": "name"}, {"api_name": "app.bot_handlers.bot.set_webhook", "line_number": 23, "usage_type": "call"}, {"api_name": "app.bot_handlers.bot", "line_number": 23, "usage_type": "name"}, {"api_name": "app.config.settings.APP_NAME", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "app.config.settings.TOKEN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.bot_handlers.bot.remove_webhook", "line_number": 30, "usage_type": "call"}, {"api_name": "app.bot_handlers.bot", "line_number": 30, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "296780257", "text": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom conans.errors import ConanException, ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.29.1\"\n\n\nclass GccConan(ConanFile):\n name = \"gcc\"\n description = \"The GNU Compiler Collection includes front ends for C, \" \\\n \"C++, Objective-C, Fortran, Ada, Go, and D, as well as \" \\\n \"libraries for these languages (libstdc++,...). 
\"\n topics = (\"gcc\", \"gnu\", \"compiler\", \"c\", \"c++\")\n homepage = \"https://gcc.gnu.org\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"GPL-3.0-only\"\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n _autotools = None\n\n def build_requirements(self):\n self.build_requires(\"flex/2.6.4\")\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n pkgversion = 'conan GCC %s' % self.version\n bugurl = self.url + '/issues'\n libdir = \"%s/lib/gcc/%s\" % (self.package_folder, self.version)\n args = [\n \"--enable-languages=c,c++\",\n \"--disable-nls\",\n \"--disable-multilib\",\n \"--disable-bootstrap\",\n \"--with-system-zlib\",\n \"--with-gmp=%s\" % self.deps_cpp_info['gmp'].rootpath,\n '--with-mpc=%s' % self.deps_cpp_info[\"mpc\"].rootpath,\n \"--with-mpfr=%s\" % self.deps_cpp_info[\"mpfr\"].rootpath,\n \"--without-isl\",\n \"--libdir=%s\" % libdir,\n '--with-pkgversion=%s' % pkgversion,\n \"--program-suffix=-%s\" % self.version,\n \"--with-bugurl=%s\" % bugurl\n ]\n if self.settings.os == \"Macos\":\n xcrun = tools.XCRun(self.settings)\n args.extend([\n '--with-native-system-header-dir=/usr/include',\n \"--with-sysroot={}\".format(xcrun.sdk_path)\n ])\n self._autotools.libs = [] # otherwise causes config.log to fail finding -lmpc\n if self.settings.compiler in [\"clang\", \"apple-clang\"]:\n # xgcc: error: unrecognized command-line option -stdlib=libc++\n if self.settings.compiler.libcxx == \"libc++\":\n self._autotools.cxx_flags.remove(\"-stdlib=libc++\")\n elif self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"]:\n self._autotools.cxx_flags.remove(\"-stdlib=libstdc++\")\n self._autotools.configure(args=args, configure_dir=self._source_subfolder)\n return self._autotools\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def requirements(self):\n self.requires(\"mpc/1.2.0\")\n self.requires(\"mpfr/4.1.0\")\n self.requires(\"gmp/6.2.0\")\n self.requires(\"zlib/1.2.11\")\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Windows builds aren't supported (yet), sorry\")\n if tools.cross_building(self.settings):\n raise ConanInvalidConfiguration(\"no cross-building support (yet), sorry\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"gcc-%s\" % self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def _make_args(self):\n if self.settings.os == \"Macos\":\n return [\"BOOT_LDFLAGS=-Wl,-headerpad_max_install_names\"]\n return []\n\n def build(self):\n # If building on x86_64, change the default directory name for 64-bit libraries to \"lib\":\n libdir = \"%s/lib/gcc/%s\" % (self.package_folder, self.version)\n tools.replace_in_file(os.path.join(self.source_folder,\n self._source_subfolder, \"gcc\", \"config\", \"i386\", \"t-linux64\"),\n \"m64=../lib64\", \"m64=../lib\", strict=False)\n # Ensure correct install names when linking against libgcc_s;\n # see discussion in https://github.com/Homebrew/legacy-homebrew/pull/34303\n tools.replace_in_file(os.path.join(self.source_folder,\n self._source_subfolder, \"libgcc\", \"config\", \"t-slibgcc-darwin\"),\n \"@shlib_slibdir@\", libdir, strict=False)\n autotools = self._configure_autotools()\n autotools.make(args=self._make_args)\n\n def package_id(self):\n del self.info.settings.compiler\n\n def package(self):\n autotools = self._configure_autotools()\n if 
self.settings.build_type == \"Debug\":\n autotools.install(args=self._make_args)\n else:\n autotools.make(args=[\"install-strip\"] + self._make_args)\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.remove_files_by_mask(self.package_folder, \"*.la\")\n self.copy(pattern=\"COPYING*\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : \" + bindir)\n self.env_info.PATH.append(bindir)\n\n cc = os.path.join(bindir, \"gcc-%s\" % self.version)\n self.output.info(\"Creating CC env var with : \" + cc)\n self.env_info.CC = cc\n\n cxx = os.path.join(bindir, \"g++-%s\" % self.version)\n self.output.info(\"Creating CXX env var with : \" + cxx)\n self.env_info.CXX = cxx\n", "sub_path": "recipes/gcc/all/conanfile.py", "file_name": "conanfile.py", "file_ext": "py", "file_size_in_byte": 5428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "conans.ConanFile", "line_number": 8, "usage_type": "name"}, {"api_name": "conans.AutoToolsBuildEnvironment", "line_number": 26, "usage_type": "call"}, {"api_name": "conans.tools.XCRun", "line_number": 46, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 46, "usage_type": "name"}, {"api_name": "conans.errors.ConanInvalidConfiguration", "line_number": 73, "usage_type": "call"}, {"api_name": "conans.tools.cross_building", "line_number": 74, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 74, "usage_type": "name"}, {"api_name": "conans.errors.ConanInvalidConfiguration", "line_number": 75, "usage_type": "call"}, {"api_name": "conans.tools.get", "line_number": 78, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 78, "usage_type": "name"}, {"api_name": "os.rename", "line_number": 80, "usage_type": "call"}, {"api_name": "conans.tools.replace_in_file", "line_number": 91, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "conans.tools.replace_in_file", "line_number": 96, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 96, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "conans.tools.rmdir", "line_number": 111, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 111, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "conans.tools.remove_files_by_mask", "line_number": 112, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 112, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}]} +{"seq_id": "476547846", "text": "from django import template\n\nregister = template.Library()\n\n@register.simple_tag\ndef 
query_transform(request, **kwargs):\n updated = request.GET.copy()\n for k, v in kwargs.items():\n updated[k] = v\n\n return updated.urlencode()\n", "sub_path": "customers/templatetags/customer_extras.py", "file_name": "customer_extras.py", "file_ext": "py", "file_size_in_byte": 241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.template.Library", "line_number": 3, "usage_type": "call"}, {"api_name": "django.template", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "345887132", "text": "# -*- coding: utf-8 -*-\n# @Author: miana1\n# @Description: File for doing the training and testing for the Riemannian knn\n# and mdm which cannot be saved using pickle.\n# @Date: 2020-02-14 13:15:18\n# @E-mail: ammar.mian@aalto.fi\n# @Last Modified by: miana1\n# @Last Modified time: 2020-02-14 15:12:08\n# ----------------------------------------------------------------------------\n# Copyright 2019 Aalto University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm, trange\nfrom sklearn.model_selection import KFold\nfrom sklearn.utils import shuffle\n\n\n\nif __name__ == '__main__':\n\n # Managing inputs of this script\n parser = argparse.ArgumentParser(description='Compute training and testing for Riemannian knn and mdm')\n parser.add_argument(\"data_file\", help=\"Path (From base) to the file containing machine learning features\")\n parser.add_argument(\"seed\", type=int, help=\"random state seed for shuffling data\")\n parser.add_argument(\"n_folds\", type=int, help=\"Number of folds\")\n parser.add_argument(\"-p\", \"--parallel\", action=\"store_true\",\n help=\"Enable parallel computation\")\n parser.add_argument(\"-j\", \"--n_jobs\", default=8, type=int,\n help=\"Number of jobs for parallel computation\")\n args = parser.parse_args()\n\n # We always need to know where this script is with regards to base of\n # project, so we define these variables to make everything run smoothly\n path_to_base = \"../../\"\n folder_of_present_script = os.path.dirname(os.path.realpath(__file__))\n absolute_base_path = os.path.join(folder_of_present_script, path_to_base)\n path_to_machine_learning_features_data_file = os.path.join(absolute_base_path,\n args.data_file)\n\n # Init paths, and import needed packages\n sys.path.insert(0, absolute_base_path)\n from global_utils import *\n from psdlearning.utils import algebra\n from psdlearning import parsing_methods\n from pyriemann.classification import KNearestNeighbor, MDM\n\n # Read logging configuration\n configure_logging(os.path.join(absolute_base_path, \"logging.yaml\"))\n\n # KNN parameters\n n_neighbors = 5\n metric = 'riemann'\n if args.parallel:\n n_jobs=args.n_jobs\n else:\n n_jobs = 1\n\n\n # Read data from pickle dump\n logging.info(\"Reading machine learning features data from 
file %s\",\n path_to_machine_learning_features_data_file)\n with open(path_to_machine_learning_features_data_file, 'rb') as f:\n dataset = pickle.load(f)\n\n # Getting train data and formatting it\n logging.info('Getting and formatting training data')\n train_samples = [dataset['features'][i] for i in dataset['indexes train']]\n train_labels = [dataset['labels'][i] for i in dataset['indexes train']]\n X_train = []\n y_train = []\n number_non_spd = 0\n for index in trange(len(train_samples)):\n regions_samples = train_samples[index]\n for sample in regions_samples:\n covariance = algebra.unvech(sample)\n\n # Discarding the non SPD matrices\n if algebra.is_pos_def(covariance):\n X_train.append(covariance)\n y_train.append(train_labels[index])\n else:\n number_non_spd += 1\n if number_non_spd > 0:\n logging.warning(f'There was {number_non_spd} non SPD matrices discarded among the samples')\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n train_samples = None\n train_labels = None\n\n # Getting test data and formatting it\n logging.info('Getting and formatting testing data')\n test_samples = [dataset['features'][i] for i in dataset['indexes test']]\n test_labels = [dataset['labels'][i] for i in dataset['indexes test']]\n dataset = None\n X_test = []\n y_test = []\n number_non_spd = 0\n for index in trange(len(test_samples)):\n regions_samples = test_samples[index]\n for sample in regions_samples:\n covariance = algebra.unvech(sample)\n\n # Discarding the non SPD matrices\n if algebra.is_pos_def(covariance):\n X_test.append(covariance)\n y_test.append(test_labels[index])\n else:\n number_non_spd += 1\n if number_non_spd > 0:\n logging.warning(f'There was {number_non_spd} non SPD matrices discarded among the samples')\n X_test = np.array(X_test)\n y_test = np.array(y_test)\n test_samples = None\n test_labels = None\n\n\n # Doing K-fold splitting\n X_train, y_train = shuffle(X_train, y_train, random_state=args.seed)\n kf = KFold(n_splits=args.n_folds, random_state=args.seed)\n kf.get_n_splits(X_train)\n\n\n # Training for each fold and each method\n logging.info('Doing training')\n clf_knn_k_fold = [] # Container of classifers trained on each fold\n clf_mdm_k_fold = [] # Container of classifers trained on each fold\n accuracy_list_training_knn = []\n accuracy_list_training_mdm = []\n i = 1\n for train_index, test_index in kf.split(X_train):\n\n logging.info(f'Doing fold {i}')\n clf_knn = KNearestNeighbor(n_neighbors, metric, n_jobs)\n clf_mdm = MDM(metric, n_jobs)\n X_train_fold, X_test_fold = X_train[train_index], X_train[test_index]\n y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]\n\n clf_knn.fit(X_train_fold, y_train_fold)\n y_predicted = clf_knn.predict(X_test_fold)\n accuracy = (y_test_fold == y_predicted).sum() / len(y_test_fold)\n clf_knn_k_fold. append(clf_knn)\n accuracy_list_training_knn.append(accuracy)\n\n clf_mdm.fit(X_train_fold, y_train_fold)\n y_predicted = clf_mdm.predict(X_test_fold)\n accuracy = (y_test_fold == y_predicted).sum() / len(y_test_fold)\n clf_mdm_k_fold. 
append(clf_mdm)\n accuracy_list_training_mdm.append(accuracy)\n\n i += 1\n\n\n # Testing on test dataset\n logging.info('Doing testing')\n accuracy_list_testing_knn = []\n accuracy_list_testing_mdm = []\n X_test, y_test = shuffle(X_test, y_test, random_state=args.seed)\n\n for clf_knn in clf_knn_k_fold:\n y_predicted = clf_knn.predict(X_test)\n accuracy = (y_test == y_predicted).sum() / len(y_test)\n accuracy_list_testing_knn.append(accuracy)\n\n for clf_mdm in clf_mdm_k_fold:\n y_predicted = clf_mdm.predict(X_test)\n accuracy = (y_test == y_predicted).sum() / len(y_test)\n accuracy_list_testing_mdm.append(accuracy)\n\n print('Results Riemannian KNN:')\n print('Training: ' + str(accuracy_list_training_knn))\n print('Testing: ' + str(accuracy_list_testing_knn))\n\n print('\\nResults Riemannian MDM:')\n print('Training: ' + str(accuracy_list_training_mdm))\n print('Testing: ' + str(accuracy_list_testing_mdm))\n", "sub_path": "pedestrian_detection/Scripts/compute_train_test_knn_mdm_riemannian.py", "file_name": "compute_train_test_knn_mdm_riemannian.py", "file_ext": "py", "file_size_in_byte": 7544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 78, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 84, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 90, "usage_type": "call"}, {"api_name": "psdlearning.utils.algebra.unvech", "line_number": 93, "usage_type": "call"}, {"api_name": "psdlearning.utils.algebra", "line_number": 93, "usage_type": "name"}, {"api_name": "psdlearning.utils.algebra.is_pos_def", "line_number": 96, "usage_type": "call"}, {"api_name": "psdlearning.utils.algebra", "line_number": 96, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 109, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 116, "usage_type": "call"}, {"api_name": "psdlearning.utils.algebra.unvech", "line_number": 119, "usage_type": "call"}, {"api_name": "psdlearning.utils.algebra", "line_number": 119, "usage_type": "name"}, {"api_name": "psdlearning.utils.algebra.is_pos_def", "line_number": 122, "usage_type": "call"}, {"api_name": "psdlearning.utils.algebra", "line_number": 122, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 142, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 150, "usage_type": "call"}, {"api_name": "pyriemann.classification.KNearestNeighbor", "line_number": 151, "usage_type": "call"}, {"api_name": "pyriemann.classification.MDM", "line_number": 152, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 172, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "459066734", "text": "import requests\nimport asyncio\n\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ViewSet, ModelViewSet\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import PermissionDenied, NotAcceptable\nfrom rest_framework import status\n\nfrom api.models import MegaPlanCredential, MegaPlanToken\nfrom api.serializers import MegaPlanCredentialSerializer#, MegaPlanTokenSerializer\n\nfrom api.mixins import CredentialMixin, MegaPlanMixin\n\nfrom methods.models import BaseAPIMethod\n\nclass GetCredentialModelViewSet(ModelViewSet):\n\tserializer_class = MegaPlanCredentialSerializer\n\n\tdef create(self, request, *args, **kwargs):\n\t\tif 'HTTP_AUTHORIZATION' not in request.META:\n\t\t\traise PermissionDenied({\"status\": \"error\", \"message\": \"You don't have permission to access.\"})\n\t\telse:\n\t\t\tuser_info = requests.get(\"https://dev-api.smartanalytics.io/api/auth-new/me\",headers={\"Authorization\":request.META['HTTP_AUTHORIZATION']}).json()\n\t\t\ttry:\n\t\t\t\tuser_id = user_info[\"id\"]\n\t\t\texcept:\n\t\t\t\traise PermissionDenied({\"status\": \"error\", \"message\": \"Authentication failed.\"})\n\t\t# user_id = 16\n\t\tdata \t\t\t= dict()\n\t\tdata[\"login\"] \t= request.data.get(\"login\", None)\n\t\tdata[\"crm_url\"]\t= request.data.get(\"crm_url\", None)\n\t\tdata[\"password\"] \t= request.data.get(\"password\", None)\n\n\t\t# data.update({\"user_id\": 2310})\n\t\tdata.update({\"user_id\": user_id})\n\n\t\tserializer = self.get_serializer(data=data, context={\"request\": request})\n\n\t\tif serializer.is_valid():\n\t\t\turl = serializer.initial_data['crm_url'] + '/api/v3/auth/access_token'\n\t\t\t# params for authenticate in megaplan\n\t\t\tparams \t\t\t\t\t= dict()\n\t\t\tparams['username'] \t\t= serializer.initial_data['login']\n\t\t\tparams['password']\t\t= serializer.initial_data['password']\n\t\t\tparams['grant_type']\t= 'password'\n\t\t\t\n\t\t\ttry:\n\t\t\t\tmegaplan_response \t= requests.post(url, json=params)\n\t\t\texcept:\n\t\t\t\traise PermissionDenied({\"status\": \"error\", \"message\": \"You don't have permission to access.\"})\n\n\t\t\tif megaplan_response.status_code != 200:\n\t\t\t\traise PermissionDenied({\"status\": \"error\", \"message\": \"You don't have permission to access.\"})\n\n\n\t\t\tif not MegaPlanCredential.objects.filter(\n\t\t\t\t\t\tlogin \t= serializer.initial_data['login'],\n\t\t\t\t\t\tcrm_url = serializer.initial_data['crm_url'],\n\t\t\t\t\t\tuser_id = data['user_id']\n\t\t\t\t\t).exists():\n\t\t\t\tserializer.save()\n\n\t\t\tmegaplan_json = megaplan_response.json() \n\t\t\t\n\t\t\tcredential = MegaPlanCredential.objects.get(\n\t\t\t\t\tlogin \t= serializer.initial_data['login'],\n\t\t\t\t\tcrm_url = 
serializer.initial_data['crm_url'],\n\t\t\t\t\tuser_id = serializer.initial_data['user_id']\n\t\t\t\t\t)\n\n\t\t\tif not MegaPlanToken.objects.filter(user=credential).exists():\n\t\t\t\ttoken = MegaPlanToken.objects.create(\n\t\t\t\t\tuser \t\t\t= credential,\n\t\t\t\t\taccess_token \t= megaplan_json['access_token'],\n\t\t\t\t\trefresh_token \t= megaplan_json['refresh_token'],\n\t\t\t\t\texpires_in \t\t= megaplan_json['expires_in']\n\t\t\t\t\t)\n\t\t\t\ttoken.save()\n\t\t\telse:\n\t\t\t\ttoken \t\t\t\t= MegaPlanToken.objects.get(user=credential)\n\t\t\t\ttoken.access_token \t= megaplan_json['access_token']\n\t\t\t\ttoken.refresh_token = megaplan_json['refresh_token']\n\t\t\t\ttoken.expires_in \t= megaplan_json['expires_in']\n\t\t\t\ttoken.save()\n\n\t\t\tnew_serializer = self.get_serializer(credential, context={\"request\": request})\n\t\t\theaders = self.get_success_headers(new_serializer.data)\n\n\t\t\treturn Response(new_serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\t\telse:\n\t\t\traise PermissionDenied({\"status\": \"error\", \"message\": \"Invalid data.\"})\n\n\n\nclass CredentialDetailModelViewSet(CredentialMixin, ModelViewSet):\n\tserializer_class = MegaPlanCredentialSerializer\n\n\tdef get_info(self, request, cred_id, format=None):\n\t\tcredential = self.get_credential(request, cred_id)\n\t\tserializer = self.get_serializer(credential, context={\"request\": request})\n\n\t\treturn Response(serializer.data)\n\nclass HomeAPIView(ViewSet):\n\tdef get_info(self, request, format=None):\n\t\tabs_uri = request.build_absolute_uri(\"\")\n\t\tcontext = {\n\t\t\t\t\"status\": \"success\",\n\t\t\t\t \"auth\": {\n\t\t\t\t \t\"{}get_credentials/\".format(abs_uri): \"auth for work with megplan\",\n\t\t\t\t \t},\n\t\t\t\t \"crm_methods\": {\n\t\t\t\t \t\"{}deals/\".format(abs_uri): \"deals\",\n\t\t\t\t \t\"{}offers/\".format(abs_uri): \"offers\",\n\t\t\t\t \t\"{}invoices/\".format(abs_uri): \"invoice\",\n\t\t\t\t \t\"{}employeers/\".format(abs_uri): \"employee\",\n\t\t\t\t },\n\t\t\t\t \"filters\": {\n\t\t\t\t \t\"{}filter/deal/\".format(abs_uri): \"deal filters list\",\n\t\t\t\t \t\"{}filter/offer/\".format(abs_uri): \"offer filters list\",\n\t\t\t\t \t\"{}filter/invoice/\".format(abs_uri): \"invoice filters list\",\n\t\t\t\t \t\"{}filter/employee/\".format(abs_uri): \"employee filters list\",\n\t\t\t\t }\n\t\t\t }\n\t\treturn Response(context)\n\nclass GetDealAPIView(CredentialMixin, MegaPlanMixin, APIView):\n\tdef post(self, request, format=None):\n\t\tcredential \t= self.get_credential(request)\n\t\tmethod \t\t= '/api/v3/deal'\n\n\t\treturn self.megaplan_call(credential, method)\n\nclass GetInvoiceAPIView(CredentialMixin, MegaPlanMixin, APIView):\n\tdef post(self, request, format=None):\n\t\tcredential \t= self.get_credential(request)\n\t\tmethod \t\t= '/api/v3/invoice'\n\n\t\treturn self.megaplan_call(credential, method)\n\n\nclass GetOffersAPIView(CredentialMixin, MegaPlanMixin, APIView):\n\tdef post(self, request, format=None):\n\t\tcredential \t= self.get_credential(request)\n\t\tmethod \t\t= '/api/v3/offer'\n\n\t\treturn self.megaplan_call(credential, method)\n\nclass GetEmployeersAPIView(CredentialMixin, MegaPlanMixin, APIView):\n\tdef post(self, request, format=None):\n\t\tcredential \t= self.get_credential(request)\n\t\tmethod \t= '/api/v3/employee'\n\n\t\treturn self.megaplan_call(credential, method)\n\n\nclass LookupAPIView(CredentialMixin, MegaPlanMixin, APIView):\n\n\tdef post(self, request, format=None):\n\t\tcredential = 
self.get_credential(request)\n\n\t\tdeal_state \t\t\t= dict()\n\t\tdeal_state[\"code\"] \t= \"state\"\n\t\tdeal_state[\"name\"] \t= \"Статус\"\n\t\tdeal_state[\"type\"] \t= \"MULTISELECT\"\n\t\tdeal_state[\"enums\"] = list()\n\n\t\tdeal_state_id \t\t\t= dict()\n\t\tdeal_state_id[\"code\"] \t= \"id\"\n\t\tdeal_state_id[\"name\"] \t= \"id\"\n\t\tdeal_state_id[\"type\"] \t= \"SELECT\"\n\t\tdeal_state_id[\"enums\"] \t= [\n\t\t\t{\n\t\t\t\t\"code\": 2,\n\t\t\t\t\"value\": \"Инетерес\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": 3,\n\t\t\t\t\"value\": \"Заказ\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": 4,\n\t\t\t\t\"value\": \"Выставлен счет\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": 5,\n\t\t\t\t\"value\": \"Оплачен\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": 6,\n\t\t\t\t\"value\": \"Завершен\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": 7,\n\t\t\t\t\"value\": \"Отказ\"\n\t\t\t}\n\n\t\t]\n\n\t\tdeal_state[\"enums\"].append(deal_state_id)\n\n\t\tdeal_contractor \t\t = dict()\n\t\tdeal_contractor[\"code\"] = \"contractor\"\n\t\tdeal_contractor[\"name\"] = \"Тип клиента\"\n\t\tdeal_contractor[\"type\"] = \"MULTISELECT\"\n\t\tdeal_contractor[\"enums\"] = list()\n\n\t\tdeal_contractror_ct \t\t = dict()\n\t\tdeal_contractror_ct[\"code\"] = \"contentType\"\n\t\tdeal_contractror_ct[\"name\"] = \"Вид контактного лица\"\n\t\tdeal_contractror_ct[\"type\"] = \"SELECT\"\n\t\tdeal_contractror_ct[\"enums\"] = [\n\t\t\t{\n\t\t\t\t\"code\": \"ContractorCompany\",\n\t\t\t\t\"value\": \"Компания\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"ContractorHuman\",\n\t\t\t\t\"value\": \"Контактное лицо компании или клиент-человек\"\n\t\t\t}\n\t\t]\n\n\t\tdeal_contractor_type = dict()\n\t\tdeal_contractor_type[\"code\"] = \"type\"\n\t\tdeal_contractor_type[\"name\"] = \"Тип клиента\"\n\t\tdeal_contractor_type[\"type\"] = \"SELECT\"\n\t\tdeal_contractor_type[\"enums\"] = [\n\t\t\t{\n\t\t\t\t\"code\": \"employee\",\n\t\t\t\t\"value\": \"Сотрудник\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"client\",\n\t\t\t\t\"value\": \"Клиент\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"subcontractor\",\n\t\t\t\t\"value\": \"Подрадчик\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"partner\",\n\t\t\t\t\"value\": \"Партнер\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"competitor\",\n\t\t\t\t\"value\": \"Конкурент\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"supplier\",\n\t\t\t\t\"value\": \"Поставщик\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"own\",\n\t\t\t\t\"value\": \"Наша компания\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"code\": \"guest\",\n\t\t\t\t\"value\": \"Гость\"\n\t\t\t}\n\t\t]\n\n\t\t\n\n\t\tdeal_contractor[\"enums\"].append(deal_contractror_ct)\n\t\tdeal_contractor[\"enums\"].append(deal_contractor_type)\n\t\tcontext = [deal_state, deal_contractor]\n\n\t\treturn Response(context)\n\n\nclass MegaplanCallAPIView(CredentialMixin, MegaPlanMixin, APIView):\n\tdef get(self, request,method_name, format=None):\n\t\tcredential = self.get_credential(request)\n\n\t\tloop = asyncio.new_event_loop()\n\t\tasyncio.set_event_loop(loop)\n\t\tcontext = loop.run_until_complete(self.megaplan_call(credential, method_name))\n\n\t\treturn Response(context)\n\n", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8289, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 17, "usage_type": "name"}, {"api_name": "api.serializers.MegaPlanCredentialSerializer", "line_number": 18, "usage_type": "name"}, {"api_name": 
"rest_framework.exceptions.PermissionDenied", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 51, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 54, "usage_type": "call"}, {"api_name": "api.models.MegaPlanCredential.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "api.models.MegaPlanCredential.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "api.models.MegaPlanCredential", "line_number": 57, "usage_type": "name"}, {"api_name": "api.models.MegaPlanCredential.objects.get", "line_number": 66, "usage_type": "call"}, {"api_name": "api.models.MegaPlanCredential.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "api.models.MegaPlanCredential", "line_number": 66, "usage_type": "name"}, {"api_name": "api.models.MegaPlanToken.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "api.models.MegaPlanToken.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "api.models.MegaPlanToken", "line_number": 72, "usage_type": "name"}, {"api_name": "api.models.MegaPlanToken.objects.create", "line_number": 73, "usage_type": "call"}, {"api_name": "api.models.MegaPlanToken.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "api.models.MegaPlanToken", "line_number": 73, "usage_type": "name"}, {"api_name": "api.models.MegaPlanToken.objects.get", "line_number": 81, "usage_type": "call"}, {"api_name": "api.models.MegaPlanToken.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "api.models.MegaPlanToken", "line_number": 81, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 90, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 90, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 90, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 92, "usage_type": "call"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 96, "usage_type": "name"}, {"api_name": "api.serializers.MegaPlanCredentialSerializer", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 103, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ViewSet", "line_number": 105, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 126, "usage_type": "call"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 128, "usage_type": "name"}, {"api_name": "api.mixins.MegaPlanMixin", "line_number": 128, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 128, "usage_type": "name"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 135, "usage_type": "name"}, {"api_name": "api.mixins.MegaPlanMixin", "line_number": 135, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 135, "usage_type": "name"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 143, "usage_type": "name"}, {"api_name": "api.mixins.MegaPlanMixin", "line_number": 143, "usage_type": 
"name"}, {"api_name": "rest_framework.views.APIView", "line_number": 143, "usage_type": "name"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 150, "usage_type": "name"}, {"api_name": "api.mixins.MegaPlanMixin", "line_number": 150, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 150, "usage_type": "name"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 158, "usage_type": "name"}, {"api_name": "api.mixins.MegaPlanMixin", "line_number": 158, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 158, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 269, "usage_type": "call"}, {"api_name": "api.mixins.CredentialMixin", "line_number": 272, "usage_type": "name"}, {"api_name": "api.mixins.MegaPlanMixin", "line_number": 272, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 272, "usage_type": "name"}, {"api_name": "asyncio.new_event_loop", "line_number": 276, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop", "line_number": 277, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 280, "usage_type": "call"}]} +{"seq_id": "24535426", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.metrics import auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n\noutput_file = open('./data/blobber-sent-analysis-01.txt', 'w')\n\nDataSet = pd.read_csv(\"./data/blobber-sent-new-01.csv\",header=None, names=['sentWordNet','label'])\npredictions = [1 if x=='1' else 0 for x in DataSet['sentWordNet'].tolist()]\nlabel = [1 if x=='1' else 0 for x in DataSet['label'].tolist()]\n\nfpr, tpr, thresholds = metrics.roc_curve(label, predictions, pos_label=1)\nroc_auc = auc(fpr,tpr)\naccu = accuracy_score(label, predictions)\npre = precision_score(label, predictions)\nrec = recall_score(label, predictions)\nfone = f1_score(label, predictions)\nrocScore = roc_auc_score(label, predictions)\n\noutput_file.write(\n 'Blobber Default Sentiment Analyzer:'\n '\\nNumber of instances: ' + str(len(label)) +\n '\\nAccuracy: ' + str(accu) +\n '\\nPrecision: ' + str(pre) +\n '\\nRecall: ' + str(rec) +\n '\\nF-1: ' + str(fone) +\n '\\nROC Score: ' + str(rocScore)\n)\noutput_file.close()\n\nplt.figure()\nplt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\nplt.plot([0, 1], [0, 1], 'k--')\n\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.0])\n\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic')\nplt.legend(loc=\"lower right\")\nplt.show()", "sub_path": "StockTweets/Blobber_sent_analysis_01.py", "file_name": "Blobber_sent_analysis_01.py", "file_ext": "py", "file_size_in_byte": 1391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 12, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", 
"line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "328122313", "text": "from flask import render_template, flash, redirect, session, url_for, request, g\nfrom flask.ext.login import login_user, logout_user, current_user, login_required\nfrom datetime import datetime\nfrom app import app, db, models\nfrom .models import User, itemdata, menutable, vendortable, analytics, averagedb, uniquedb\nfrom config import ITEMS_PER_PAGE, WHOOSH_BASE, SECRET_KEY\nimport PIL, uuid, requests, re\nfrom PIL import ImageFont, Image, ImageDraw\nfrom sqlalchemy import func, create_engine, distinct\nfrom sqlalchemy.orm import sessionmaker\n\ndef sumSessionCounter(): #user tracking db.\n try:\n session['counter']\n except KeyError:\n session['counter'] = uuid.uuid1()\n sess = analytics(sid=session['counter'], ip=request.remote_addr, timestamp=datetime.utcnow(), url=request.url)\n db.session.add(sess)\n db.session.commit()\n\n@app.route('/', defaults={'page': 1}, methods=['GET', 'POST'])\n@app.route('/', methods=['GET', 'POST'])\ndef index(page):\n engine = create_engine('mysql://root:sh030780@localhost/chengben')\n Sessions = sessionmaker(bind=engine)\n Sessions.configure(bind=engine)\n session = Sessions()\n test = session.query(func.count(analytics.sid), analytics.sid).group_by(analytics.sid).order_by(func.count(analytics.sid).desc()).all()\n itemsall = analytics.query.filter(analytics.parsed == None)\n for x in test: #avgdb start\n begin = None\n end = None\n datevar = datetime.today()\n change = 0\n for y in itemsall:\n if y.sid == x[1]: #later if y.timestamp - current.time > 30 minutes, to ensure session is over. 
if not, change = 0\n change = 1\n sid = y.sid\n ip = y.ip\n user_id=y.user_id\n clicks=x[0]\n browser = y.browser\n #average = averagedb(sid=y.sid, ip=y.ip, user_id=y.user_id, clicks=x[0], browser=y.browser)\n if (begin == None) or (y.timestamp < begin):\n begin = y.timestamp\n if (end == None) or (y.timestamp > end):\n end = y.timestamp\n if change == 1:\n if (end != None) and (begin != None):\n dura = (end-begin).total_seconds()\n else:\n dura = 0\n average = averagedb(sid=sid, ip=ip, user_id=user_id, clicks=clicks, browser=browser, duration=dura, datevar=end)\n db.session.add(average)\n db.session.commit() #avg.db_end\n\n for t in itemsall: #parsed flag switcher\n t.parsed = 0\n db.session.add(t)\n db.session.commit() \n\n unique = []\n for y in itemsall:\n if (y.sid, y.url) not in unique:\n stringvar = (y.sid, y.url)\n unique.append(stringvar)\n uni_db = uniquedb(sid=y.sid, ip=y.ip, user_id=y.user_id, url=y.url, date=y.timestamp)\n db.session.add(uni_db)\n db.session.commit()\n \n test2 = session.query(func.count(uniquedb.url), uniquedb.url).group_by(uniquedb.url).order_by(func.count(uniquedb.url).desc()).all()\n L2 = []\n for t in test2:\n if 'L2' in t[1]:\n L2.append(t)\n L4 = []\n for t in test2:\n if 'L4' in t[1]:\n textstr = t[1][:t[1].find('/?keyword')]\n apper = (t[0], textstr)\n L4.append(apper)\n session.close()\n return render_template('metrics.html',\n title='Metrics', test=test, test2=test2, L2=L2, L4=L4)\n\n", "sub_path": "app/views_admin.py", "file_name": "views_admin.py", "file_ext": "py", "file_size_in_byte": 3499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.session", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 16, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 16, "usage_type": "call"}, {"api_name": "models.analytics", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.remote_addr", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.url", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.db.session.add", "line_number": 18, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 18, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 19, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.session.query", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.func.count", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 28, "usage_type": "name"}, {"api_name": "models.analytics.sid", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.analytics", "line_number": 
28, "usage_type": "name"}, {"api_name": "models.analytics.query.filter", "line_number": 29, "usage_type": "call"}, {"api_name": "models.analytics.query", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.analytics", "line_number": 29, "usage_type": "name"}, {"api_name": "models.analytics.parsed", "line_number": 29, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "models.averagedb", "line_number": 53, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 54, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 54, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 55, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 55, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 55, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 59, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 59, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 59, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 60, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 60, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 60, "usage_type": "name"}, {"api_name": "models.uniquedb", "line_number": 67, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 68, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 68, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 68, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 69, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.session.query", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 71, "usage_type": "name"}, {"api_name": "sqlalchemy.func.count", "line_number": 71, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 71, "usage_type": "name"}, {"api_name": "models.uniquedb.url", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.uniquedb", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.session.close", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 83, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 21, "usage_type": "call"}, {"api_name": "app.app", "line_number": 21, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 22, "usage_type": "call"}, {"api_name": "app.app", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "127080995", "text": "#coding: utf-8\r\nimport config\r\nfrom telegram.ext import Updater, CommandHandler\r\nimport logging\r\nimport parser_prg as parser\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\r\n\r\nWITH_PROXY = True # You need to change this variable to True if you want to start the bot with a proxy, which is configured in config.py.\r\nif WITH_PROXY:\r\n updater = Updater(token=config.TOKEN, request_kwargs=config.REQUEST_KWARGS)\r\nelse:\r\n updater = Updater(token=config.TOKEN)\r\ndispatcher = 
updater.dispatcher\r\n\r\nunnamed_count = 0\r\npodcasts = {}\r\nlinks_textfile = open('links.txt', mode='r', encoding='utf8')\r\nlinks_lines = links_textfile.readlines()\r\nfor link_line in links_lines:\r\n podcast_title = link_line[:link_line.find('http')]\r\n podcast_link = link_line[link_line.find('http'):]\r\n if podcast_title == ' ':\r\n unnamed_count += 1\r\n podcasts['Без имени ' + str(unnamed_count) + ' '] = podcast_link\r\n else:\r\n podcasts[podcast_title] = podcast_link\r\nlinks_textfile.close()\r\n\r\n\r\ndef start(bot, update):\r\n txt = \"Начнём!\\n Введите команду '/s запрос' или '/search запрос', чтобы начать поиск.\" + \\\r\n \"\\nВведите команду '/u' или '/update', чтобы обновить список подкастов.\"\r\n bot.send_message(chat_id=update.message.chat_id, text=txt)\r\n\r\ndef search(bot, update, args):\r\n try:\r\n if podcasts == {}:\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Идёт получение списка подкастов. Это займёт около 5 минут.\")\r\n parser.main()\r\n title = ''\r\n for arg in args:\r\n title += arg + ' '\r\n title = title[:-1]\r\n substr = ''\r\n search_results = []\r\n for k in podcasts.keys():\r\n if title in k:\r\n search_results.append(k + podcasts[k])\r\n search_results.sort()\r\n for res in search_results:\r\n substr += res + '\\n'\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Результаты поиска:\\n\" + substr)\r\n except:\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Введите запрос.\")\r\n\r\ndef podcast_list_update(bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Идёт обновление списка подкастов. Это займёт около 5 минут.\")\r\n parser.main()\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Обновление завершено!\")\r\n\r\n\r\nstart_handler = CommandHandler('start', start)\r\ndispatcher.add_handler(start_handler)\r\nsearch_handler = CommandHandler(['s', 'search'], search, pass_args=True)\r\ndispatcher.add_handler(search_handler)\r\nupdate_handler = CommandHandler(['u', 'update'], podcast_list_update)\r\ndispatcher.add_handler(update_handler)\r\nupdater.start_polling()\r\n", "sub_path": "bot_mayak/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 2971, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "telegram.ext.Updater", "line_number": 10, "usage_type": "call"}, {"api_name": "config.TOKEN", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.REQUEST_KWARGS", "line_number": 10, "usage_type": "attribute"}, {"api_name": "telegram.ext.Updater", "line_number": 12, "usage_type": "call"}, {"api_name": "config.TOKEN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "parser_prg.main", "line_number": 39, "usage_type": "call"}, {"api_name": "parser_prg.main", "line_number": 58, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 62, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 64, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "227805690", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom models import UchetFile, UchetData\nfrom forms import UchetFileForm\nimport os\n\n# Create your views here.\n\ndef main_page(request):\n if 
request.method == \"POST\":\n        form = UchetFileForm(request.POST, request.FILES)\n        if form.is_valid():\n            data = form.cleaned_data\n            uchet = UchetFile.objects.create(title=data['title'], file=data['file'])\n            name = os.path.basename(uchet.file.name)\n            uchet.path = '/my_app/static/' + name\n            uchet.save()\n            return redirect(show)\n        else:\n            data = form.errors\n            return HttpResponse(\"{0}\".format(data))\n    else:\n        context = {'my_form':UchetFileForm()}\n        return render(request, 'main_page.html', context)\n\n\ndef show(request):\n    uchet = UchetFile.objects.filter().last()\n    with open(\"{0}{1}\".format(os.getcwd(), uchet.path)) as somefile:\n        for i in somefile.readlines():\n            my_list = i.split(',')\n            UchetData.objects.create(number=int(my_list[0]), name=my_list[1], cost=int(my_list[2]),\n                                     count=int(my_list[3]), file=uchet)\n    data = UchetData.objects.filter(file=uchet)\n    context = {'data':data}\n    return render(request, 'show_page.html', context)", "sub_path": "my_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "forms.UchetFileForm", "line_number": 11, "usage_type": "call"}, {"api_name": "models.UchetFile.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "models.UchetFile.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.UchetFile", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "forms.UchetFileForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "models.UchetFile.objects.filter", "line_number": 28, "usage_type": "call"}, {"api_name": "models.UchetFile.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.UchetFile", "line_number": 28, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 29, "usage_type": "call"}, {"api_name": "models.UchetData.objects.create", "line_number": 32, "usage_type": "call"}, {"api_name": "models.UchetData.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.UchetData", "line_number": 32, "usage_type": "name"}, {"api_name": "models.UchetData.objects.filter", "line_number": 34, "usage_type": "call"}, {"api_name": "models.UchetData.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.UchetData", "line_number": 34, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "560984336", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nfrom xml.sax import make_parser\nfrom xml.sax.handler import ContentHandler\n\n\nclass SmallSMILHandler(ContentHandler):\n\n    def __init__ (self):\n        self.width = \"\"\n        self.height = \"\"\n        self.background_color = \"\"\n        self.id = \"\"\n        self.top = \"\"\n        self.bottom = \"\"\n        self.left = \"\"\n        self.right = \"\"\n        self.src = \"\"\n        self.region = \"\"\n        self.begin = \"\"\n        self.dur = \"\"\n        self.misdatos = []\n\n    def startElement(self, name, attrs):\n\n        if name == 'root-layout':\n            self.root_layout = {}\n            self.misdatos.append(name)\n            # This is how we take the attribute values\n            self.width 
= attrs.get('width', \"\")\n            self.height = attrs.get('height', \"\")\n            self.background_color = attrs.get('background-color', \"\")\n            self.root_layout=['width' + ' = ' + self.width, 'height' + ' = ' + \nself.height, 'backgroundcolor' + ' = ' + self.background_color]\n            self.misdatos.append(self.root_layout)\n        \n        elif name == 'region':\n            self.region = {}\n            self.misdatos.append(name)\n            self.id = attrs.get('id', \"\")\n            self.top = attrs.get('top', \"\")\n            self.bottom = attrs.get('bottom', \"\")\n            self.left = attrs.get('left', \"\")\n            self.right = attrs.get('right', \"\")\n            self.region=['id'+' = ' + self.id,'top' + ' = ' + self.top, \n'bottom' + ' = ' + self.bottom, 'left' + ' = ' + self.left, 'right' + \n' = ' + self.right]\n            self.misdatos.append(self.region)\n\n        elif name == 'img':\n            self.img = {}\n            self.misdatos.append(name)\n            self.src = attrs.get('src', \"\")\n            self.region = attrs.get('region', \"\")\n            self.begin = attrs.get('begin', \"\")\n            self.dur = attrs.get('dur', \"\")\n            self.img = ['src' + ' = ' + self.src,'region' + ' = ' \n+ self.region, 'begin' + ' = ' + self.begin, 'dur' + ' = ' + self.dur]\n            self.misdatos.append(self.img)\n\n        elif name == 'audio':\n            self.audio = {}\n            self.misdatos.append(name)\n            self.src = attrs.get('src', \"\")\n            self.begin = attrs.get('begin', \"\")\n            self.dur = attrs.get('dur', \"\")\n            self.audio=['src' + ' = ' + self.src,'begin' + ' = ' + self.begin,\n'dur' + ' = ' + self.dur]\n            self.misdatos.append(self.audio)\n        \n        elif name == 'textstream':\n            self.textstream = {}\n            self.misdatos.append(name)\n            self.src = attrs.get('src', \"\")\n            self.region = attrs.get('region', \"\")\n            self.textstream = ['src' + ' = ' + self.src, 'region' + \n' = ' + self.region]\n            self.misdatos.append(self.textstream)\n\n    def get_tags(self):\n        \n        print (self.misdatos)\n        return self.misdatos\n\nif __name__ == \"__main__\":\n    parser = make_parser()\n    sHandler = SmallSMILHandler()\n    parser.setContentHandler(sHandler)\n    parser.parse(open('karaoke.smil'))\n    sHandler.get_tags()\n", "sub_path": "smallsmilhandler.py", "file_name": "smallsmilhandler.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "xml.sax.handler.ContentHandler", "line_number": 7, "usage_type": "name"}, {"api_name": "xml.sax.make_parser", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "433096351", "text": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.Index.as_view(), name='index'),\n url(r'^clubs/$', views.ClubList.as_view(), name='club_list'),\n url(r'^clubs/add/$', views.ClubCreate.as_view(), name='club_create'),\n url(r'^clubs/(?P[\\w-]+)/$', views.ClubDetail.as_view(), name='club_detail'),\n url(r'^clubs/(?P[\\w-]+)/edit/$', views.ClubUpdate.as_view(), name='club_update'),\n url(r'^clubs/(?P[\\w-]+)/archers/add/$', views.ClubArcherCreate.as_view(), name='club_archer_create'),\n url(r'^clubs/(?P[\\w-]+)/archived/$', views.ArchiveArcherList.as_view(), name='archive_archer_list'),\n url(r'^counties/add/$', views.CountyCreate.as_view(), name='county_create'),\n url(r'^archer/add/$', views.ArcherCreate.as_view(), name='archer_create'),\n url(r'^archer/(?P\\d+)/$', views.ArcherDetail.as_view(), name='archer_detail'),\n url(r'^archer/(?P\\d+)/edit/$', views.ArcherUpdate.as_view(), name='archer_update'),\n url(r'^archer/(?P\\d+)/archive/$', views.ArcherArchive.as_view(), name='archer_archive'),\n]\n", "sub_path": "core/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "441007536", "text": "#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: \n@file: 922. 按奇偶排序数组 II.py\n@time: 2020/11/12 09:49\n@desc: \n\"\"\"\nfrom typing import List\n\"\"\"\n给定一个非负整数数组 A, A 中一半整数是奇数,一半整数是偶数。\n\n对数组进行排序,以便当 A[i] 为奇数时,i 也是奇数;当 A[i] 为偶数时, i 也是偶数。\n\n你可以返回任何满足上述条件的数组作为答案。\n\n \n\n示例:\n\n输入:[4,2,5,7]\n输出:[4,5,2,7]\n解释:[4,7,2,5],[2,5,4,7],[2,7,4,5] 也会被接受。\n \n\n提示:\n\n2 <= A.length <= 20000\nA.length % 2 == 0\n0 <= A[i] <= 1000\n\"\"\"\n\n\nclass Solution:\n def sortArrayByParityII(self, A: List[int]) -> List[int]:\n # odd_list = []\n # even_list = []\n # for i in range(len(A)):\n # # 奇数放到了偶数位置\n # if A[i] & 1 and not (i & 1):\n # odd_list.append(i)\n # # 偶数放到了奇数位置\n # elif not (A[i] & 1) and i & 1:\n # even_list.append(i)\n # for j in range(len(odd_list)):\n # A[odd_list[j]], A[even_list[j]] = A[even_list[j]], A[odd_list[j]]\n # return A\n\n i, j, length = 0, 1, len(A)-1\n while i <= length and j <= length:\n while i <= length and not(A[i] & 1):\n i += 2\n while j <= length and (A[j] & 1):\n j += 2\n if i <= length and j <= length:\n A[i], A[j] = A[j], A[i]\n return A\n\n\n\na = Solution().sortArrayByParityII([4,2,5,7])\nprint(a)\n\n", "sub_path": "all_topic/esay_topic/922. 按奇偶排序数组 II.py", "file_name": "922. 
按奇偶排序数组 II.py", "file_ext": "py", "file_size_in_byte": 1572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "typing.List", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "586556815", "text": "import re\nimport requests \nfrom bs4 import BeautifulSoup\nurl = 'https://www.amazon.com/'\n\nheaders = {'user-agent': 'kaveh'}\n\nr = requests.get(url,headers=headers)\nsoup = BeautifulSoup(r.text,'lxml')\nsoup_Option = soup.select('#searchDropdownBox')\n\nSoup_value = soup_Option[0].children\n\nfor items in Soup_value:\n # print(items[0].ge)\n print(items)\n \n # print(type(items))\n # x = re.findall('value=\"(.[^\"]+)\"',str(items))\n ", "sub_path": "webscarp-project-maktabkhooneh.py", "file_name": "webscarp-project-maktabkhooneh.py", "file_ext": "py", "file_size_in_byte": 439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "180338189", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/hanzz/releases/odcs/server/odcs/server/api_utils.py\n# Compiled at: 2018-01-11 04:20:51\n# Size of source mod 2**32: 5556 bytes\nimport copy\nfrom flask import request, url_for\nfrom odcs.server.models import Compose\nimport six\n\ndef validate_json_data(dict_or_list, level=0):\n \"\"\"\n Checks that json data represented by dict `dict_or_list` is valid ODCS\n input. Raises ValueError in case the json data does not pass validation.\n\n This mainly checks that any value in json data does not contain forbidden\n characters and data types which could potentially lead to dangerous pungi\n configuration being generated.\n \"\"\"\n if isinstance(dict_or_list, dict):\n iterator = dict_or_list.items()\n else:\n iterator = enumerate(dict_or_list)\n for k, v in iterator:\n if isinstance(v, dict):\n if level != 0 or k not in ('source', ):\n raise ValueError(\"Only 'source' key is allowed to contain dict.\")\n validate_json_data(v, level + 1)\n elif isinstance(v, list):\n validate_json_data(v, level + 1)\n else:\n if isinstance(v, six.string_types):\n allowed_chars = [\n ' ', '-', '/', '_', '.', ':', '#']\n if not all(c.isalnum() or c in allowed_chars for c in v):\n raise ValueError('Only alphanumerical characters and %r characters are allowed in ODCS input variables' % allowed_chars)\n else:\n if isinstance(v, (int, float)):\n continue\n else:\n raise ValueError(\"Only dict, list, str, unicode, int, float and bool types are allowed in ODCS input variables, but '%s' has '%s' type\" % (\n k, type(v)))\n\n\ndef pagination_metadata(p_query, request_args):\n \"\"\"\n Returns a dictionary containing metadata about the paginated query.\n This must be run as part of a Flask request.\n :param p_query: flask_sqlalchemy.Pagination object\n :param request_args: a dictionary of the arguments that were part of the\n Flask request\n :return: a dictionary containing metadata about the paginated query\n \"\"\"\n request_args_wo_page = dict(copy.deepcopy(request_args))\n for key in ('page', 'per_page', 'endpoint'):\n if key in request_args_wo_page:\n request_args_wo_page.pop(key)\n\n for key in request_args:\n if key.startswith('_'):\n request_args_wo_page.pop(key)\n\n pagination_data = {'page':p_query.page, \n 
'pages':p_query.pages, \n 'per_page':p_query.per_page, \n 'prev':None, \n 'next':None, \n 'total':p_query.total, \n 'first':url_for(request.endpoint, page=1, per_page=p_query.per_page, _external=True, **request_args_wo_page), \n 'last':url_for(request.endpoint, page=p_query.pages, per_page=p_query.per_page, \n _external=True, **request_args_wo_page)}\n if p_query.has_prev:\n pagination_data['prev'] = url_for(request.endpoint, page=p_query.prev_num, per_page=p_query.per_page, \n _external=True, **request_args_wo_page)\n if p_query.has_next:\n pagination_data['next'] = url_for(request.endpoint, page=p_query.next_num, per_page=p_query.per_page, \n _external=True, **request_args_wo_page)\n return pagination_data\n\n\ndef filter_composes(flask_request):\n \"\"\"\n Returns a flask_sqlalchemy.Pagination object based on the request parameters\n :param request: Flask request object\n :return: flask_sqlalchemy.Pagination\n \"\"\"\n search_query = dict()\n for key in ('owner', 'source_type', 'source', 'state', 'koji_task_id'):\n if flask_request.args.get(key, None):\n search_query[key] = flask_request.args[key]\n\n query = Compose.query\n if search_query:\n query = (query.filter_by)(**search_query)\n page = flask_request.args.get('page', 1, type=int)\n per_page = flask_request.args.get('per_page', 10, type=int)\n return query.paginate(page, per_page, False)", "sub_path": "pycfiles/odcs-0.2.45.tar/api_utils.cpython-36.py", "file_name": "api_utils.cpython-36.py", "file_ext": "py", "file_size_in_byte": 4131, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "six.string_types", "line_number": 34, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.request.endpoint", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.endpoint", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.endpoint", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.endpoint", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "odcs.server.models.Compose.query", "line_number": 94, "usage_type": "attribute"}, {"api_name": "odcs.server.models.Compose", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "201060591", "text": "'''\n\n Copyright 2017 The Regents of the University of Colorado\n\n Licensed under the Apache License, Version 2.0 (the \"License\")\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n'''\n calculate.py 
\n    Python Version: 3.6.3\n\n    This module contains code for running functions to create calculated values.\n    It assumes the input dataset has been loaded and migrated to OHDSI. It runs\n    all functions associated with the study_id, reading input from OHDSI and writing\n    output to OHDSI. This is driven by configuration data in tables\n    ohdsi_calculation_function, ohdsi_calculation_argument that describe the sources\n    and destinations for data. The name of the function in the database must match\n    the name of the function in Python.\n\n    NB because of the way the observation and measurement Ids are tracked here, not as a sequence or auto-increment,\n    you can't really run more than one migrate script at a time.\n\n    This is research code for demonstration purposes only.\n\n    croeder 7/2017 chris.roeder@ucdenver.edu\n'''\n\nimport importlib\nimport logging\nimport sys\nimport traceback\n\nfrom psycopg2.extras import RealDictCursor\nfrom HeartData.person import BasePerson\nfrom HeartData.study import get_study_details\nfrom HeartData import calculate_functions\nfrom ui.models import Concept\n\nimport psycopg2\nimport psycopg2.extras\nimport argh\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=locally-disabled, invalid-name\n\n\nEPOCH_START_DEFAULT_DATE=\"1970-01-01\"\n\nclass CalculatedFieldFunction(object):\n    \"\"\"\n    For applying the calculation functions for creating calculated fields.\n    This code gets and organizes the related metadata, fetches the inputs,\n    calculates the value and stores it.\n    Uses tables ohdsi_calculation_function, ohdsi_calculation_argument\n    \"\"\"\n\n    def __init__(self, con, study_id, name, to_vocabulary_id, to_concept_code, module_name):\n\n        if name is None:\n            raise Exception(\"function in study {} for vocab:{} and concept:{} needs a non-None name\", study_id, to_vocabulary_id, to_concept_code)\n        if module_name is None:\n            raise Exception(\"function {} in study {} for vocab:{} and concept:{} needs a non-None module_name.\", name, study_id, to_vocabulary_id, to_concept_code)\n\n        self._to_vocabulary_id = to_vocabulary_id\n        self._to_concept_code = to_concept_code\n        self._connection = con\n        self._study_id = study_id\n        self._function_name = name\n        self._module_name = module_name\n        self._observation_number = self._get_max_observation_number() +1\n        self._measurement_number = self._get_max_measurement_number() +1\n        self._person_obj = BasePerson.factory_on_id(study_id)\n        i = importlib.import_module(module_name)\n        self._function_ref = getattr(i, self._function_name)\n        cursor = self._connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n        # reconcile with query in get_function_instances() TODO\n        cursor.execute((\"SELECT to_vocabulary_id, to_concept_code, to_table, to_column, expression \"\n                        \" FROM ohdsi_calculation_function \"\n                        \" WHERE function_name = %s \"\n                        \" AND study_id = %s \"\n                        \" AND to_vocabulary_id = %s \"\n                        \" AND to_concept_code = %s\"\n                        \" ORDER BY function_order\"),\n                       (self._function_name, str(self._study_id), to_vocabulary_id, to_concept_code))\n        rows = cursor.fetchall()\n        if len(rows) > 1:\n            logger.error(\"more than one row in CalculatedFieldFunction init for study:%d, name:%s\",\n                         self._study_id, self._function_name)\n            raise Exception(\"more than one row in CalculatedFieldFunction init for study:{}, name:{}\".format(\n                self._study_id, self._function_name))\n        if not rows:\n            logger.error(\"No definition found in CalculatedFieldFunction init for study:%d, name:%s vocab:%s term:%s\",\n                         self._study_id, self._function_name, to_vocabulary_id, 
to_concept_code)\n raise Exception(\"No definition found in CalculatedFieldFunction init for study:{}, name:{} vocab:{} term:{}\".format(\n self._study_id, self._function_name, to_vocabulary_id, to_concept_code))\n self._to_vocabulary_id = rows[0]['to_vocabulary_id']\n self._to_concept_code = rows[0]['to_concept_code']\n self._to_table = rows[0]['to_table']\n self._to_column = rows[0]['to_column']\n self._expression = rows[0]['expression']\n self._concept_id = Concept.objects.get(vocabulary_id=self._to_vocabulary_id, concept_code=self._to_concept_code).concept_id\n self._argument_descriptions = self._get_function_argument_descriptions()\n cursor.close()\n\n def __str__(self):\n return \"CalculatedFieldFunction: \" + str(self._study_id) + \", \" + self._function_name + \", \" + str(self._observation_number) + \", \" + str(self._function_ref) + \", \" + self._to_vocabulary_id + \", \" + self._to_concept_code + \", \" + self._to_table\n\n\n def _get_function_argument_descriptions(self):\n \"\"\"\n Returns a list of triples [(vocabulary_id, concept_code, from_table)] in the order they should be passed.\n \"\"\"\n\n cursor = self._connection.cursor()\n stmt = (\"SELECT vocabulary_id, concept_code, from_table, argument_name \"\n \" FROM ohdsi_calculation_argument \"\n \"WHERE function_name = %s\"\n \" AND study_id = %s\"\n \" AND to_vocabulary_id = %s\"\n \" AND to_concept_code = %s\"\n \"ORDER BY argument_order \")\n cursor.execute(stmt, (self._function_name, str(self._study_id), self._to_vocabulary_id, self._to_concept_code))\n tuples = cursor.fetchall()\n cursor.close()\n return tuples\n\n def _get_function_argument_values(self, patient_id):\n \"\"\" Queries the observation table for the value of this argument for the given patient\n Returns a tuple of lists ([(value_as_string, value_as_number, value_as_concept_id)],[date])\n The first is a list of triples, one triple for each input variable. It's a triple to deal with\n the different data types: string, number, concept. The second list is a list of dates that\n corresponds to the first. 
They are the date the datum was collected.\n        \"\"\"\n\n        values = []\n        dates = []\n        names = [] \n        i=1;\n        stmt=''\n        cursor = self._connection.cursor()\n        for (vocabulary_id, concept_code, from_table, argument_name) in self._argument_descriptions:\n            rows=None\n            if from_table == 'observation':\n                stmt = (\"SELECT observation_date, value_as_string, value_as_number, value_as_concept_id \"\n                        \" FROM observation o, concept c \"\n                        \" WHERE o.person_id = %s\"\n                        \" AND o.observation_concept_id = c.concept_id \"\n                        \" AND c.vocabulary_id = %s\"\n                        \" AND c.concept_code = %s\"\n                        \" AND (value_as_string is not null or value_as_number is not null or value_as_concept_id is not null)\"\n                        \" order by observation_date\") # TODO crude method for choosing which value here\n                cursor.execute(stmt, (str(patient_id), str(vocabulary_id), str(concept_code)))\n                rows = cursor.fetchall()\n                logger.debug(\"STMT-obs:%s ARGS:%s\", stmt, (str(patient_id), str(vocabulary_id), str(concept_code)))\n            elif from_table == 'measurement':\n                stmt = (\"SELECT measurement_date, null, value_as_number, value_as_concept_id \"\n                        \" FROM measurement m, concept c \"\n                        \" WHERE m.person_id = %s\"\n                        \" AND m.measurement_concept_id = c.concept_id \"\n                        \" AND c.vocabulary_id = %s\"\n                        \" AND c.concept_code = %s\"\n                        \" AND (value_as_number is not null or value_as_concept_id is not null)\"\n                        \" order by measurement_date\") # TODO crude method for choosing which value here\n                cursor.execute(stmt, (str(patient_id), str(vocabulary_id), str(concept_code)))\n                rows = cursor.fetchall()\n                logger.debug(\"STMT-meas:%s ARGS:%s\", stmt, (str(patient_id), str(vocabulary_id), str(concept_code)))\n            elif from_table == 'death':\n                if concept_code == 'x' and vocabulary_id == 'x': # TODO consider null here? ...but the index on the table won't allow it for now\n                    stmt = (\"SELECT death_date, null, null, cause_concept_id \"\n                            \" FROM death d\"\n                            \" WHERE d.person_id = %s\")\n                    cursor.execute(stmt, (patient_id,))\n                    rows = cursor.fetchall()\n                else:\n                    stmt = (\"SELECT death_date, null, null, cause_concept_id \"\n                            \" FROM death d, concept c\"\n                            \" WHERE d.person_id = %s\"\n                            \" AND d.cause_concept_id = c.concept_id \"\n                            \" AND c.vocabulary_id = %s\"\n                            \" AND c.concept_code = %s\")\n                    cursor.execute(stmt, (patient_id, str(vocabulary_id), str(concept_code)))\n                    rows = cursor.fetchall()\n                logger.debug(\"STMT-death = %s\", stmt)\n            elif from_table == 'dual' and vocabulary_id == 'dual':\n                # TODO - this is twisted. In order to pass in constants, the concept_code is the value, rather than part of a join to go get the value\n                # Issue #49 mentions the business about using the string 'null' to slip NULL values past the PK/FK limitations.\n                # measurement date irrelevant\n                #stmt = (\"SELECT observation_date, value_as_string, value_as_number, value_as_concept_id \"\n\n                #NB: the calling function acts differently when the table is \"dual\". Instead of reading the \n                #value associated with (vocab_id, concept_code) in the named table (like observation), the \n                #concept_code is returned. 
It's mind bending and meta because often the concept_code is rather a concept_id.\n                #In this case you're matching concept IDs instead of values associated with concepts.\n                # see #135, #77\n\n                if concept_code == 'null':\n                    logger.debug(\"DUAL-1 %s\", concept_code)\n                    stmt = \"SELECT '2001-01-01', Null, Null, Null\"\n                    cursor.execute(stmt)\n                else:\n                    logger.debug(\"DUAL-2 %s\", concept_code)\n                    # we have these values, just running select to get them in the form returned by the database (result set?)\n                    stmt = \"SELECT '2001-01-01', %s, %s, %s\"\n                    #cursor.execute(stmt, ( str(concept_code), float(concept_code), int(concept_code) ))\n                    cursor.execute(stmt, ( str(concept_code), float(concept_code), float(concept_code) ))\n                rows = cursor.fetchall()\n                logger.debug(\"STMT-dual = %s\", stmt)\n\n            else:\n                logger.error(\"_get_function_argument_values(): table name not recognized %s vocab:%s\", from_table, vocabulary_id)\n\n            logger.warning(\"_get_function_argument_values(): ARG #%d vocab:%s, code:%s, table:%s, name:%s ROWS:%s\", i, vocabulary_id, concept_code, from_table, argument_name, len(rows)) \n            i = i+1\n\n            if rows:\n                if (len(rows) > 1) :\n                    logger.info(\"DEV: ...we got more than a single row back in _get_function_argument_values for pid:%s from_table:%s %s, %s\", \n                        patient_id, vocabulary_id, concept_code, rows)\n                for row in rows[:1]: # CHRIS\n                    (date, value_as_string, value_as_number, value_as_concept_id) = row\n                    values.append((value_as_string, value_as_number, value_as_concept_id))\n                    dates.append(date)\n                    logger.debug(\"_get_function_argument_values(): %d %s\", len(rows), stmt) \n                names.append(argument_name)\n            else:\n                values.append((None, None, None))\n                dates.append('2001-01-01')\n                names.append(argument_name)\n                logger.debug(\"_get_function_argument_values(): no rows returned for study:%s pid:%s arg_desc:%s len:%d pair:%s %s %s %s\",\n                             self._study_id, patient_id, str(self._argument_descriptions), len(self._argument_descriptions), vocabulary_id, concept_code, from_table, stmt)\n        cursor.close()\n        return (values, dates, names)\n\n    #TODO add a sequence or auto-increment to these id columns\n    def _get_max_observation_number(self):\n        ''' returns the maximum observation number from the table (TODO OBSOLETE) '''\n        stmt = \"SELECT max(observation_id) from observation\"\n        cursor = self._connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n        cursor.execute(stmt)\n        rows = cursor.fetchall()\n        cursor.close()\n        return rows[0]['max']\n\n    #TODO add a sequence or auto-increment to these id columns\n    def _get_max_measurement_number(self):\n        ''' returns the maximum measurement number from the table (TODO OBSOLETE) '''\n        stmt = \"SELECT max(measurement_id) from measurement\"\n        cursor = self._connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n        cursor.execute(stmt)\n        rows = cursor.fetchall()\n        cursor.close()\n        return rows[0]['max']\n\n    def _delete_function_value_for_person(self, person_id, concept_id):\n        cursor = self._connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n        if self._to_table == 'measurement':\n            meas_stmt = 'DELETE from measurement where person_id=%s and measurement_concept_id=%s;'\n            cursor.execute(meas_stmt, (str(person_id), concept_id))\n        elif self._to_table == 'observation':\n            obs_stmt = 'DELETE from observation where person_id=%s and observation_concept_id=%s;'\n            cursor.execute(obs_stmt, (str(person_id), concept_id))\n    \n    def _insert_function_value(self, person_id, visit_date, value, concept_id):\n        \"\"\" inserts a value into the observation table by vocabulary_id and its 
concept_code.\n NB: Don't confuse concept_code and concept_id.\n In OHDSI's observation table, there is an indirection through the concept table keyed\n by concept_id, hence the odd name concept_code for the term or concept's id in the\n context of the vocabulary.\n \"\"\"\n try:\n cursor = self._connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n obs_stmt = \"INSERT into observation (observation_id, person_id, observation_concept_id, observation_date,\"\\\n + self._to_column + \", observation_type_concept_id)\" \\\n + \" VALUES (%s, %s, %s, %s, %s,%s)\"\n meas_stmt = \"INSERT into measurement (measurement_id, person_id, measurement_concept_id, measurement_date,\"\\\n + self._to_column + \", measurement_type_concept_id)\" \\\n + \" VALUES (%s, %s, %s, %s, %s,%s)\"\n if self._to_table == 'measurement':\n logger.debug(\"calculate._insert_function_value() insert into meas. obs_id:%s person_id:%s value:%s concept_id:%s date:%s\",\n self._measurement_number, person_id, value, concept_id, visit_date)\n cursor.execute(meas_stmt, (str(self._measurement_number), str(person_id), self._concept_id, visit_date, value, 38000278))\n self._measurement_number += 1\n elif self._to_table == 'observation':\n logger.debug(\"calculate._insert_function_value() insert into obs. obs_id:%s person_id:%s value:%s concept_id:%s date:%s\",\n self._observation_number, person_id, value, concept_id, visit_date)\n cursor.execute(obs_stmt, (str(self._observation_number), str(person_id), self._concept_id, visit_date, value, 38000278))\n self._observation_number += 1\n else:\n logger.error(\"BAD TABLE %s\", self._to_table) \n raise Exception(\"BAD TABLE in _insert_function_value {}\".format(self))\n except Exception as e:\n logger.error(\"RAISE: %s\", e)\n if self._to_table == 'measurement':\n logger.error(\" MEAS STMT: %s\", meas_stmt);\n else:\n logger.error(\" OBS STMT: %s\", obs_stmt);\n logger.error(\" person_id:%s concept:%s value:%s\", person_id, self._concept_id, value) \n cursor.close()\n raise e\n \n cursor.close()\n\n\n \n \n def _run_function_for_person(self, person_id, concept_id):\n ''' runs a calculation function for a single person '''\n\n\n logger.info(\"DOING-1 %s %s\", self._function_name, concept_id)\n\n ## GET DATA\n (input_values, dates, names) = self._get_function_argument_values(person_id)\n\n\n ## PROCESS\n if (len(input_values) > 0 or self._function_name == 'true')\\\n and (self._function_name == 'map_concept_id' \\\n or self._function_name == 'ranges_to_rank' \\\n or (len(input_values) == len(names) and len(input_values) == len(self._argument_descriptions)) ):\n visit_date = EPOCH_START_DEFAULT_DATE\n if len(dates) > 0:\n visit_date = dates[0] # TODO, a little arbitrary. 
The values selected are \n                                       # the first non-null and here the date is the date that went with the first concept...\n            \n            # SIGNATURE 1 (dates, values, names, expression) This form allows the \n            # function to take a variable number of arguments.\n            # TODO this (being a member of this list issue #47) should be an explicit attribute on the function\n            if self._function_name == 'concept_or_list' \\\n                or self._function_name == 'concept_and_list' \\\n                or self._function_name == 'sum' \\\n                or self._function_name == 'eos_to_death_days' \\\n                or self._function_name == 'eos_days' \\\n                or self._function_name == 'eos_death_min_days' \\\n                or self._function_name == 'eos_death_max_days' \\\n                or self._function_name == 'death_days' \\\n                or self._function_name == 'corona_smoking_to_yesno'\\\n                or self._function_name == 'concept_to_int'\\\n                or self._function_name == 'ranges_to_rank'\\\n                or self._function_name == 'run_simple_eval'\\\n                or self._function_name == 'map_concept_id':\n\n\n                try:\n                    # different signatures...\n                    if self._function_name == 'run_simple_eval':\n                        logger.debug(\" VARS SIGNATURE 1-B names:%s input_values:%s\", names, input_values) \n                        output_value=None\n                        bogus_input_flag = False\n                        for i in range(0, len(names)):\n                            if input_values[i][1] is None:\n                                bogus_input_flag = True\n                                break\n                        if (not bogus_input_flag):\n                            output_value = self._function_ref(dates, input_values, names, expression=self._expression)\n                            logger.info(\"....SIGNATURE 1-B person:%s concept_id:%s f:%s out:%s, expr:%s\", \n                                person_id, concept_id, self._function_name, output_value, self._expression)\n                        else:\n                            logger.warning(\"....SIGNATURE 1-B skipping function because of BOGUS input values to SIMPLE EVAL. person:%s concept_id:%s f:%s out:%s, expr:%s bogus:%s\", \n                                person_id, concept_id, self._function_name, output_value, self._expression, input_values)\n                    else:\n                        output_value = self._function_ref(dates, input_values, names, self._expression)\n                except Exception as e:\n                    logger.error(\"calculate.py _run_function_for_person raised %s\", e)\n                    logger.error(\"   calculate.py self: %s\", str(self))\n                    logger.error(\"   calculate.py dates:%s input_values:%s names:%s expr:%s\", \n                        str(dates), str(input_values), str(names), str(self._expression) )\n                    (exc_type, exc_value, exc_traceback) = sys.exc_info()\n                    traceback.print_tb(exc_traceback, limit=6, file=sys.stdout)\n                    raise e\n                if output_value is None or str(output_value) == \"-1\":\n                    logger.warning(\" returned None for person:%s, skipping insertion. concept_id:%s, date:%s, arg:%s input:%s fname:%s\",\n                        person_id, concept_id, visit_date, self._argument_descriptions, input_values, self._function_name)\n                else:\n                    logger.info(\"inserting output:%s concept:%s function:%s\", output_value, concept_id, self._function_name)\n                    self._insert_function_value(person_id, visit_date, output_value, concept_id)\n            \n            \n            # SIGNATURE 2 This form allows the function to take a specific list of arguments rather than a list.\n            elif len(input_values) == len(self._argument_descriptions):\n                logger.info(\"....SIGNATURE 2 %s %s %s\", person_id, concept_id, self._function_name)\n                output_value = self._function_ref(dates, *input_values)\n                if output_value is None or str(output_value) == \"-1\":\n                    logger.warning(\"_run_function_for_person() returned None for person:%s, skipping insertion. 
, date:%s, arg:%s input:%s fname:%s\",\n                        person_id, visit_date, self._argument_descriptions, input_values, self._function_name)\n                else:\n                    logger.info(\"inserting output:%s concept:%s function:%s\", output_value, concept_id, self._function_name)\n                    self._insert_function_value(person_id, visit_date, output_value, concept_id) \n            \n            else:\n                logger.error(\"wrong number of values %s for study_id:%s person:%s concept:%s, date:%s, desc:%s, val:%s fname:%s dates:%s\",\n                    len(input_values), self._study_id, person_id, concept_id, visit_date, self._argument_descriptions,\n                    input_values, self._function_name, dates)\n        else:\n            logger.error(\"no argument values\")\n\n\n\n    def run_function(self):\n        ''' runs a function over the set of all persons '''\n        person_ids = self._person_obj.get_study_person_ids(self._connection)\n        person_count = 0\n        for person_id in person_ids:\n            if person_count % 1000 == 0:\n                logger.info(\"  name:%s count:%d concept:%s\", self._function_name, person_count, self._concept_id)\n            person_count += 1\n            self._delete_function_value_for_person(person_id, self._concept_id)\n            self._run_function_for_person(person_id, self._concept_id)\n\n\ndef calculate_all_functions(connection, study_id):\n    ''' calculates all functions over all persons '''\n    tuples = get_function_instances(connection, study_id)\n    for function_tuple in tuples:\n        ccf = CalculatedFieldFunction(connection, study_id, function_tuple[0], function_tuple[1], function_tuple[2], function_tuple[4])\n        logger.info(\"Calculating function:%s study:%s name:%s vocab;%s term:%s FUN: %s\", function_tuple, study_id, function_tuple[0], function_tuple[1], function_tuple[2], ccf)\n        ccf.run_function()\n\n\ndef get_function_instances(connection, study_id):\n    cur = connection.cursor()\n    cur.execute((\"SELECT distinct function_name, to_vocabulary_id, to_concept_code, function_order, module_name \"\n                 \"FROM ohdsi_calculation_function \"\n                 \"WHERE study_id = %s \"\n                 \"ORDER BY function_order\"), (study_id,))\n    key_tuples = cur.fetchall()\n    cur.close()\n    return key_tuples\n\n\ndef main(db_name, user_name, study_name):\n\n    logger.info(\"CALCULATE ARGS:%s %s %s\", db_name, user_name, study_name)\n    logger.info(\"connecting to %s %s\", db_name, user_name)\n    con = psycopg2.connect(database=db_name, user=user_name)\n    con.autocommit = True\n    (study_id, observation_range_start, observation_range_end, _, _) = get_study_details(con, study_name)\n\n    calculate_all_functions(con, study_id)\n    logger.warning(\"CALCULATE complete\")\n\n    update_stmt = 'UPDATE study set calculated=\\'t\\' where study_name=%s'\n    update_cur = con.cursor()\n    try:\n        update_cur.execute(update_stmt, (study_name,) )\n    except Exception as e:\n        logger.error(\"unable to mark %s as calculated: %s, %s\", study_name, e, update_stmt)\n        raise e\n    con.close()\n", "sub_path": "django_harmonization/HeartData/calculate.py", "file_name": "calculate.py", "file_ext": "py", "file_size_in_byte": 25623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 53, "usage_type": "call"}, {"api_name": "HeartData.person.BasePerson.factory_on_id", "line_number": 81, "usage_type": "call"}, {"api_name": "HeartData.person.BasePerson", "line_number": 81, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 82, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ui.models.Concept.objects.get", "line_number": 111, "usage_type": "call"}, 
{"api_name": "ui.models.Concept.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "ui.models.Concept", "line_number": 111, "usage_type": "name"}, {"api_name": "psycopg2.extras", "line_number": 248, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 258, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 265, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 281, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 378, "usage_type": "call"}, {"api_name": "traceback.print_tb", "line_number": 379, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 379, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 445, "usage_type": "call"}, {"api_name": "HeartData.study.get_study_details", "line_number": 447, "usage_type": "call"}]} +{"seq_id": "520293584", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport functools\nimport sys\nimport warnings\nimport matplotlib\nif not hasattr(sys, \"ps1\"):\n matplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit, OptimizeWarning\nfrom covsirphy.cleaning.word import Word\nfrom covsirphy.phase.sr_data import SRData\n\n\nclass Trend(Word):\n \"\"\"\n S-R trend analysis in a phase.\n \"\"\"\n\n def __init__(self, clean_df, population,\n country, province=None, start_date=None, end_date=None):\n \"\"\"\n @clean_df : cleaned data\n - index : reset index\n - Date : Observation date\n - Country : country/region name\n - Province : province/prefecture/sstate name\n - Confirmed : the number of confirmed cases\n - Infected : the number of currently infected cases\n - Fatal : the number of fatal cases\n - Recovered : the number of recovered cases\n @population : total population in the place\n @country : country name\n @province : province name\n @start_date : start date, like 22Jan2020\n @end_date : end date, like 01Feb2020\n \"\"\"\n self.population = population\n if province is None:\n self.area = country\n else:\n self.area = f\"{country}{self.SEP}{province}\"\n sr_data = SRData(\n clean_df, country=country, province=province\n )\n self.train_df = sr_data.make(\n population, start_date=start_date, end_date=end_date\n )\n self.result_df = None\n # Start date\n self.start_date = self.train_df.index.min()\n if start_date is not None:\n self.start_date = max(\n self.start_date,\n datetime.strptime(start_date, self.DATE_FORMAT)\n )\n self.start_date = self.start_date.strftime(self.DATE_FORMAT)\n # End date\n self.end_date = self.train_df.index.max()\n if end_date is not None:\n self.end_date = min(\n self.end_date,\n datetime.strptime(end_date, self.DATE_FORMAT)\n )\n self.end_date = self.end_date.strftime(self.DATE_FORMAT)\n\n def analyse(self):\n \"\"\"\n Perform curve fitting of S-R trend\n with negative exponential function and save the result.\n \"\"\"\n self.result_df = self._fitting(self.train_df)\n\n def _fitting(self, train_df):\n \"\"\"\n Perform curve fitting of S-R trend\n with negative exponential function.\n @train_df : training dataset\n - index (Date) : Observation date\n - Recovered: The number of recovered cases\n - Susceptible_actual: Actual data of Susceptible\n @return \n - index (Date) : Observation date\n - Recovered: The number of recovered cases\n - Susceptible_actual: Actual values of Susceptible\n - Susceptible_predicted: Predicted values of Susceptible\n \"\"\"\n df = train_df.copy()\n # 
Calculate initial values of parameters\n        x_series = df[self.R]\n        y_series = df[f\"{self.S}{self.A}\"]\n        a_ini = y_series.max()\n        try:\n            b_ini = y_series.diff().reset_index(drop=True)[1] / a_ini\n        except KeyError:\n            raise KeyError(\"The length of @train_df must be over 2.\")\n        # Curve fitting with negative exponential function\n        warnings.simplefilter(\"ignore\", OptimizeWarning)\n        param, _ = curve_fit(\n            self.negative_exp, x_series, y_series,\n            p0=[a_ini, b_ini]\n        )\n        # Predict the values with the parameters\n        f_partial = functools.partial(\n            self.negative_exp, a=param[0], b=param[1]\n        )\n        df[f\"{self.S}{self.P}\"] = x_series.apply(\n            lambda x: f_partial(x)\n        ).astype(np.int64)\n        return df\n\n    def rmsle(self):\n        \"\"\"\n        Calculate RMSLE score of actual/predicted Susceptible.\n        @return <float>\n        \"\"\"\n        if self.result_df is None:\n            raise NameError(\"Must perform Trend().analyse() in advance.\")\n        df = self.result_df.copy()\n        actual = df[f\"{self.S}{self.A}\"]\n        predicted = df[f\"{self.S}{self.P}\"]\n        # Calculate RMSLE score\n        scores = np.abs(\n            np.log10(actual + 1) - np.log10(predicted + 1)\n        )\n        return scores.sum()\n\n    def result(self):\n        \"\"\"\n        Show the result as a dataframe.\n        \"\"\"\n        return self.result_df\n\n    def show(self, filename=None):\n        \"\"\"\n        show the result as a figure.\n        @show_figure <bool>:\n            - if True, show the history as a pair-plot of parameters.\n        @filename <str>: filename of the figure, or None (show figure)\n        \"\"\"\n        df = self.result()\n        df[\"Predicted\"] = df[f\"{self.S}{self.P}\"]\n        title = f\"{self.area}: S-R trend from {self.start_date} to {self.end_date}\"\n        self.show_with_many(\n            result_df=df, predicted_cols=[\"Predicted\"],\n            title=title,\n            filename=filename\n        )\n\n    @classmethod\n    def show_with_many(cls, result_df, predicted_cols,\n                       title, vlines=None, filename=None):\n        \"\"\"\n        show the result as a figure.\n        @result_df <pandas.DataFrame>: result dataset\n            - index (Date) <pd.TimeStamp>: Observation date\n            - Recovered: The number of recovered cases\n            - Susceptible_actual: Actual values of Susceptible\n            - columns defined by @columns\n        @predicted_cols <list[str]>:\n            - list of columns which have predicted values\n        @title <str>: title of the figure\n        @vlines <list[int]>:\n            - list of Recovered values to show vertical lines\n        @filename <str>: filename of the figure, or None (show figure)\n        \"\"\"\n        if result_df is None:\n            raise NameError(\"Must perform Trend().analyse() in advance.\")\n        df = result_df.copy()\n        x_series = df[cls.R]\n        actual = df[f\"{cls.S}{cls.A}\"]\n        # Plot the actual values\n        plt.plot(\n            x_series, actual,\n            label=\"Actual\", color=\"black\",\n            marker=\".\", markeredgewidth=0, linewidth=0\n        )\n        # Plot the predicted values\n        if len(predicted_cols) == 1:\n            plt.plot(x_series, df[predicted_cols[0]], label=\"Regression\")\n        else:\n            for col in predicted_cols:\n                plt.plot(x_series, df[col], label=col.replace(cls.P, str()))\n        # x-axis\n        plt.xlabel(cls.R)\n        plt.xlim(0, None)\n        # y-axis\n        plt.ylabel(cls.S)\n        plt.yscale(\"log\", basey=10)\n        # Delete y-labels of log-scale (minor) axis\n        plt.setp(plt.gca().get_yticklabels(minor=True), visible=False)\n        plt.gca().tick_params(left=False, which=\"minor\")\n        # Set new y-labels of major axis\n        ymin, ymax = plt.ylim()\n        ydiff_scale = int(np.log10(ymax - ymin))\n        yticks = np.linspace(\n            round(ymin, - ydiff_scale),\n            round(ymax, - ydiff_scale),\n            5,\n            dtype=np.int64\n        )\n        plt.gca().set_yticks(yticks)\n        fmt = matplotlib.ticker.ScalarFormatter(useOffset=False)\n        fmt.set_scientific(False)\n        plt.gca().yaxis.set_major_formatter(fmt)\n        # Title\n        plt.title(title)\n        # Vertical lines\n        if isinstance(vlines, (list, tuple)):\n            for value 
in vlines:\n plt.axvline(x=value, color=\"black\", linestyle=\":\")\n # Legend\n plt.legend(\n bbox_to_anchor=(1.02, 0), loc=\"lower left\", borderaxespad=0\n )\n # Save figure or show figure\n warnings.simplefilter(\"ignore\", UserWarning)\n plt.tight_layout()\n if filename is None:\n plt.show()\n return None\n plt.savefig(\n filename, bbox_inches=\"tight\", transparent=False, dpi=300\n )\n plt.clf()\n return None\n", "sub_path": "covsirphy/phase/trend.py", "file_name": "trend.py", "file_ext": "py", "file_size_in_byte": 8186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "covsirphy.cleaning.word.Word", "line_number": 18, "usage_type": "name"}, {"api_name": "covsirphy.phase.sr_data.SRData", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "warnings.simplefilter", "line_number": 101, "usage_type": "call"}, {"api_name": "scipy.optimize.OptimizeWarning", "line_number": 101, "usage_type": "argument"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 102, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.linspace", 
"line_number": 199, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 203, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.ticker.ScalarFormatter", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 206, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "warnings.simplefilter", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}]} +{"seq_id": "263967231", "text": "\nfrom django.http import HttpResponse, JsonResponse, FileResponse\nimport hashlib\nfrom django.core import serializers\nimport base64\nimport os\nfrom wsgiref.util import FileWrapper\nimport zipfile\nfrom io import BytesIO\nfrom wsgiref.util import FileWrapper\n\nimport json\nfrom web3 import Web3, HTTPProvider\nfrom ethereum.utils import ecrecover_to_pub, sha3\nfrom eth_utils import encode_hex, decode_hex, add_0x_prefix\nimport codecs\nfrom gatekeeper.models import Details, Accounts\nfrom django.forms.models import model_to_dict\nfrom django.conf import settings\n\nimport requests\n\n\n#TODO setup to allow user to determine network\nweb3 = Web3(HTTPProvider('https://rinkeby.infura.io'))\n\ncontractAddress = getattr(settings, 'CONTRACT_ADDRESS')\nthisServices = set(getattr(settings, 'SERVICE_IDS'))\n\nwith open('./gatekeeper/factoryDRS.json', 'r') as abi_definition:\n abi = json.load(abi_definition)\nfContract = web3.eth.contract(contractAddress,abi=abi)\n\ndef index(request):\n \"\"\"Index to test connection to server.\n\n Tests connection to server\n\n Args:\n request: a request object\n Returns:\n A successful connection string string\n \"\"\"\n return HttpResponse(\"Hello, You're at the gatekeeper\")\n\ndef data(request, address_id, signature, message_hash, parameter, key_hex):\n \"\"\"Fetches rows data from sql database or file folder.\n\n Validates user and request agains the blockchain.\n If passes either returns json object or file based on request\n\n Args:\n request: a request object\n address_id: The address id of the address_id\n signature: The requesters signature.\n message_hash: The hashed message. 
Used for signature verification\n        parameter: the parameter used to select the database and field to query\n        key: the id of the key to check against\n\n    Returns:\n        A json object containing data or a file to the requestor\n    \"\"\"\n    try:\n        parameter_hex=web3.fromAscii(parameter)\n        parameter_hex_data=parameter_hex[2:]\n        key_bytes=web3.toBytes(hexstr=key_hex)\n        parameter_bytes=web3.toBytes(hexstr=parameter_hex)\n\n        #recover public key\n        r = int(signature[0:66], 16)\n        s = int(add_0x_prefix(signature[66:130]), 16)\n        v = int(add_0x_prefix(signature[130:132]), 16)\n        if v not in (27,28):\n            v += 27\n        pubkey = ecrecover_to_pub(decode_hex(message_hash), v, r, s)\n\n        #retrieves information from key based on parameter\n        account_id=fContract.call().getKeyData(key_bytes,parameter_bytes)\n        account_id=account_id.strip()\n        owner=fContract.call().isKeyOwner(key_bytes,address_id)\n        hexId=web3.fromAscii(account_id)\n\n        if parameter == 'account_number':\n            account_id=int(hexId.rstrip(\"0\"), 16)\n\n        #Get the service this key belongs to\n        keyData=fContract.call().getKey(key_bytes)\n        serviceFromKey = web3.fromAscii(keyData[4])\n        phuse_number=Accounts.objects.get(public_key=address_id)\n        if parameter == 'file' and serviceFromKey in thisServices and encode_hex(sha3(pubkey)[-20:]) == address_id and owner:\n            module_dir = os.path.dirname(__file__)  # get current directory\n            filename=module_dir+'/file/'+account_id\n            filename=filename.strip()\n            filename=filename.strip('\\x00')\n            url = 'http://localhost:3000/asset/upload/'+str(phuse_number)\n            files = {'file':open(filename,'rb')}\n            r = requests.post(url, files=files)\n            return FileResponse(open(filename, 'rb'))\n        else:\n            print(': fail :')\n            return JsonResponse({'status':'false','message':'Invalid user'}, status=500)\n\n    except Exception as inst:\n        print(type(inst))\n        print(inst.args)\n        print(inst)\ndef register(request, address_id, signature, message_hash, phuse_number):\n    \"\"\"Registers an account for the given public key.\n\n    Validates the user and request against the blockchain.\n    If validation passes, stores the public key to phuse number mapping\n\n    Args:\n        request: a request object\n        address_id: The address id of the requester\n        signature: The requester's signature.\n        message_hash: The hashed message. 
Used for signature verification\n        phuse_number: the phuse number to associate with this\n            public key in the accounts table\n\n    Returns:\n        A json object indicating whether registration succeeded\n    \"\"\"\n    try:\n        #recover public key\n        r = int(signature[0:66], 16)\n        s = int(add_0x_prefix(signature[66:130]), 16)\n        v = int(add_0x_prefix(signature[130:132]), 16)\n        if v not in (27,28):\n            v += 27\n        pubkey = ecrecover_to_pub(decode_hex(message_hash), v, r, s)\n        #Verify the recovered public key matches the requesting address\n        if encode_hex(sha3(pubkey)[-20:]) == address_id:\n            # accountUpdate, createdAccount = Accounts.object.update_or_create(phuse_number=address_id, public_key=phuse_number)\n            obj, created = Accounts.objects.update_or_create(\n                public_key=address_id,\n                defaults={'phuse_number': phuse_number, 'public_key':address_id},\n            )\n            print('correct2',created,obj)\n            return JsonResponse({'status':'Success','message':'Account Registered'}, status=201)\n        else:\n            return JsonResponse({'status':'false','message':'Invalid user'}, status=500)\n    except Exception as inst:\n        print(type(inst))\n        print(inst.args)\n        print(inst)\n", "sub_path": "PythonClientV1/ClientGateKeeper/gatekeeper/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "web3.Web3", "line_number": 25, "usage_type": "call"}, {"api_name": "web3.HTTPProvider", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 28, "usage_type": "argument"}, {"api_name": "json.load", "line_number": 31, "usage_type": "call"}, {"api_name": "web3.eth.contract", "line_number": 32, "usage_type": "call"}, {"api_name": "web3.eth", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "web3.fromAscii", "line_number": 64, "usage_type": "call"}, {"api_name": "web3.toBytes", "line_number": 66, "usage_type": "call"}, {"api_name": "web3.toBytes", "line_number": 67, "usage_type": "call"}, {"api_name": "eth_utils.add_0x_prefix", "line_number": 71, "usage_type": "call"}, {"api_name": "eth_utils.add_0x_prefix", "line_number": 72, "usage_type": "call"}, {"api_name": "ethereum.utils.ecrecover_to_pub", "line_number": 75, "usage_type": "call"}, {"api_name": "eth_utils.decode_hex", "line_number": 75, "usage_type": "call"}, {"api_name": "web3.fromAscii", "line_number": 81, "usage_type": "call"}, {"api_name": "web3.fromAscii", "line_number": 88, "usage_type": "call"}, {"api_name": "gatekeeper.models.Accounts.objects.get", "line_number": 89, "usage_type": "call"}, {"api_name": "gatekeeper.models.Accounts.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "gatekeeper.models.Accounts", "line_number": 89, "usage_type": "name"}, {"api_name": "eth_utils.encode_hex", "line_number": 90, "usage_type": "call"}, {"api_name": "ethereum.utils.sha3", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 97, "usage_type": "call"}, {"api_name": "django.http.FileResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 101, "usage_type": "call"}, {"api_name": "eth_utils.add_0x_prefix", 
"line_number": 127, "usage_type": "call"}, {"api_name": "eth_utils.add_0x_prefix", "line_number": 128, "usage_type": "call"}, {"api_name": "ethereum.utils.ecrecover_to_pub", "line_number": 131, "usage_type": "call"}, {"api_name": "eth_utils.decode_hex", "line_number": 131, "usage_type": "call"}, {"api_name": "eth_utils.encode_hex", "line_number": 133, "usage_type": "call"}, {"api_name": "ethereum.utils.sha3", "line_number": 133, "usage_type": "call"}, {"api_name": "gatekeeper.models.Accounts.objects.update_or_create", "line_number": 135, "usage_type": "call"}, {"api_name": "gatekeeper.models.Accounts.objects", "line_number": 135, "usage_type": "attribute"}, {"api_name": "gatekeeper.models.Accounts", "line_number": 135, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 140, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "134489083", "text": "import json\nimport re\nfrom pprint import pprint\n\nimport pytest\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom data.app_data import htaccess\nfrom fixture import rest\n\ndev = \"assurancer.smashedmedia.guru\"\n\n\ndef test_Pages_Lassie(rest):\n doc = rest.get_data(htaccess+dev).text\n links = rest.find_(in_=doc,item='a::attr(href)')\n pages = rest.get_all_pages_Info_over_Website(dev,links)\n urls = [rest.convert_dev_url(page) for page in pages]\n pprint([rest.get_Lassie_object(url)for url in urls])\n\n\ndef testAssure_Links_load(bmp,rest):\n doc = rest.get_data(htaccess + dev).text\n links = rest.find_(in_=doc, item='a::attr(href)').getall()\n anchors = app.assure.filtering_links_for_Internal_and_external(links,dev)\n for page in anchors['internal']:\n dev_url = rest.convert_dev_url(page)\n inner = rest.get_data(dev_url).text\n inner_links = rest.find_(in_=inner, item='a::attr(href)').getall()\n print('Page : '+page)\n pprint(inner_links)\n bmp.open(dev_url)\n bmp.fullpage_screenshot('{}.png'.format(app.driver.title),scroll_delay=1)\n\n\ndef test_Pages(rest):\n doc = rest.get_data(htaccess + dev).text\n links = rest.find_(in_=doc, item='a::attr(href)').getall()\n pages = rest.get_all_pages_Info_over_Website(dev, links)\n urls = [rest.convert_dev_url(page) for page in pages]\n for url in urls:\n reg_page = rest.get_data(url).text\n reg_inner_links = rest.find_(in_=reg_page, item='::attr(href)').getall()\n print('Page : ' + url)\n pprint(reg_inner_links)\n\n\ndef test_responssive(emulator,rest):\n doc = rest.get_data(htaccess + dev).text\n links = rest.find_(in_=doc, item='a::attr(href)').getall()\n anchors = emulator.assure.filtering_links_for_Internal_and_external(links, dev)\n for page in anchors['internal']:\n dev_url = rest.convert_dev_url(page)\n emulator.open(dev_url)\n emulator.fullpage_screenshot(\"mobile_{}.png\".format(emulator.driver.title), scroll_delay=2)\n", "sub_path": "tests_Assurance_realty/test_AR_common.py", "file_name": "test_AR_common.py", "file_ext": "py", "file_size_in_byte": 2028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "fixture.rest.get_data", "line_number": 16, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 16, "usage_type": "name"}, {"api_name": "data.app_data.htaccess", "line_number": 16, "usage_type": "name"}, {"api_name": "fixture.rest.find_", "line_number": 17, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 17, "usage_type": "name"}, {"api_name": 
"fixture.rest.get_all_pages_Info_over_Website", "line_number": 18, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 18, "usage_type": "name"}, {"api_name": "fixture.rest.convert_dev_url", "line_number": 19, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 19, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 20, "usage_type": "call"}, {"api_name": "fixture.rest.get_Lassie_object", "line_number": 20, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 20, "usage_type": "name"}, {"api_name": "fixture.rest.get_data", "line_number": 24, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 24, "usage_type": "name"}, {"api_name": "data.app_data.htaccess", "line_number": 24, "usage_type": "name"}, {"api_name": "fixture.rest.find_", "line_number": 25, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 25, "usage_type": "name"}, {"api_name": "fixture.rest.convert_dev_url", "line_number": 28, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 28, "usage_type": "name"}, {"api_name": "fixture.rest.get_data", "line_number": 29, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 29, "usage_type": "name"}, {"api_name": "fixture.rest.find_", "line_number": 30, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 30, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 32, "usage_type": "call"}, {"api_name": "fixture.rest.get_data", "line_number": 38, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 38, "usage_type": "name"}, {"api_name": "data.app_data.htaccess", "line_number": 38, "usage_type": "name"}, {"api_name": "fixture.rest.find_", "line_number": 39, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 39, "usage_type": "name"}, {"api_name": "fixture.rest.get_all_pages_Info_over_Website", "line_number": 40, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 40, "usage_type": "name"}, {"api_name": "fixture.rest.convert_dev_url", "line_number": 41, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 41, "usage_type": "name"}, {"api_name": "fixture.rest.get_data", "line_number": 43, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 43, "usage_type": "name"}, {"api_name": "fixture.rest.find_", "line_number": 44, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 44, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 46, "usage_type": "call"}, {"api_name": "fixture.rest.get_data", "line_number": 50, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 50, "usage_type": "name"}, {"api_name": "data.app_data.htaccess", "line_number": 50, "usage_type": "name"}, {"api_name": "fixture.rest.find_", "line_number": 51, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 51, "usage_type": "name"}, {"api_name": "fixture.rest.convert_dev_url", "line_number": 54, "usage_type": "call"}, {"api_name": "fixture.rest", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "35472707", "text": "'''\n File name: createLeague.py\n Author: Jeremy Driesler\n Date created: 20190227\n Date last modified: 20190227\n Python Version: 3.7.2\n'''\n\nimport datetime\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom tabledef import Player, Team, TeamLineup\nfrom random import randint\nimport names\n \nengine = 
create_engine('sqlite:///E:\\Programming\\Projects\\BaseballGame\\ServerSide\\Database\\littleLeague.db', echo=True)\n \n# create a Session\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nnumberOfPlayersPerTeam = 25\nnumberOfTeamsInLeague = 30\nnumberOfPlayersTotal = numberOfPlayersPerTeam*numberOfTeamsInLeague\n\n# Creates 1500 players (two candidates per roster spot)\n\nfor i in range(numberOfPlayersTotal*2):\n    firstName=str(names.get_first_name())\n    lastName=str(names.get_last_name())\n    birthDate=datetime.date(1981, 12, 27)\n    position=\"NA\"\n    homeTownCity=\"Torrance\"\n    homeTownState=\"CA\"\n    height=randint(100,140)\n    weight=randint(50,90)\n    favoriteSubject=\"Math\"\n    favoriteFood=\"Pizza\"\n    favoriteColor=\"Green\"\n    favoriteTeam=\"Toronto WhoJays\"\n    hobby=\"Game Dev\"\n    favoriteVideoGame=\"FortKnight\"\n    abilityToLearn=randint(1,100)\n    intelligence=randint(1,100)\n    workEthic=randint(1,100)\n    okWithChange=randint(1,100)\n    leadership=randint(1,100)\n    handlesPressure=randint(1,100)\n    battingSpeed=randint(1,100)\n    battingEye=randint(1,100)\n    battingArm=\"R\"\n    battingPower=randint(1,100)\n    battingType=\"Power\"\n    battingPatience=randint(1,100)\n    fieldingSpeed=randint(1,100)\n    fieldingPower=randint(1,100)\n    fieldingEye=randint(1,100)\n    fieldingKnowledge=randint(1,100)\n    fieldingBallHandling=randint(1,100)\n    fieldingGlove=randint(1,100)\n    fieldingArm=\"R\"\n    pitchingArm=\"R\"\n    pitchingStamina=randint(1,100)\n    pitchingCommand=randint(1,100)\n    pitchingControl=randint(1,100)\n    pitchingFastBall=True\n    pitchingSinker=False\n    pitchingCutter=False\n    pitchingCurveball=False\n    pitchingSlider=False\n    pitchingChangeup=False\n    pitchingSplitter=False\n    pitchingForkball=False\n    pitchingCircleChange=False\n    pitchingScrewball=False\n    pitchingKnuckleCurve=False\n    pitchingKnuckleball=False\n\n\n    player = Player( \n        firstName,\n        lastName,\n        birthDate,\n        position,\n        homeTownCity,\n        homeTownState,\n        height,\n        weight,\n        favoriteSubject,\n        favoriteFood,\n        favoriteColor,\n        favoriteTeam,\n        hobby,\n        favoriteVideoGame,\n        abilityToLearn,\n        intelligence,\n        workEthic,\n        okWithChange,\n        leadership,\n        handlesPressure,\n        battingSpeed,\n        battingEye,\n        battingArm,\n        battingPower,\n        battingType,\n        battingPatience,\n        fieldingSpeed,\n        fieldingPower,\n        fieldingEye,\n        fieldingKnowledge,\n        fieldingBallHandling,\n        fieldingGlove,\n        fieldingArm,\n        pitchingArm,\n        pitchingStamina,\n        pitchingCommand,\n        pitchingControl,\n        pitchingFastBall,\n        pitchingSinker,\n        pitchingCutter,\n        pitchingCurveball,\n        pitchingSlider,\n        pitchingChangeup,\n        pitchingSplitter,\n        pitchingForkball,\n        pitchingCircleChange,\n        pitchingScrewball,\n        pitchingKnuckleCurve,\n        pitchingKnuckleball)\n    session.add(player)\n    \n\n##Creating Teams\nfor i in range(numberOfTeamsInLeague):\n    city= \"Bakersfield\" + str(i)\n    state= \"California\" + str(i)\n    name= \"Farmers\" + str(i)\n    logo= \"farmers.jpg\" + str(i)\n    team = Team(city, state, name, logo) \n\n    session.add(team)\n\n# creating team lineups\n\nfor i in range(numberOfTeamsInLeague):\n    teamID = i + 1\n    for j in range(numberOfPlayersPerTeam):\n        playerID = (numberOfPlayersPerTeam*i)+j+1\n        isCurrent = 1\n        team_lineup = TeamLineup(teamID, playerID, isCurrent)\n        session.add(team_lineup)\n\n\n# commit the records to the database\nsession.commit()", "sub_path": "ServerSide/Database/createLeague.py", "file_name": "createLeague.py", "file_ext": "py", "file_size_in_byte": 4352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 16, 
"usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 19, "usage_type": "call"}, {"api_name": "names.get_first_name", "line_number": 30, "usage_type": "call"}, {"api_name": "names.get_last_name", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 44, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 45, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 47, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 51, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 61, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 66, "usage_type": "call"}, {"api_name": "tabledef.Player", "line_number": 81, "usage_type": "call"}, {"api_name": "tabledef.Team", "line_number": 140, "usage_type": "call"}, {"api_name": "tabledef.TeamLineup", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "647098080", "text": "import requests\nimport json\n\ndef apiCallReturnJSON(token, method, api_url, payload):\n\n # TODO: Check if the token is still valid\n\n url = \"https://webexapis.com/v1/{}\".format(api_url)\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer {}'.format(token)\n }\n\n response = requests.request(method, url, headers=headers, data=payload)\n print(\"Response Status: {} \\n\".format(response))\n return json.loads(response.text)\n\n\ndef renewAccessToken():\n # TODO: Error handling\n\n json_file = open('parameters.json', 'r')\n parameters = json.load(json_file)\n json_file.close()\n\n base_url = \"https://webexapis.com/v1/access_token\"\n grant_type = \"refresh_token\"\n client_id = parameters['client_id']\n client_secret = parameters['client_secret']\n refresh_token = parameters['refresh_token']\n\n url = \"{}?grant_type={}&client_id={}&client_secret={}&refresh_token={}\".format(base_url, grant_type, client_id, client_secret, refresh_token)\n headers = {\n 'Accept':'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n payload = {}\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n print(\"Response Status: {} \\n\".format(response))\n response_json = json.loads(response.text)\n\n print(response_json)\n\n access_token = response_json['access_token']\n refresh_token = response_json['refresh_token']\n\n # # TODO: Logging of 
the token request\n\n    json_file = open('parameters.json', 'w')\n\n    parameters['access_token'] = access_token\n    parameters['refresh_token'] = refresh_token\n\n    json.dump(parameters, json_file)\n    json_file.close()\n\n\ndef getPersonalToken():\n    json_file = open('parameters.json', 'r')\n    parameters = json.load(json_file)\n    json_file.close()\n\n    print(\"Access Token: {} \\n\".format(parameters['access_token']))\n\n    return parameters['access_token']\n\ndef getBotToken():\n    json_file = open('parameters.json', 'r')\n    parameters = json.load(json_file)\n    json_file.close()\n\n    print(\"Access Token: {} \\n\".format(parameters['bot_token']))\n\n    return parameters['bot_token']\n\n\ndef getAllMeetings():\n\n    api_url = \"meetings?meetingType=meeting\"\n    meetings = apiCallReturnJSON(getPersonalToken(), \"GET\", api_url, {})\n    print(json.dumps(meetings, indent=4, sort_keys=True))\n    \n    api_url = \"meetings?meetingType=scheduledMeeting\"\n    meetings = apiCallReturnJSON(getPersonalToken(), \"GET\", api_url, {})\n    print(json.dumps(meetings, indent=4, sort_keys=True))\n\n\ndef sendBotMsg(message):\n\n    # TODO: Think how to obtain the BOT room ID easily or when the room has been deleted.\n\n    api_url = \"messages/\"\n\n    # TODO: REMOVE HARD-CODED Value\n    payload = {\n        \"roomId\": \"Y2lzY29zcGFyazovL3VzL1JPT00vNGU3MjE3NGItOTQ5Yy0zZmQ4LWFmMjgtNmE3MDc1ZjY4OWJh\",\n        \"text\": \"{}\".format(message)\n    }\n    payload = json.dumps(payload)\n\n    response = apiCallReturnJSON(getBotToken(), \"POST\", api_url, payload)\n    print(json.dumps(response, indent=4, sort_keys=True))\n\n\nif __name__ == \"__main__\":\n    # sendBotMsg(\"Hello\")\n    getAllMeetings()\n    ", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.request", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "json.load", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 55, "usage_type": "call"}, {"api_name": "json.load", "line_number": 61, "usage_type": "call"}, {"api_name": "json.load", "line_number": 70, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 100, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "589415589", "text": "from datetime import datetime\nimport pymysql\nimport Controller.DB.DB_basic as db_basic\n\nprint(db_basic.db)\n\ndef dateFormat(date):\n    res = datetime.strptime(date, \"%Y-%m-%d\").strftime('%Y/%#m/%#d')\n    return res\n\ndef getPrice(date):\n    db = pymysql.connect(host=db_basic.db['host'], user=db_basic.db['user'], password=db_basic.db['pwd'], database=db_basic.db['db_name'])\n    dataInput = dateFormat(date)\n\n    # Use the cursor() method to create a cursor object\n    cursor = db.cursor()\n\n    # SQL query statement\n    sql = \"SELECT closePrice FROM price WHERE date = '\" + dataInput + \"'\"\n    try:\n        # Execute the SQL statement\n        cursor.execute(sql)\n        # Fetch all records as a list\n        results = cursor.fetchall()\n\n    except:\n        print(\"Error: unable to fetch data\")\n\n\n\n    print(results)\n    # Close the database connection\n    db.close()\n\n    return results\n\n# getPrice('2020-09-09')", "sub_path": 
"SystemCode/backend/Bit_coin_v1.1/Controller/close_price/getPrice.py", "file_name": "getPrice.py", "file_ext": "py", "file_size_in_byte": 893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Controller.DB.DB_basic.db", "line_number": 5, "usage_type": "attribute"}, {"api_name": "Controller.DB.DB_basic", "line_number": 5, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "Controller.DB.DB_basic.db", "line_number": 12, "usage_type": "attribute"}, {"api_name": "Controller.DB.DB_basic", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "204540600", "text": "import logging\nfrom decimal import Decimal, InvalidOperation\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nlogger = logging.getLogger(\"traffic_control\")\n\n\nclass SoftDeleteModel(models.Model):\n is_active = models.BooleanField(_(\"Active\"), default=True)\n deleted_at = models.DateTimeField(_(\"Deleted at\"), blank=True, null=True)\n deleted_by = models.ForeignKey(\n get_user_model(),\n verbose_name=_(\"Deleted by\"),\n related_name=\"deleted_by_%(class)s_set\",\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n )\n\n class Meta:\n abstract = True\n\n def soft_delete(self, user):\n self.is_active = False\n self.deleted_at = timezone.now()\n self.deleted_by = user\n self.save()\n\n\nclass UserControlModel(models.Model):\n created_at = models.DateTimeField(_(\"Created at\"), auto_now_add=True)\n updated_at = models.DateTimeField(_(\"Updated at\"), auto_now=True)\n created_by = models.ForeignKey(\n get_user_model(),\n verbose_name=_(\"Created by\"),\n related_name=\"created_by_%(class)s_set\",\n on_delete=models.PROTECT,\n )\n updated_by = models.ForeignKey(\n get_user_model(),\n verbose_name=_(\"Updated by\"),\n related_name=\"updated_by_%(class)s_set\",\n on_delete=models.PROTECT,\n )\n\n class Meta:\n abstract = True\n\n\nclass UpdatePlanLocationMixin:\n \"\"\"A mixin class that updates plan location when the plan\n field of target model is changed\"\"\"\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n old_plan = None\n else:\n # remember the old plan when updating existing traffic\n # control objects\n old_plan = type(self).objects.get(pk=self.pk).plan\n super().save(*args, **kwargs)\n if self.plan != old_plan:\n # note that we also need to update the old plan location when\n # updating the plan field of existing traffic control objects.\n if old_plan:\n old_plan.derive_location_from_related_plans()\n if self.plan:\n self.plan.derive_location_from_related_plans()\n\n def delete(self, *args, **kwargs):\n super().delete(*args, **kwargs)\n if self.plan:\n self.plan.derive_location_from_related_plans()\n\n\nclass SourceControlModel(models.Model):\n source_id = models.CharField(\n _(\"Source id\"), max_length=64, null=True, blank=True, default=None\n )\n source_name = models.CharField(\n _(\"Source name\"), max_length=254, null=True, blank=True, default=None\n )\n\n class Meta:\n abstract = True\n\n\nclass DecimalValueFromDeviceTypeMixin:\n \"\"\"\n A model mixin class that saves device type value to the decimal value field\n\n Only set value field when the value field is empty and a default value\n 
is set in device type\n \"\"\"\n\n def save(self, *args, **kwargs):\n if not self.value and self.device_type and self.device_type.value:\n try:\n self.value = Decimal(self.device_type.value)\n except InvalidOperation:\n logger.warning(\"Cannot convert device type value to Decimal\")\n super().save(*args, **kwargs)\n", "sub_path": "traffic_control/mixins/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models.PROTECT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 38, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models.PROTECT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 44, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 45, "usage_type": "call"}, {"api_name": 
"django.db.models.PROTECT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 85, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 103, "usage_type": "call"}, {"api_name": "decimal.InvalidOperation", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "449468617", "text": "from django.db import models\n\nclass Book(models.Model):\n\ttitle = models.CharField(max_length=255, blank=True)\n\tblurb = models.TextField(max_length=255, blank=True)\n\tnum_pages = models.IntegerField(blank=True)\n\tprince = models.FloatField(blank=True)\n\tin_print = models.BooleanField(default=True)\n\timage = models.FileField(upload_to='documents/%Y,%m,%d', null=True, blank=True)\n\n\tcover_image = models.FileField(upload_to=\"covers/%Y,%m/,%d/\", null=True, blank=True)\n\t\ndef __str__(self):\n\t\treturn self.title\n", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.models.Model", "line_number": 3, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 3, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 4, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 5, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "347881510", "text": "# Copyright 2017, 2018 Amazon.com, Inc. 
or its affiliates.\n\n# This module is part of Amazon Linux Extras.\n#\n# Amazon Linux Extras is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License v2 as published\n# by the Free Software Foundation.\n#\n# Amazon Linux Extras is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\n# for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Amazon Linux Extras. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import print_function, unicode_literals\n\nfrom .software_catalog import VERSION_KEY\n\nimport re\nimport os\nimport sys\nimport shutil\nfrom tempfile import NamedTemporaryFile\ntry:\n    import ConfigParser as configparser\nexcept ImportError:\n    import configparser\nimport logging as loggingmod\n\nif sys.version_info.major == 2:\n    from gettext import gettext as gettext_yields_encoded\n    _ = lambda *args: gettext_yields_encoded(*args).decode(\"UTF-8\")\nelse:\n    from gettext import gettext as _\n\nCONFIG_URL_MATCHER = re.compile(r\".*?/extras/(?P<extra>[^/]+)/(?P<exactver>[^/]+)/\\$basearch/mirror\\.list$\")\n\nYUMCONFIG_FILE_NAME = os.path.join(os.environ.get(\"TESTROOT\", \"/etc/yum.repos.d/\"), \"amzn2-extras.repo\")\nYUM_AMZN2_PRIORITY = 10\n\nYUMCONFIG_SECTION_EXTRAPREFIX = \"amzn2extra-\"\n\n\nlogger = loggingmod.getLogger(__name__)\n\n\ndef yum_ini_unquote(quoted_string):\n    \"\"\"\n    >>> yum_ini_unquote(\"abcdef_21__40__23__24_\")\n    'abcdef!@#$'\n    \"\"\"\n    def decode(match):\n        return chr(int(match.group(1), 16))\n    unquoted = re.sub(\"_([0-9a-fA-F]{2})_\", decode, quoted_string)\n    unnamespaced_unquoted = re.sub(\"^(?:\"+re.escape(YUMCONFIG_SECTION_EXTRAPREFIX)+\")?\", r\"\", unquoted)\n    return unnamespaced_unquoted\n\n\ndef yum_ini_quote(raw_string):\n    \"\"\"\n    >>> yum_ini_quote(\"test1.1+other2\")\n    'amzn2extra-test1.1_2b_other2'\n    >>> yum_ini_quote(\"abcdef!@#$\")\n    'amzn2extra-abcdef_21__40__23__24_'\n    \"\"\"\n    def encode(match):\n        return \"_%02x_\" % (ord(match.group(1)),)\n    quoted = re.sub(\"([^-A-Za-z0-9.])\", encode, raw_string)  # important to match _\n    return YUMCONFIG_SECTION_EXTRAPREFIX + quoted\n\n\ndef read_configuration():\n    \"\"\"Read the YUM configuration file we manage.\"\"\"\n    config = configparser.RawConfigParser()\n    if os.path.isfile(YUMCONFIG_FILE_NAME):\n        config.read(YUMCONFIG_FILE_NAME)\n    state = {}\n    for section in config.sections():\n\n        if section.endswith((\"-source\", \"-debuginfo\")):\n            continue\n\n        quoted_section_name = yum_ini_unquote(section)\n        state[quoted_section_name] = {}\n        for key, value in config.items(section):\n            if key == \"mirrorlist\":\n                match = CONFIG_URL_MATCHER.match(value)\n                if not match:\n                    logger.error(\"Malformed url in %s section %s %r\", YUMCONFIG_FILE_NAME, section, value)\n                    raise ValueError(value)\n                else:\n                    state[quoted_section_name][VERSION_KEY] = match.group(\"exactver\")\n\n            if key in (\"enabled\",):\n                if hasattr(value, \"lower\") and value.lower().strip() in (\"true\", \"1\", \"yes\"):\n                    state[quoted_section_name][key] = 1\n                elif hasattr(value, \"lower\") and value.lower().strip() in (\"false\", \"0\", \"no\"):\n                    state[quoted_section_name][key] = 0\n                elif value in (0, 1):\n                    state[quoted_section_name][key] = value\n                else:\n                    logger.warn(\"Unexpected value for %s %r is %r\", section, key, value)\n                    state[quoted_section_name][key] = value\n            else:\n                state[quoted_section_name][key] = value\n\n        if VERSION_KEY not in state[quoted_section_name] 
and state[quoted_section_name].get(\"enabled\"):\n state[quoted_section_name][VERSION_KEY] = \"latest\"\n return state\n\n\ndef write_configuration(state):\n \"\"\"Write the probably-mutated YUM configuration file we manage.\"\"\"\n\n src_suffix = \"-source\"\n dbg_suffix = \"-debuginfo\"\n\n config = configparser.RawConfigParser()\n config.read(YUMCONFIG_FILE_NAME)\n\n mirrorlist_fmt = \"http://amazonlinux.$awsregion.$awsdomain/$releasever/extras/{extraname}/{exactver}/{sect}/mirror.list\"\n\n for key in state:\n ini_section = yum_ini_quote(key)\n ini_section_dbg = ini_section + dbg_suffix\n ini_section_src = ini_section + src_suffix\n\n if not config.has_section(ini_section_src):\n config.add_section(ini_section_src)\n\n if not config.has_section(ini_section_dbg):\n config.add_section(ini_section_dbg)\n\n if not config.has_section(ini_section):\n config.add_section(ini_section)\n\n # defaults\n config.set(ini_section, \"enabled\", state[key][\"enabled\"])\n\n config.set(ini_section_dbg, \"enabled\",\n config.get(ini_section_dbg, \"enabled\") if config.has_option(ini_section_dbg, \"enabled\") else \"0\")\n\n config.set(ini_section_src, \"enabled\",\n config.get(ini_section_src, \"enabled\") if config.has_option(ini_section_src, \"enabled\") else \"0\")\n\n # user settings\n for k, v in sorted(state[key].items()):\n if k == VERSION_KEY:\n continue\n config.set(ini_section, k, v)\n\n # overrides of user settings\n config.set(ini_section, \"name\", \"Amazon Extras repo for \" + key)\n config.set(ini_section_src, \"name\", \"Amazon Extras source repo for \" + key)\n config.set(ini_section_dbg, \"name\", \"Amazon Extras debuginfo repo for \" + key)\n\n config.set(ini_section, \"mirrorlist\", mirrorlist_fmt.format(extraname=key, exactver=state[key].get(VERSION_KEY, \"latest\"), sect=\"$basearch\"))\n config.set(ini_section_src, \"mirrorlist\", mirrorlist_fmt.format(extraname=key, exactver=state[key].get(VERSION_KEY, \"latest\"), sect=\"SRPMS\"))\n config.set(ini_section_dbg, \"mirrorlist\", mirrorlist_fmt.format(extraname=key, exactver=state[key].get(VERSION_KEY, \"latest\"), sect=\"debuginfo/$basearch\"))\n\n assert CONFIG_URL_MATCHER.match(config.get(ini_section, \"mirrorlist\"))\n config.set(ini_section, \"gpgcheck\", 1)\n config.set(ini_section_src, \"gpgcheck\", 1)\n config.set(ini_section_dbg, \"gpgcheck\", 1)\n config.set(ini_section, \"priority\", YUM_AMZN2_PRIORITY)\n config.set(ini_section_src, \"priority\", YUM_AMZN2_PRIORITY)\n config.set(ini_section_dbg, \"priority\", YUM_AMZN2_PRIORITY)\n\n with NamedTemporaryFile(mode=\"w+t\", delete=False) as ntf:\n written_file_name = ntf.name\n ntf.write(\"\\n\\n\\n\\n### This file is managed with amazon-linux-extras. 
Please manage with that tool.\n\")\n        ntf.write(\"\\n\"*20) # Scroll the good stuff below impulsive attentions.\n        config.write(ntf)\n    os.chmod(written_file_name, 0o644) # Make world-readable, owner writable\n\n    try:\n        shutil.move(written_file_name, YUMCONFIG_FILE_NAME)\n    except IOError as exc:\n        logger.error(_(\"You lack permissions to write to system configuration.\") + \" \" + YUMCONFIG_FILE_NAME)\n        raise\n", "sub_path": "usr/lib/python2.7/site-packages/amazon_linux_extras/repo.py", "file_name": "repo.py", "file_ext": "py", "file_size_in_byte": 7250, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.version_info", "line_number": 32, "usage_type": "attribute"}, {"api_name": "gettext.gettext", "line_number": 34, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 56, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 57, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 70, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 94, "usage_type": "name"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 109, "usage_type": "name"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 110, "usage_type": "name"}, {"api_name": "configparser.RawConfigParser", "line_number": 120, "usage_type": "call"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 150, "usage_type": "name"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 159, "usage_type": "argument"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 160, "usage_type": "argument"}, {"api_name": "software_catalog.VERSION_KEY", "line_number": 161, "usage_type": "argument"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 171, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 176, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 179, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "215979678", "text": "import requests\r\nimport json\r\nimport emoji\r\nimport jieba\r\nimport os\r\nimport wordcloud \r\nstopWordList=open('stopWord.txt').read().splitlines()\r\nmember_list=[]\r\n# word segmentation\r\ndef jiebaClearText(text):\r\n    \r\n    jieba_list=jieba.cut(text,cut_all=False)\r\n    \r\n    #return ' '.join(list)\r\n    outstr=\"\"\r\n    for word in jieba_list:\r\n        if word not in stopWordList:\r\n            outstr += \" \"\r\n            outstr += word \r\n    return outstr\r\n\r\n\r\ndef get_pl (av,ff,page=1):\r\n    try:\r\n        payload={'oid':av,'type':'1','next':str(page)}\r\n        response = requests.get('https://api.bilibili.com/x/v2/reply/main', params=payload)\r\n        response_json=response.json()\r\n        \r\n        #print((response_json['data']['cursor']['is_end']))\r\n        if not 
(response_json['data']['cursor']['is_end']):\r\n for i in response_json['data']['replies']:\r\n print('\\n'+str(i['member']['uname'])+':'+emoji.demojize(str(i['content']['message'])))\r\n member_list.append(i['member']['uname'])\r\n ff.write(jiebaClearText(emoji.demojize(i['content']['message']))) \r\n if not i['replies'] == None:\r\n for each in i['replies']:\r\n print('\\n'+str(each['member']['uname'])+':'+emoji.demojize(str(each['content']['message'])))\r\n member_list.append(each['member']['uname'])\r\n ff.write(jiebaClearText(emoji.demojize(str(each['content']['message'])))) \r\n get_pl(av,ff,page+1)\r\n else:\r\n print("Successfully fetched all comments and replies")\r\n print("There are %d pages in total"%(int(response_json['data']['cursor']['prev'])-1))\r\n except Exception:\r\n print("An unknown error may have occurred")\r\n\r\ndef makeWordCloud(cloudText,filename):\r\n if(cloudText == "" ):\r\n print("The comment text is empty")\r\n\r\n else:\r\n w=wordcloud.WordCloud(\r\n font_path='C:/Windows/Fonts/simkai.ttf',\r\n background_color='white',\r\n width=4096,\r\n height=2160,\r\n max_words=1000,\r\n )\r\n w.generate(cloudText)\r\n w.to_file(filename+".png")\r\n os.startfile(filename+".png")\r\n #os.startfile(av+".txt")\r\n\r\ndef somebody_in(name):\r\n for each in member_list:\r\n if each==name:\r\n print('Target user found')\r\n break\r\n else:\r\n print("Target user not found")\r\n\r\nif __name__=="__main__":\r\n av=input("Enter the AV number:")\r\n file_open=open(av+".txt",'w',encoding='utf-8')\r\n get_pl(av,file_open)\r\n file_open.close()\r\n text=open(av+'.txt','r',encoding='utf-8').read()\r\n makeWordCloud(text,av)\r\n #somebody_in("大哥大非常大")\r\n \r\n \r\n\r\n\r\n \r\n", "sub_path": "my_getbilibili.py", "file_name": "my_getbilibili.py", "file_ext": "py", "file_size_in_byte": 2727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "jieba.cut", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "emoji.demojize", "line_number": 32, "usage_type": "call"}, {"api_name": "emoji.demojize", "line_number": 34, "usage_type": "call"}, {"api_name": "emoji.demojize", "line_number": 37, "usage_type": "call"}, {"api_name": "emoji.demojize", "line_number": 39, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 52, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "277708685", "text": "from selenium import webdriver\r\nfrom selenium.common.exceptions import NoSuchElementException,StaleElementReferenceException\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nimport datetime\r\nfrom dateutil.parser import parse\r\nimport sys\r\nfrom crawler.db import save\r\n\r\ndef RT():\r\n\tdriver = webdriver.PhantomJS('/home/ec2-user/temp2/phantomjs-2.1.1-linux-x86_64/bin/phantomjs')\r\n\tlist = ['https://www.rottentomatoes.com/browse/opening', 'https://www.rottentomatoes.com/browse/in-theaters','https://www.rottentomatoes.com/browse/upcoming']\r\n\tfor site in list:\r\n\t\tdriver.get(site)\r\n\t\ttry:\r\n\t\t\tWebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME , "movie_info")))\r\n\t\t\thtml = driver.page_source\r\n\t\t\tsoup = BeautifulSoup(html, "html.parser")\r\n\t\t\tinfo = soup.find_all("div", {"class" : "movie_info"})\r\n\t\t\tfor item in info:\r\n\t\t\t\tname = item.find("h3", {"class" : 
\"movieTitle\"})\r\n\t\t\t\tpoint = item.find_all(\"span\", {\"class\" : \"tMeterScore\"})\r\n\t\t\t\treleasedatetag = item.find(\"p\", {\"class\" : \"release-date\"})\r\n\t\t\t\treleasedate = releasedatetag.text\r\n\t\t\t\treleasedate = releasedate.replace(\"In Theaters \", \"\")\r\n\t\t\t\trdate = parse(releasedate)\r\n\t\t\t\tif len(point) == 0:\r\n\t\t\t\t\tpointtext = ''\r\n\t\t\t\telse:\r\n\t\t\t\t\tpointtext = point[len(point) - 1].text.strip()\r\n\r\n\t\t\t\tsave(name.text.strip(), rdate, pointtext, 'RT')\t\r\n\t\tfinally:\r\n\t\t\tprint(\"===============RT END==============\")\r\n\t\t\t\r\n\tdriver.close()\r\n\tdriver.quit()\r\n", "sub_path": "crawler/rt.py", "file_name": "rt.py", "file_ext": "py", "file_size_in_byte": 1571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.webdriver.PhantomJS", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 18, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 18, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 18, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "crawler.db.save", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "646740882", "text": "import decimal\nfrom calendar import timegm\nfrom datetime import datetime, date\nfrom dateutil.tz import tzutc\n\nfrom marshmallow import ValidationError, Schema as MaSchema, missing, class_registry, utils\nfrom marshmallow import fields as ma_fields, validates_schema\nfrom marshmallow.base import SchemaABC\nfrom marshmallow.compat import basestring\nfrom marshmallow.fields import _RECURSIVE_NESTED\nimport bson\n\nfrom .i18n import gettext as _\n\n\n__all__ = (\n 'schema_validator_check_unknown_fields',\n 'schema_from_umongo_get_attribute',\n 'SchemaFromUmongo',\n\n 'StrictDateTime',\n 'Timestamp',\n 'ObjectId',\n 'Reference',\n 'GenericReference'\n)\n\n\n# Bonus: schema helpers !\nUNKNOWN_FIELD_ERROR = _('Unknown field.')\n\n\ndef schema_validator_check_unknown_fields(self, data, original_data):\n    \"\"\"\n    Schema validator, raises ValidationError for unknown fields in a\n    marshmallow schema.\n\n    example::\n\n        class MySchema(marshmallow.Schema):\n            # method's name is not important\n            __check_unknown_fields = validates_schema(pass_original=True)(\n                schema_validator_check_unknown_fields)\n\n            # Define the rest of your schema\n            ...\n\n    .. note:: Unknown fields with `missing` value will be ignored\n    \"\"\"\n    # Just skip if dummy data have been passed to the schema\n    if not isinstance(original_data, dict):\n        return\n    loadable_fields = [k for k, v in self.fields.items() if not v.dump_only]\n    unknown_fields = {key for key, value in original_data.items()\n                      if value is not missing and key not in loadable_fields}\n    if unknown_fields:\n        raise ValidationError(UNKNOWN_FIELD_ERROR, unknown_fields)\n\n\ndef schema_from_umongo_get_attribute(self, attr, obj, default):\n    \"\"\"\n    Overwrite default `Schema.get_attribute` method 
by this one to access\n    umongo missing fields instead of returning `None`.\n\n    example::\n\n        class MySchema(marshmallow.Schema):\n            get_attribute = schema_from_umongo_get_attribute\n\n            # Define the rest of your schema\n            ...\n\n    \"\"\"\n    ret = MaSchema.get_attribute(self, attr, obj, default)\n    if ret is None and ret is not default and attr in obj.schema.fields:\n        raw_ret = obj._data.get(attr)\n        return default if raw_ret is missing else raw_ret\n    else:\n        return ret\n\n\nclass SchemaFromUmongo(MaSchema):\n    \"\"\"\n    Custom :class:`marshmallow.Schema` subclass providing unknown fields\n    checking and custom get_attribute for umongo documents.\n\n    .. note:: It is not mandatory to use this schema with umongo document.\n        This is just a helper providing useful behaviors.\n    \"\"\"\n\n    __check_unknown_fields = validates_schema(pass_original=True)(\n        schema_validator_check_unknown_fields)\n    get_attribute = schema_from_umongo_get_attribute\n\n\n# Bonus: new fields !\n\nclass DateTime(ma_fields.DateTime):\n    \"\"\"\n    Marshmallow DateTime field\n    \"\"\"\n\n    def _deserialize(self, value, attr, data):\n        if not isinstance(value, datetime):\n            if isinstance(value, date):\n                value = datetime.combine(value, datetime.min.time())\n            else:\n                value = super()._deserialize(value, attr, data)\n        return value\n\n\nclass StrictDateTime(ma_fields.DateTime):\n    \"\"\"\n    Marshmallow DateTime field with extra parameter to control\n    whether dates should be loaded as tz_aware or not\n    \"\"\"\n\n    def __init__(self, load_as_tz_aware=False, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.load_as_tz_aware = load_as_tz_aware\n\n    def _deserialize(self, value, attr, data):\n        date = super()._deserialize(value, attr, data)\n        return self._set_tz_awareness(date)\n\n    def _set_tz_awareness(self, date):\n        if self.load_as_tz_aware:\n            # If datetime is TZ naive, set UTC timezone\n            if date.tzinfo is None or date.tzinfo.utcoffset(date) is None:\n                date = date.replace(tzinfo=tzutc())\n        else:\n            # If datetime is TZ aware, convert it to UTC and remove TZ info\n            if date.tzinfo is not None and date.tzinfo.utcoffset(date) is not None:\n                date = date.astimezone(tzutc())\n            date = date.replace(tzinfo=None)\n        return date\n\n\nclass Timestamp(ma_fields.Integer):\n    \"\"\"\n    Marshmallow Timestamp field\n    \"\"\"\n    default_error_messages = {\n        'invalid': 'Timestamp must be integer.'\n    }\n\n    def __init__(self, auto_now=False, **kwargs):\n        super().__init__(**kwargs)\n        if auto_now:\n            self.missing = datetime.utcnow\n            self.dump_only = True\n\n    def _serialize(self, value, attr, obj):\n        if value is None:\n            return None\n        return timegm(value.utctimetuple())\n\n    def _deserialize(self, value, attr, data):\n        return datetime.utcfromtimestamp(value)\n\n\nclass Decimal(ma_fields.Decimal):\n    \"\"\"\n    Marshmallow field for :class:`decimal.Decimal`\n    \"\"\"\n\n    def __init__(self, places=None, rounding=None, allow_nan=False, as_string=False, **kwargs):\n        if places is not None and not isinstance(places, decimal.Decimal):\n            places = decimal.Decimal((0, (1,), -places))\n        self.places = places\n        self.rounding = rounding\n        self.allow_nan = allow_nan\n        self.as_string = as_string\n        super(ma_fields.Number, self).__init__(as_string=as_string, **kwargs)\n\n\nclass Float(ma_fields.Float):\n    \"\"\"\n    Marshmallow float field\n    \"\"\"\n\n    def __init__(self, places=None, as_string=False, **kwargs):\n        self.places = places\n        self.as_string = as_string\n        super().__init__(as_string=as_string, **kwargs)\n\n    # override Number\n    def _format_num(self, value):\n        if value is None:\n            return None\n\n        
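# num_type is float for this field, so the cast below normalizes str/int input; rounding only applies when 'places' was given\n        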
value = self.num_type(value)\n if self.places is not None:\n value = round(value, self.places)\n return value\n\n\nclass ObjectId(ma_fields.Field):\n \"\"\"\n Marshmallow field for :class:`bson.ObjectId`\n \"\"\"\n\n def _serialize(self, value, attr, obj):\n if value is None:\n return None\n return str(value)\n\n def _deserialize(self, value, attr, data):\n try:\n return bson.ObjectId(value)\n except (bson.errors.InvalidId, TypeError):\n raise ValidationError(_('Invalid ObjectId.'))\n\n\nclass Reference(ma_fields.Field):\n \"\"\"\n Marshmallow field for :class:`umongo.fields.ReferenceField`\n \"\"\"\n\n def __init__(self, nested, exclude=tuple(), only=None, mongo_world=False, **kwargs):\n self.nested = nested\n self.only = only\n self.exclude = exclude\n self.many = kwargs.get('many', False)\n self.mongo_world = mongo_world\n self.__schema = None # Cached Schema instance\n self.__updated_fields = False\n super().__init__(**kwargs)\n\n @property\n def schema(self):\n \"\"\"The nested Schema object.\n\n .. versionchanged:: 1.0.0\n Renamed from `serializer` to `schema`\n \"\"\"\n if not self.__schema:\n # Ensure that only parameter is a tuple\n if isinstance(self.only, basestring):\n only = (self.only,)\n else:\n only = self.only\n\n # Inherit context from parent.\n context = getattr(self.parent, 'context', {})\n if isinstance(self.nested, SchemaABC):\n self.__schema = self.nested\n self.__schema.context.update(context)\n elif isinstance(self.nested, type) and \\\n issubclass(self.nested, SchemaABC):\n self.__schema = self.nested(many=self.many,\n only=only, exclude=self.exclude, context=context,\n load_only=self._nested_normalized_option('load_only'),\n dump_only=self._nested_normalized_option('dump_only'))\n elif isinstance(self.nested, basestring):\n if self.nested == _RECURSIVE_NESTED:\n parent_class = self.parent.__class__\n self.__schema = parent_class(many=self.many, only=only,\n exclude=self.exclude, context=context,\n load_only=self._nested_normalized_option('load_only'),\n dump_only=self._nested_normalized_option('dump_only'))\n else:\n schema_class = class_registry.get_class(self.nested)\n self.__schema = schema_class(many=self.many,\n only=only, exclude=self.exclude, context=context,\n load_only=self._nested_normalized_option('load_only'),\n dump_only=self._nested_normalized_option('dump_only'))\n else:\n raise ValueError('Nested fields must be passed a '\n 'Schema, not {0}.'.format(self.nested.__class__))\n self.__schema.ordered = getattr(self.parent, 'ordered', False)\n return self.__schema\n\n def _nested_normalized_option(self, option_name):\n nested_field = '%s.' % self.name\n return [field.split(nested_field, 1)[1]\n for field in getattr(self.root, option_name, set())\n if field.startswith(nested_field)]\n\n def _serialize(self, nested_obj, attr, obj):\n # Load up the schema first. 
This allows a RegistryError to be raised\n # if an invalid schema name was passed\n if nested_obj is None:\n return None\n elif self.mongo_world:\n # In mongo world, value is a regular ObjectId\n return str(nested_obj)\n\n if getattr(nested_obj, '_document', None):\n nested_obj = nested_obj._document\n else:\n return str(nested_obj.pk)\n\n schema = self.schema\n if not self.__updated_fields:\n schema._update_fields(obj=nested_obj, many=self.many)\n self.__updated_fields = True\n ret, errors = schema.dump(nested_obj, many=self.many,\n update_fields=not self.__updated_fields)\n if isinstance(self.only, basestring): # self.only is a field name\n only_field = self.schema.fields[self.only]\n key = ''.join([self.schema.prefix or '', only_field.dump_to or self.only])\n if self.many:\n return utils.pluck(ret, key=key)\n else:\n return ret[key]\n if errors:\n raise ValidationError(errors, data=ret)\n return ret\n\n def _deserialize(self, value, attr, data):\n try:\n return bson.ObjectId(value)\n except (bson.errors.InvalidId, TypeError):\n raise ValidationError(_('Invalid ObjectId.'))\n\n\nclass GenericReference(ma_fields.Field):\n \"\"\"\n Marshmallow field for :class:`umongo.fields.GenericReferenceField`\n \"\"\"\n\n def __init__(self, *args, mongo_world=False, **kwargs):\n super().__init__(*args, **kwargs)\n self.mongo_world = mongo_world\n\n def _serialize(self, value, attr, obj):\n if value is None:\n return None\n if self.mongo_world:\n # In mongo world, value a dict of cls and id\n return {'id': str(value['_id']), 'cls': value['_cls']}\n else:\n # In OO world, value is a :class:`umongo.data_object.Reference`\n return {'id': str(value.pk), 'cls': value.document_cls.__name__}\n\n def _deserialize(self, value, attr, data):\n if not isinstance(value, dict):\n raise ValidationError(_(\"Invalid value for generic reference field.\"))\n if value.keys() != {'cls', 'id'}:\n raise ValidationError(_(\"Generic reference must have `id` and `cls` fields.\"))\n try:\n _id = bson.ObjectId(value['id'])\n except ValueError:\n raise ValidationError(_(\"Invalid `id` field.\"))\n if self.mongo_world:\n return {'_cls': value['cls'], '_id': _id}\n else:\n return {'cls': value['cls'], 'id': _id}\n", "sub_path": "umongo/marshmallow_bonus.py", "file_name": "marshmallow_bonus.py", "file_ext": "py", "file_size_in_byte": 12039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "i18n.gettext", "line_number": 30, "usage_type": "call"}, {"api_name": "marshmallow.missing", "line_number": 55, "usage_type": "name"}, {"api_name": "marshmallow.ValidationError", "line_number": 57, "usage_type": "call"}, {"api_name": "marshmallow.Schema.get_attribute", "line_number": 74, "usage_type": "call"}, {"api_name": "marshmallow.Schema", "line_number": 74, "usage_type": "name"}, {"api_name": "marshmallow.missing", "line_number": 77, "usage_type": "name"}, {"api_name": "marshmallow.Schema", "line_number": 82, "usage_type": "name"}, {"api_name": "marshmallow.validates_schema", "line_number": 91, "usage_type": "call"}, {"api_name": "marshmallow.fields.DateTime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 104, "usage_type": "argument"}, {"api_name": "datetime.date", "line_number": 105, "usage_type": "argument"}, {"api_name": "datetime.datetime.combine", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 
106, "usage_type": "name"}, {"api_name": "datetime.datetime.min.time", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime.min", "line_number": 106, "usage_type": "attribute"}, {"api_name": "marshmallow.fields.DateTime", "line_number": 112, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 112, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 123, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 124, "usage_type": "argument"}, {"api_name": "datetime.date.tzinfo", "line_number": 129, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 129, "usage_type": "name"}, {"api_name": "datetime.date.tzinfo.utcoffset", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 130, "usage_type": "name"}, {"api_name": "datetime.date.replace", "line_number": 130, "usage_type": "call"}, {"api_name": "dateutil.tz.tzutc", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.date.tzinfo", "line_number": 133, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.date.tzinfo.utcoffset", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 134, "usage_type": "name"}, {"api_name": "datetime.date.astimezone", "line_number": 134, "usage_type": "call"}, {"api_name": "dateutil.tz.tzutc", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 135, "usage_type": "name"}, {"api_name": "datetime.date.replace", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 136, "usage_type": "name"}, {"api_name": "marshmallow.fields.Integer", "line_number": 139, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 139, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 150, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "name"}, {"api_name": "calendar.timegm", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 159, "usage_type": "name"}, {"api_name": "marshmallow.fields.Decimal", "line_number": 162, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 162, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 168, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 169, "usage_type": "call"}, {"api_name": "marshmallow.fields.Number", "line_number": 174, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 174, "usage_type": "name"}, {"api_name": "marshmallow.fields.Float", "line_number": 177, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 177, "usage_type": "name"}, {"api_name": "marshmallow.fields.Field", "line_number": 198, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 198, "usage_type": "name"}, {"api_name": "bson.ObjectId", "line_number": 210, "usage_type": "call"}, {"api_name": "bson.errors", "line_number": 211, "usage_type": "attribute"}, {"api_name": "marshmallow.ValidationError", "line_number": 212, "usage_type": "call"}, {"api_name": "i18n.gettext", "line_number": 212, "usage_type": "call"}, {"api_name": "marshmallow.fields.Field", "line_number": 215, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", 
"line_number": 215, "usage_type": "name"}, {"api_name": "marshmallow.compat.basestring", "line_number": 239, "usage_type": "argument"}, {"api_name": "marshmallow.base.SchemaABC", "line_number": 246, "usage_type": "argument"}, {"api_name": "marshmallow.base.SchemaABC", "line_number": 250, "usage_type": "argument"}, {"api_name": "marshmallow.compat.basestring", "line_number": 255, "usage_type": "argument"}, {"api_name": "marshmallow.fields._RECURSIVE_NESTED", "line_number": 256, "usage_type": "name"}, {"api_name": "marshmallow.class_registry.get_class", "line_number": 263, "usage_type": "call"}, {"api_name": "marshmallow.class_registry", "line_number": 263, "usage_type": "name"}, {"api_name": "marshmallow.compat.basestring", "line_number": 300, "usage_type": "argument"}, {"api_name": "marshmallow.utils.pluck", "line_number": 304, "usage_type": "call"}, {"api_name": "marshmallow.utils", "line_number": 304, "usage_type": "name"}, {"api_name": "marshmallow.ValidationError", "line_number": 308, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 313, "usage_type": "call"}, {"api_name": "bson.errors", "line_number": 314, "usage_type": "attribute"}, {"api_name": "marshmallow.ValidationError", "line_number": 315, "usage_type": "call"}, {"api_name": "i18n.gettext", "line_number": 315, "usage_type": "call"}, {"api_name": "marshmallow.fields.Field", "line_number": 318, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 318, "usage_type": "name"}, {"api_name": "marshmallow.ValidationError", "line_number": 339, "usage_type": "call"}, {"api_name": "i18n.gettext", "line_number": 339, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 341, "usage_type": "call"}, {"api_name": "i18n.gettext", "line_number": 341, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 343, "usage_type": "call"}, {"api_name": "marshmallow.ValidationError", "line_number": 345, "usage_type": "call"}, {"api_name": "i18n.gettext", "line_number": 345, "usage_type": "call"}]} +{"seq_id": "446609478", "text": "#========================================================================== \n# REINFORCE Working Code \n# \n# The code was initially written for UCSB Deep Reinforcement Learning Seminar 2018\n#\n# Authors: Jieliang (Rodger) Luo, Sam Green\n#\n# May 8th, 2018\n#==========================================================================\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Categorical \n\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntotal_episodes = 500\nlearning_rate = 0.001\ndiscount_factor = 0.99\nmax_steps = 1000\nrender = False\n\nADVANTAGE = 2 # 0: discounted reward; 1: baseline; 2: rewards to go\n\nclass Net(nn.Module):\n\n\tdef __init__(self, num_features, num_actions):\n\t\tsuper(Net, self).__init__()\n\t\tself.fc1 = nn.Linear(num_features, 128)\n\t\tself.fc2 = nn.Linear(128, num_actions)\n\n\tdef forward(self, x):\n\t\tx = F.relu(self.fc1(x))\n\t\tx = self.fc2(x)\n\t\treturn F.softmax(x)\n\nclass Agent:\n\n\tdef __init__(self, num_features, num_actions, learning_rate, discount_factor):\n\t\t\n\t\t# policy is a neural network\n\t\tself.policy = Net(num_features, num_actions)\n\t\tself.discount_factor = discount_factor\n\t\tself.optimizer = optim.Adam(self.policy.parameters(), lr=learning_rate)\n\t\t#self.optimizer = optim.SGD(self.policy.parameters(), lr=learning_rate)\n\n\tdef 
choose_action(self, state):\n\t\tstate = torch.Tensor(state)\n\t\tprobs = self.policy.forward(Variable(state))\n\n\t\t#stochastic policy\n\t\taction = Categorical(probs).sample().data[0]\n\n\t\treturn action, probs\n\n\tdef learn(self, states, actions, rewards, advantage):\n\t\t\n\t\tactions = Variable(torch.LongTensor(actions))\n\t\trewards = np.array(rewards)\n\n\t\tdiscounted = []\n\n\t\t# discount rewards \n\t\tif advantage == 0:\n\n\t\t\t#discounted reward part\n\t\t\tR = 0\n\t\t\tfor r in rewards[::-1]:\n\t\t\t\tR = r + self.discount_factor * R\n\t\t\t\tdiscounted.insert(0, R)\n\n\t\t\tdiscounted -= np.mean(discounted)\n\t\t\tdiscounted /= np.std(discounted)\t\n\t\t\tdiscounted = torch.Tensor(discounted)\n\n\t\t# baseline\n\t\telif advantage == 1:\n\t\t\tpass\n\n\t\t# rewards to go\n\t\telif advantage == 2:\n\t\t\t\n\t\t\tR = 0\n\t\t\tfor r in rewards[::-1]:\n\t\t\t\tR += r\n\t\t\t\tdiscounted.insert(0, R)\n\n\t\tpolicy_loss = []\n\t\tfor(state, action, reward) in zip(states, actions, discounted):\n\t\t\t\n\t\t\t#calculate log probability of the chosen action times the reward \n\t\t\tprobs = self.policy.forward(Variable(torch.Tensor(state)))\n\t\t\tm = Categorical(probs)\n\t\t\tlog_prob = m.log_prob(action)\n\t\t\tpolicy_loss.append(-log_prob*reward) # the negative sign is because the optimizer is doing gradient descent \n\n\t\tself.optimizer.zero_grad()\n\t\tpolicy_loss = torch.cat(policy_loss).sum() #concatenate -> sum \n\t\tpolicy_loss.backward()\n\t\tself.optimizer.step()\n\ndef main():\n\t\n\t# initial env\n\tenv = gym.make('Acrobot-v1')\n\tenv.seed(1)\n\tenv = env.unwrapped\n\n\tprint(env.action_space)\n\tprint(env.observation_space)\n\tprint(env.observation_space.high)\n\tprint(env.observation_space.low)\n\n\t# initial agent\n\tagent = Agent(\n\t\tnum_features = env.observation_space.shape[0],\n\t\tnum_actions = env.action_space.n,\n\t\tlearning_rate = learning_rate,\n\t\tdiscount_factor = discount_factor\n\t)\n\n\tall_rewards = []\n\n\tfor episode in range(total_episodes):\n\n\t\tdone = False\n\t\tS = [] # states\n\t\tA = [] # actions\n\t\tR = [] # rewards\n\n\t\tstate = env.reset()\n\t\t\n\t\tfor step in range(max_steps):\n\t\t\t\n\t\t\tif episode > 350 and render is True:\n\t\t\t\tenv.render()\n\n\t\t\taction, probs = agent.choose_action(state)\n\t\t\tstate_next, reward, done, info = env.step(action)\n\n\t\t\tS.append(state)\n\t\t\tA.append(action)\n\t\t\tR.append(reward)\n\n\t\t\tstate = state_next \n\n\t\t\tif done:\n\t\t\t\tbreak\t\t\t\t \n\t\t\t\t\n\t\tagent.learn(S, A, R, ADVANTAGE)\n\n\t\tprint(\"Episode: {}, reward: {}\".format(episode, np.sum(R)))\n\t\tall_rewards.append(np.sum(R))\n\n\t# draw a plot of all the rewards\n\tplt.plot(all_rewards)\n\tplt.show()\n\nif __name__ == \"__main__\":\n\tmain()", "sub_path": "3_gradient_intro/REINFORCE.py", "file_name": "REINFORCE.py", "file_ext": "py", "file_size_in_byte": 3841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.nn.Module", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.functional", 
"line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 103, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "471655718", "text": "import json\nfrom discord.ext import commands\nfrom utils import conjugator\n\n\nclass Japanese(commands.Cog):\n \"\"\"A cog that provides some useful japanese tools\"\"\"\n\n def __init__(self):\n\n with open(\"utils/japanese_verbs.json\") as f:\n verbs = json.load(f)\n\n for key, value in verbs.items():\n if value == 1:\n verbs[key] = conjugator.GodanVerbs(key)\n if value == 2:\n verbs[key] = conjugator.IchidanVerbs(key)\n if value == 3:\n verbs[key] = conjugator.IrregularVerbs(key)\n\n self.verbs = verbs\n\n @commands.command(aliases=[\"活用\", \"かつよう\", \"katsuyou\"])\n async def conjugate(self, ctx, verb):\n \"\"\"Conjugate the provided verb. 
Provide the verb in dictionary form\n\n        EXAMPLE: !conjugate 食べる\n        RESULT: A menu providing common conjugations for 食べる\n        \"\"\"\n        conjugated = self.verbs.get(verb)\n\n        if conjugated is None:\n            return await ctx.send(f\"Sorry, I don't know {verb}\")\n\n        await conjugated.display(ctx)\n\n\ndef setup(bot):\n    bot.add_cog(Japanese())\n", "sub_path": "cogs/japanese.py", "file_name": "japanese.py", "file_ext": "py", "file_size_in_byte": 1113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 6, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 6, "usage_type": "name"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.conjugator.GodanVerbs", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.conjugator", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.conjugator.IchidanVerbs", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.conjugator", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.conjugator.IrregularVerbs", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.conjugator", "line_number": 20, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 24, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "216989683", "text": "import os\r\nimport urllib\r\n\r\nimport pandas as pd\r\nimport requests\r\nfrom statsmodels.tsa.stattools import grangercausalitytests\r\n\r\nAUTHORIZE_ENDPOINT = \"https://www.fitbit.com\"\r\n\r\nCLIENT_ID = os.environ['FITBIT_ID']\r\nCLIENT_SECRET = os.environ['FITBIT_SECRET']\r\nREDIRECT_URI = 'https://127.0.0.1:3000/fitbit_auth'\r\n\r\n# generated placeholder data\r\nplaceholder_insights = pd.read_csv('insights.csv', header=None, names=['ds', 'y'])\r\nplaceholder_activity = pd.read_csv('/Users/datatron/Downloads/fitbit.csv', header=0, names=['ds', 'activity_time'])\r\n\r\n\r\ndef get_fitbit_json():\r\n    placeholder_activity.columns = ['date', 'value']\r\n    placeholder_activity['date'] = pd.to_datetime(placeholder_activity['date'])\r\n    return placeholder_activity.to_json(orient='records', date_format='iso', double_precision=2)\r\n\r\n\r\ndef compute_casuality(data=placeholder_insights, data2=placeholder_activity):\r\n    merged = pd.merge(data,data2, on=['ds','ds'])\r\n    m = merged[['y','activity_time']].as_matrix()\r\n    result = grangercausalitytests(m, maxlag=5)\r\n    return result[2][0]['lrtest'][1]\r\n\r\n\r\ndef get_fitbit_auth_url():\r\n    
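# builds the OAuth2 authorization query (scope, response_type=code, client_id, one-week expiry) and urlencodes it onto the authorize endpoint\r\n    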
data = {}\n data[\"scope\"] = \"activity weight\"\n data[\"response_type\"] = \"code\"\n data[\"client_id\"] = CLIENT_ID\n data[\"expires_in\"] = 604800\n\n return \"{}/oauth2/authorize?\".format(AUTHORIZE_ENDPOINT) + urllib.parse.urlencode(data)\n\n\ndef exchange_for_credentials_fitbit(code):\n headers = {\n \"Authorization\": 'Basic MjI4TVJZOmRhNWJlM2M1NGM5YWQ0MDY4ZjBmMmUwZDNkN2NlYTQy',\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n\n data = {\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": REDIRECT_URI,\n \"clientId\": CLIENT_ID\n }\n\n resp = requests.post('https://api.fitbit.com/oauth2/token', headers=headers, data=data)\n return resp.json()['access_token']\n\n\ndef get_activity_time_series(code):\n endpoint = 'https://api.fitbit.com/1/user/-/{resource_path}/date/{date}/{period}.json'\n resp = requests.get(\n endpoint.format(\n resource_path='activities/minutesFairlyActive',\n date='today',\n period='1y'\n ), headers={'Authorization': 'Bearer {}'.format(code)})\n return resp.json()[\"activities-minutesFairlyActive\"]\n\n", "sub_path": "fitbit_api.py", "file_name": "fitbit_api.py", "file_ext": "py", "file_size_in_byte": 2237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 26, "usage_type": "call"}, {"api_name": "statsmodels.tsa.stattools.grangercausalitytests", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 39, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "180369559", "text": "#\n# See README.md for instructions\n#\nimport os\nimport traceback\nfrom flask import Flask, jsonify, request, render_template\nfrom flask_cors import CORS\nimport json\n\napp = Flask(__name__)\napp.secret_key = os.urandom(16)\nCORS(app, supports_credentials=True)\n\ndata = {}\n\n\n@app.route(\"/\")\ndef hello():\n return render_template(\"clock-bos.html\")\n # return \"

<html><head><title>EvolveU test</title></head><body>API Server up and running..</body></html>
\"\n\n\n@app.route(\"/all\", methods=['POST', 'GET'])\ndef all():\n\n print(list(data.values()))\n return jsonify(list(data.values())), 200\n\n\nfirstKeyType = None\n\n\n@app.route(\"/add\", methods=['POST'])\ndef add():\n global data, firstKeyType\n\n content = request.get_json()\n\n if 'key' not in content:\n return jsonify({\"msg\": \"There must be a 'key' attribute\"}), 400\n\n key = content['key']\n\n if firstKeyType:\n if not isinstance(key, firstKeyType):\n return jsonify({\"msg\": \"Keys must be of the same type, that last one was \" + str(firstKeyType) + \" but this one is \" + str(type(key))}), 400\n else:\n firstKeyType = type(key)\n\n if key in data:\n return jsonify({\"msg\": \"You can not add '\" + str(key) + \"' again.\"}), 400\n\n data[key] = content\n\n return jsonify({}), 200\n\n\n@app.route(\"/delete\", methods=['POST'])\ndef delete():\n global data\n\n content = request.get_json()\n\n if 'key' not in content:\n return jsonify({\"msg\": \"There must be a 'key' attribute\"}), 400\n\n key = content['key']\n\n if key not in data:\n return jsonify({\"msg\": \"You can not delete '\" + str(key) + \"', it does not exist.\"}), 400\n\n del data[key]\n return jsonify({}), 200\n\n\n@app.route(\"/read\", methods=['POST'])\ndef read():\n global data\n\n content = request.get_json()\n\n if 'key' not in content:\n return jsonify({\"msg\": \"There must be a 'key' attribute\"}), 400\n\n key = content['key']\n\n if key not in data:\n return jsonify({\"msg\": \"You can not read '\" + str(key) + \"', it does not exist.\"}), 400\n\n return jsonify([data[key]]), 200\n\n\n@app.route(\"/update\", methods=['POST'])\ndef update():\n global data\n\n content = request.get_json()\n\n if 'key' not in content:\n return jsonify({\"msg\": \"There must be a 'key' attribute\"}), 400\n\n key = content['key']\n\n if key not in data:\n return jsonify({\"msg\": \"You can not update '\" + str(key) + \"', it does not exist.\"}), 400\n\n data[key] = content\n return jsonify({}), 200\n\n\n@app.route(\"/load\", methods=['GET'])\ndef load():\n global data\n print(\"data1:\", data)\n with open('data.json') as json_file:\n data = json.load(json_file)\n for d in data:\n print('Record: ', d)\n print(\"data2:\", data)\n return \"

<html><head><title>EvolveU test</title></head><body>
\" + str(len(data)) + \" records Loaded</body></html>
\"\n\n\n@app.route(\"/save\", methods=['GET'])\ndef save():\n global data\n with open('data.json', 'w') as outfile:\n json.dump(data, outfile)\n return \"

<html><head><title>EvolveU test</title></head><body>
\" + str(len(data)) + \" records Saved</body></html>
\"\n\n\n@app.route(\"/clear\", methods=['POST', 'GET'])\ndef clear():\n global data\n data = {}\n return jsonify(data), 200\n\n\n@app.route(\"/test\", methods=['POST', 'GET'])\ndef test():\n try:\n content = request.get_json()\n # print('in /test request: ',request)\n # print('in /test path: ',request.path)\n # print('in /test form: ',request.form)\n # print('in /test parms: ',request.args)\n # print('in /test json: ',request.get_json())\n return jsonify({'status': 'ok'}), 200\n except Exception as e:\n traceback.print_stack()\n print('**** Not a valid request. ', e)\n return jsonify('{}'), 400\n\n\nif __name__ == '__main__':\n print(\"--- Starting\", __file__)\n app.run(debug=True, use_reloader=True)\n", "sub_path": "api/web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 3758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 108, "usage_type": "call"}, {"api_name": "json.load", "line_number": 116, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 147, "usage_type": "call"}, {"api_name": "traceback.print_stack", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "156588259", "text": "# Copyright 2017 reinforce.io. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\n\nfrom six.moves import xrange\n\nfrom tensorforce.exceptions import ConfigError\nfrom tensorforce import preprocessing\n\npreprocessors = {\n 'concat': preprocessing.Concat,\n 'grayscale': preprocessing.Grayscale,\n 'imresize': preprocessing.Imresize,\n 'maximum': preprocessing.Maximum,\n 'normalize': preprocessing.Normalize,\n 'standardize': preprocessing.Standardize\n}\n\ndef global_seed(seed=42):\n \"\"\"\n Convenience function to control random seeding throughout the framework.\n :return: A numpy random number generator with a fixed seed.\n \"\"\"\n return np.random.RandomState(seed)\n\n\ndef get_path(continuous, episode):\n \"\"\"\n Finalises an episode and turns it into a dict pointing to numpy arrays.\n :return:\n \"\"\"\n path = {'states': np.concatenate(np.expand_dims(episode['states'], 0)),\n 'actions': np.array(episode['actions']),\n 'terminated': episode['terminated'],\n 'action_means': np.array(episode['action_means']),\n 'rewards': np.array(episode['rewards'])}\n\n if continuous:\n path['action_log_stds'] = np.concatenate(episode['action_log_stds'])\n\n return path\n\n\ndef repeat_action(environment, action, repeat_action=1):\n \"\"\"\n Repeat action `repeat_action_count` times. 
Cumulate reward and return last state.\n\n :param environment: Environment object\n :param action: Action to be executed\n :param repeat_action: How often to repeat the action\n :return: result dict\n \"\"\"\n if repeat_action <= 0:\n raise ValueError('repeat_action lower or equal zero')\n\n reward = 0.\n terminal_state = False\n for count in xrange(repeat_action):\n result = environment.execute_action(action)\n\n state = result['state']\n reward += result['reward']\n terminal_state = terminal_state or result['terminal_state']\n info = result.get('info', None)\n\n return dict(state=state,\n reward=reward,\n terminal_state=terminal_state,\n info=info)\n\n\ndef build_preprocessing_stack(config):\n stack = preprocessing.Stack()\n\n for preprocessor_conf in config:\n preprocessor_name = preprocessor_conf[0]\n\n preprocessor_params = []\n if len(preprocessor_conf) > 1:\n preprocessor_params = preprocessor_conf[1:]\n\n preprocessor_class = preprocessors.get(preprocessor_name, None)\n if not preprocessor_class:\n raise ConfigError(\"No such preprocessor: {}\".format(preprocessor_name))\n\n preprocessor = preprocessor_class(*preprocessor_params)\n stack += preprocessor\n\n return stack\n", "sub_path": "tensorforce/util/experiment_util.py", "file_name": "experiment_util.py", "file_ext": "py", "file_size_in_byte": 3422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tensorforce.preprocessing.Concat", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorforce.preprocessing", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorforce.preprocessing.Grayscale", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorforce.preprocessing", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorforce.preprocessing.Imresize", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorforce.preprocessing", "line_number": 30, "usage_type": "name"}, {"api_name": "tensorforce.preprocessing.Maximum", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorforce.preprocessing", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorforce.preprocessing.Normalize", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorforce.preprocessing", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorforce.preprocessing.Standardize", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorforce.preprocessing", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 56, "usage_type": "call"}, {"api_name": "six.moves.xrange", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorforce.preprocessing.Stack", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorforce.preprocessing", "line_number": 90, "usage_type": "name"}, {"api_name": "tensorforce.exceptions.ConfigError", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "68465255", "text": "import rgb\nimport pygame\nfrom data_parser 
import get_sys_config\nfrom os import path\nfrom vlc import MediaPlayer\n\npygame.font.init()\n\nASSETS_DIR= path.join(*(get_sys_config()[\"Assets\"]))\n\ndef isWithin(point, rect):\n\tif point[0] > rect[0] and point[0] < (rect[0] + rect[2]):\n\t\tif point[1] > rect[1] and point[1] < (rect[1] + rect[3]):\n\t\t\treturn True\n\treturn False\n\nclass Button:\n\tdef __init__(self, name, ret, pos, size, key, colour, font, font_colour, hl_colour, sel_colour):\n\t\tself.name= name\n\t\tself.coords= list(pos)\n\t\tself.size= list(size)\n\t\tself.rect= pygame.Rect(self.coords+self.size)\n\t\tself.key= key\n\t\tif ret is None:\n\t\t\tself.ret= name\n\t\telse:\n\t\t\tself.ret= ret\n\t\tself.def_colour= self.colour= colour\n\t\tself.font_colour= font_colour\n\t\tself.hl_colour= hl_colour\n\t\tself.sel_colour= sel_colour\n\t\tself.font= font\n\t\tself.text= font.render(self.name, True, self.font_colour)\n\t\tself.rect[2]= self.width= max(size[0], self.text.get_width())\n\t\tself.rect[3]= self.height= max(size[1], self.text.get_height())\n\t\n\tdef align_ctr(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.centerx= self.rect.left\n\t\telse:\n\t\t\tself.rect.midtop= pos\n\t\treturn self\n\n\nclass KeyStroke:\n\tdef __init__(self, name, key, ret):\n\t\tself.name= name\n\t\tself.key= key\n\t\tif ret is None:\n\t\t\tself.ret= name\n\t\telse:\n\t\t\tself.ret= ret\t\n\nclass SpKeyStroke(KeyStroke):\n\tdef __init__(self, name, key, ret):\n\t\tsuper().__init__(name, key, ret)\n\nclass ActionManager:\n\tdef __init__(self):\n\t\tASSETS_DIR = path.join(*get_sys_config()['Assets'])\n\t\tself.press_sound = MediaPlayer(f\"{ASSETS_DIR}BUTTONPRESS.mp3\")\n\t\tself.buttons= []\n\t\tself.scroll_buttons= []\n\t\tself.keystrokes= []\n\t\tself.sp_keystrokes= []\n\t\tself.scroll_items= set()\n\t\tself.scroll_pos= 0\n\t\tself.scroll_min= 0\n\t\tself.scroll_max= 600\n\t\n\tdef add_button(self, name, pos, size, ret=None, key= None, colour= rgb.BLACK, \\\n\t\t\t\tfont= pygame.font.Font(f\"{ASSETS_DIR}Barcade-R4LM.otf\", 22), \\\n\t\t\t\t\tfont_colour= rgb.YELLOW, hl_colour= rgb.GREY, sel_colour= rgb.GREEN, \\\n\t\t\t\t\t\tcanScroll= False, isCenter= False):\n\t\tif canScroll:\n\t\t\t\n\t\t\tif isCenter:\n\t\t\t\tself.scroll_buttons.append(Button(name, ret, pos, size, key, colour, font, font_colour, hl_colour, sel_colour).align_ctr())\n\t\t\telse:\n\t\t\t\tself.scroll_buttons.append(Button(name, ret, pos, size, key, colour, font, font_colour, hl_colour, sel_colour))\n\t\t\t\t\n\t\telse:\n\t\t\t\n\t\t\tif isCenter:\n\t\t\t\t\n\t\t\t\tself.buttons.append(Button(name, ret, pos, size, key, colour, font, font_colour, hl_colour, sel_colour).align_ctr())\n\t\t\telse:\n\t\t\t\tself.buttons.append(Button(name, ret, pos, size, key, colour, font, font_colour, hl_colour, sel_colour))\n\t\t\n\t\tif key != None:\n\t\t\tself.keystrokes.append(KeyStroke(name, key, ret))\n\t\n\tdef add_keystroke(self, name, key, ret= None):\n\t\tself.keystrokes.append(KeyStroke(name, key, ret))\n\t\n\tdef add_sp_keystroke(self, name, key, ret= None):\n\t\tself.sp_keystrokes.append(SpKeyStroke(name, key, ret))\n\t\n\tdef chk_actions(self, events):\n\t\tcurr_pos= pygame.mouse.get_pos()\n\t\tactions= []\n\t\t\n\t\tfor event in events:\n\t\t\t\n\t\t\tif event.type== pygame.QUIT:\n\t\t\t\tactions.append(\"Exit\")\n\t\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\n\t\t\t\tif event.button == 1:\n\t\n\t\t\t\t\tfor button in self.buttons + self.scroll_buttons:\n\t\n\t\t\t\t\t\tif isWithin(curr_pos, 
button.rect):\r\n\t\t\t\t\t\t\tself.press_sound.play()\r\n\t\t\t\t\t\t\tbutton.colour= button.sel_colour\r\n\t\r\n\t\t\t\t\t\t\tprint(f"Button \\"{button.name}\\" clicked, return value : \\"{button.ret}\\"")\r\n\t\t\t\t\t\t\tactions.append(button.ret)\r\n\t\t\t\t\t\t\tbreak\r\n\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint(f"No buttons clicked! Cursor position : {curr_pos}")\r\n\t\t\t\t\r\n\t\t\t\telif event.button == 4:\r\n\t\t\t\t\tprint(f"Mouse Button 4 : Scroll up, scroll_pos : {self.scroll_pos}")\r\n\r\n\t\t\t\t\tif self.scroll_pos > self.scroll_min:\r\n\t\t\t\t\t\tself.scroll_pos -= 30\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tfor button in self.scroll_buttons:\r\n\t\t\t\t\t\t\tbutton.rect[1] += 30\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tfor item in self.scroll_items:\r\n\t\t\t\t\t\t\titem.rect[1] += 30\r\n\t\t\t\t\r\n\t\t\t\telif event.button == 5:\r\n\t\t\t\t\tprint(f"Mouse Button 5 : Scroll down, scroll_pos : {self.scroll_pos}")\r\n\t\t\t\t\t\r\n\t\t\t\t\tif self.scroll_pos < self.scroll_max:\r\n\t\t\t\t\t\tself.scroll_pos += 30\r\n\t\r\n\t\t\t\t\t\tfor button in self.scroll_buttons:\r\n\t\t\t\t\t\t\tbutton.rect[1] -= 30\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tfor item in self.scroll_items:\r\n\t\t\t\t\t\t\titem.rect[1] -= 30\r\n\r\n\t\t\t# elif event.type == pygame.MOUSEBUTTONUP:\r\n\t\t\t\t# for button in self.buttons:\r\n\t\t\t\t\t# button.colour= button.def_colour\r\n\r\n\t\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\t\t\t\t\r\n\t\t\t\tfor keystroke in self.keystrokes:\r\n\t\t\t\t\tif pygame.key.name(event.key) == keystroke.key:\r\n\t\t\t\t\t\t# print(f"Keystroke \\"{keystroke.name}\\" key \\"{keystroke.key}\\" pressed, return value : \\"{keystroke.ret}\\"")\r\n\t\t\t\t\t\tactions.append(keystroke.ret)\r\n\r\n\t\t\t\tfor keystroke in self.sp_keystrokes:\r\n\t\t\t\t\tif pygame.key.name(event.key) == keystroke.key:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t# print(f"Special Keystroke \\"{keystroke.name}\\" key \\"{keystroke.key}\\" pressed, return value : \\"{keystroke.ret}\\" (down)")\r\n\t\t\t\t\t\tactions.append(f"{keystroke.ret} (down)")\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\telif event.type == pygame.KEYUP:\r\n\t\t\t\t\r\n\t\t\t\tfor keystroke in self.sp_keystrokes:\r\n\t\t\t\t\tif pygame.key.name(event.key) == keystroke.key:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t# print(f"Special Keystroke \\"{keystroke.name}\\" key \\"{keystroke.key}\\" depressed, return value : \\"{keystroke.ret}\\" (up)")\r\n\t\t\t\t\t\tactions.append(f"{keystroke.ret} (up)")\r\n\t\t\t\t\r\n\t\t\r\n\t\tfor button in self.buttons + self.scroll_buttons:\r\n\t\t\tif isWithin(curr_pos, button.rect):\r\n\t\t\t\tbutton.colour= rgb.GREY\r\n\t\t\telse:\r\n\t\t\t\tbutton.colour= button.def_colour\r\n\t\t\r\n\t\treturn actions\r\n\t\t\r\n\tdef draw_buttons(self, screen):\r\n\t\tfor button in self.buttons:\r\n# \t\t\ttext= button.font.render(button.name, 1, button.font_colour)\r\n# \t\t\ttext_len= text.get_width()\r\n# \t\t\tbutton.rect[2]= max( ( text_len, button.rect[2]))\r\n\t\r\n\t\t\tpygame.draw.rect(screen, button.colour, button.rect)\r\n\t\t\tscreen.blit(button.text, button.rect)\r\n\t\t\t\r\n\t\tfor button in self.scroll_buttons:\r\n\r\n\t\t\tpygame.draw.rect(screen, button.colour, button.rect)\r\n\t\t\tscreen.blit(button.text, button.rect)\r\n\r\n\r\nclass TextLine:\r\n\tdef __init__(self, text, font, pos, size= (50, 50), font_colour= rgb.WHITE):\r\n\t\tself.content= font.render(text, True, font_colour)\r\n\t\twidth= max(size[0], self.content.get_width())\r\n\t\theight= max(size[1], self.content.get_height())\r\n\t\tself.rect= pygame.Rect(pos[0], pos[1], width, height)\r\n\t\t\r\n\tdef align_ctr(self, pos= None):\r\n\t\tif pos is None:\r\n\t\t\tself.rect.center= self.rect.topleft\r\n\t\telse:\r\n\t\t\tself.rect.center= pos\r\n\t\t\r\n\t\treturn 
self\n\t\n\tdef align_top_ctr(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.centerx= self.rect.left\n\t\telse:\n\t\t\tself.rect.midtop= pos\n\t\t\n\t\treturn self\n\n\tdef align_top_right(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.topright= self.rect.topleft\n\t\telse:\n\t\t\tself.rect.topright= pos\n\t\t\n\t\treturn self\n\t\n\tdef align_mid_left(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.midleft= self.rect.topleft\n\t\telse:\n\t\t\tself.rect.midleft= pos\n\t\treturn self\n\t\n\tdef draw(self, screen):\n\t\tscreen.blit(self.content, self.rect)\n\n\t\t\nclass TextBox:\n\tdef __init__(self, text, font, pos, size= (400, 50), font_colour= rgb.WHITE):\n\t\twords= text.split(' ')\n\t\tcontents= []\n\t\tfor word in words:\n\t\t\tword_img= font.render(''.join([word, ' ']), True, font_colour)\n\t\t\tcontents.append(word_img)\n\t\t\t\n\t\tself.lines= []\n\t\tvert_offset= 0\n\t\t\n\t\t\n\t\twhile len(contents) > 0:\n\n\t\t\tline_width= contents[0].get_width()\n\t\n\t\t\tmax_height= contents[0].get_height()\t\t\n\t\t\t\n\t\t\tline= [(contents[0], [pos[0], pos[1] + vert_offset, line_width, max_height])]\n\t\n\t\t\tcontents.remove(contents[0])\n\t\t\t\n\t\t\twhile line_width <= size[0] and len(contents) > 0:\n\t\t\t\t\n\t\t\t\tcurr_width= contents[0].get_width()\n\t\t\t\t\n\t\t\t\tcurr_height= contents[0].get_height()\n\t\t\t\t\n\t\t\t\tline.append( (contents[0], [pos[0] + line_width, pos[1] + vert_offset, curr_width, curr_height] ))\n\t\t\t\t\n\t\t\t\tline_width += curr_width\n\t\t\t\t\n\t\t\t\tmax_height= max(max_height, curr_height)\n\t\t\t\t\n\t\t\t\tcontents.remove(contents[0])\n\t\n\t\t\tself.lines.append(line)\n\t\t\tvert_offset += max_height\n\t\tself.rect= pygame.Rect((pos), (size[0], vert_offset))\n\n\tdef align_ctr(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.center= self.rect.topleft\n\t\telse:\n\t\t\tself.rect.center= pos\n\t\t\n\t\treturn self\n\t\n\tdef draw(self, screen):\n\t\t\n\t\tfor line in self.lines:\n\t\t\tfor word in line:\n\t\t\t\tscreen.blit(word[0], word[1])\n\t\t\nclass Sprite:\n\tdef __init__(self, img, pos):\n\t\tself.img= pygame.image.load(img).convert_alpha()\n\t\tself.rect= pygame.Rect(pos, self.img.get_size())\n\t\n\tdef draw(self, screen):\n\t\tscreen.blit(self.img, (self.rect[0]-self.img.get_width() / 2, self.rect[1]-self.img.get_height() / 2))\n\t\t#screen.blit(self.img, self.img.get_rect().topleft)\n\t\n\tdef draw_raw(self, screen):\n\t\tscreen.blit(self.img, self.rect)\n\t\n\tdef align_ctr(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.center= self.rect.topleft\n\t\telse:\n\t\t\tself.rect.center= pos\n\t\t\n\t\treturn self\n\t\n\tdef align_top_ctr(self, pos= None):\n\t\tif pos is None:\n\t\t\tself.rect.centerx= self.rect.left\n\t\telse:\n\t\t\tself.rect.midtop= pos\n\t\t\n\t\treturn self\t\t", "sub_path": "UIManager.py", "file_name": "UIManager.py", "file_ext": "py", "file_size_in_byte": 8713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.font.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "name"}, {"api_name": "data_parser.get_sys_config", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 60, "usage_type": "name"}, {"api_name": "data_parser.get_sys_config", "line_number": 60, "usage_type": "call"}, {"api_name": "vlc.MediaPlayer", "line_number": 61, "usage_type": "call"}, {"api_name": "rgb.BLACK", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 72, "usage_type": "attribute"}, {"api_name": "rgb.YELLOW", "line_number": 73, "usage_type": "attribute"}, {"api_name": "rgb.GREY", "line_number": 73, "usage_type": "attribute"}, {"api_name": "rgb.GREEN", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.key.name", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pygame.key.name", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.key.name", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 168, "usage_type": "attribute"}, {"api_name": "rgb.GREY", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 188, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 193, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 193, "usage_type": "attribute"}, {"api_name": "rgb.WHITE", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 202, "usage_type": "call"}, {"api_name": "rgb.WHITE", "line_number": 240, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 277, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 295, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 295, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 296, "usage_type": "call"}]} +{"seq_id": "394744498", "text": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom networkx.utils import make_str\nimport pydot_ng as pydot\n\nfrom nailgun import consts\n\n\nclass GraphVisualization(object):\n    \"\"\"Wrapper for DeploymentGraph used for graph visualization.\"\"\"\n\n    def __init__(self, graph):\n        self._graph = graph\n\n    def get_dotgraph(self, tasks=None, parents_for=None, remove=None):\n        \"\"\"Get a graph representation in DOT format.\n\n        :param tasks: list of tasks that will be used in deployment\n        :param parents_for: name of the task whose parents will be shown\n        :param remove: type of tasks to remove from graph visualization\n        \"\"\"\n        graph = self._graph.copy()\n\n        if tasks:\n            graph.only_tasks(tasks)\n\n        if parents_for:\n            parents = graph.predecessors(parents_for)\n            parents.append(parents_for)\n            graph = graph.subgraph(parents)\n\n        if not remove:\n            remove = []\n\n        # NOTE(prmtl) it is not guaranteed that node default\n        # values will be put at the top of the DOT file so we must be sure\n        # that each node will have correct attributes\n        default_node_attrs = {\n            'color': 'yellowgreen',\n            'style': 'filled'\n        }\n        type_node_attrs_map = {\n            consts.ORCHESTRATOR_TASK_TYPES.group: {\n                'color': 'lightskyblue',\n                'shape': 'box',\n                'style': 'filled, rounded',\n            },\n            consts.ORCHESTRATOR_TASK_TYPES.skipped: {\n                'color': 'gray95',\n            },\n            consts.ORCHESTRATOR_TASK_TYPES.stage: {\n                'shape': 'rect',\n                'color': 'red',\n                'style': 'filled',\n            },\n        }\n\n        # set graph attributes for nodes\n        for name, data in graph.nodes_iter(data=True):\n            task_type = data.get('type')\n            if task_type in remove:\n                graph.remove_node(name)\n                continue\n            if data.get('skipped'):\n                graph.node[name] = type_node_attrs_map[\n                    consts.ORCHESTRATOR_TASK_TYPES.skipped]\n            else:\n                graph.node[name] = type_node_attrs_map.get(\n                    task_type, default_node_attrs)\n        return to_pydot(graph)\n\n\n# NOTE(prmtl): Adapted from networkx library to work with pydot_ng\ndef to_pydot(N, strict=True):\n    \"\"\"Return a pydot graph from a NetworkX graph N.\n\n    Parameters\n    ----------\n    N : NetworkX graph\n      A graph created with NetworkX\n\n    Examples\n    --------\n    >>> import networkx as nx\n    >>> K5 = nx.complete_graph(5)\n    >>> P = nx.to_pydot(K5)\n\n    Notes\n    -----\n\n\n    \"\"\"\n    # set Graphviz graph type\n    if N.is_directed():\n        graph_type = 'digraph'\n    else:\n        graph_type = 'graph'\n    strict = N.number_of_selfloops() == 0 and not N.is_multigraph()\n\n    name = N.graph.get('name')\n    graph_defaults = N.graph.get('graph', {})\n    if name is None:\n        P = pydot.Dot(graph_type=graph_type, strict=strict, **graph_defaults)\n    else:\n        P = pydot.Dot('\"%s\"' % name, graph_type=graph_type, strict=strict,\n                      **graph_defaults)\n    try:\n        P.set_node_defaults(**N.graph['node'])\n    except KeyError:\n        pass\n    try:\n        P.set_edge_defaults(**N.graph['edge'])\n    except KeyError:\n        pass\n\n    for n, nodedata in N.nodes_iter(data=True):\n        str_nodedata = dict((k, make_str(v)) for k, v in nodedata.items())\n        p = pydot.Node(make_str(n), **str_nodedata)\n        P.add_node(p)\n\n    if N.is_multigraph():\n        for u, v, key, edgedata in N.edges_iter(data=True, keys=True):\n            str_edgedata = dict((k, make_str(v)) for k, v in edgedata.items())\n            edge = pydot.Edge(make_str(u), make_str(v),\n                              key=make_str(key), **str_edgedata)\n            P.add_edge(edge)\n    else:\n        for u, v, edgedata in N.edges_iter(data=True):\n            str_edgedata = dict((k, make_str(v)) for k, v in edgedata.items())\n            edge = pydot.Edge(make_str(u), make_str(v), **str_edgedata)\n            P.add_edge(edge)\n    return P\n", "sub_path": 
"nailgun/nailgun/orchestrator/graph_visualization.py", "file_name": "graph_visualization.py", "file_ext": "py", "file_size_in_byte": 4722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "nailgun.consts.ORCHESTRATOR_TASK_TYPES", "line_number": 57, "usage_type": "attribute"}, {"api_name": "nailgun.consts", "line_number": 57, "usage_type": "name"}, {"api_name": "nailgun.consts.ORCHESTRATOR_TASK_TYPES", "line_number": 62, "usage_type": "attribute"}, {"api_name": "nailgun.consts", "line_number": 62, "usage_type": "name"}, {"api_name": "nailgun.consts.ORCHESTRATOR_TASK_TYPES", "line_number": 65, "usage_type": "attribute"}, {"api_name": "nailgun.consts", "line_number": 65, "usage_type": "name"}, {"api_name": "nailgun.consts.ORCHESTRATOR_TASK_TYPES", "line_number": 80, "usage_type": "attribute"}, {"api_name": "nailgun.consts", "line_number": 80, "usage_type": "name"}, {"api_name": "pydot_ng.Dot", "line_number": 117, "usage_type": "call"}, {"api_name": "pydot_ng.Dot", "line_number": 119, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 131, "usage_type": "call"}, {"api_name": "pydot_ng.Node", "line_number": 132, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 132, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 137, "usage_type": "call"}, {"api_name": "pydot_ng.Edge", "line_number": 138, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 138, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 139, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 143, "usage_type": "call"}, {"api_name": "pydot_ng.Edge", "line_number": 144, "usage_type": "call"}, {"api_name": "networkx.utils.make_str", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "403547842", "text": "import sys\n\nimport requests\n\n# allow imports from the parent directory\nsys.path.append(\".\")\n\n\nclass Weather:\n    def __init__(self):\n        self.location_name: str = \"\"\n        # wind direction in degrees; a value of 0 generally means no wind\n        self.wind_direction: str = \"\"\n        # wind speed in metres/second\n        self.wind_speed: int = 0\n        # maximum hourly gust speed in metres/second\n        self.h_fx: int = 0\n        # temperature in degrees Celsius\n        self.temperature: int = 0\n        # maximum temperature\n        self.max_temperature: int = 0\n        # minimum temperature\n        self.min_temperature: int = 0\n        # relative humidity as a percentage\n        self.humidity: int = 0\n        # accumulated daily rainfall in millimetres\n        self.rain: int = 0\n        # UV index\n        self.uvi: int = 0\n        # weather description (in Chinese)\n        self.wx: str = \"\"\n        # air quality\n        self.aqi: int = 0\n\n    def fetch_data(self, lat, lng):\n        \"\"\"Fetch the weather data.\n\n        Args:\n            lat (float): latitude\n            lng (float): longitude\n\n        Data Source:\n            app.wemega.tw: the \"天氣即時預報\" (instant weather forecast) App\n        \"\"\"\n        response = requests.get(f\"https://app.wmega.tw/v1/all/{lat}/{lng}\").json()\n        return self.parse_data(data=response)\n\n    def parse_data(self, data):\n        \"\"\"Parse the fetched data.\n\n        Args:\n            data (JSON): weather data payload\n        \"\"\"\n        self.location_name = data[\"now\"][\"name\"]\n        self.wind_direction = data[\"now\"][\"wdir\"]\n        self.wind_speed = data[\"now\"][\"wdsd\"]\n        self.h_fx = data[\"now\"][\"h_fx\"]\n        self.temperature = data[\"now\"][\"temp\"]\n        self.humidity = data[\"now\"][\"humd\"]\n        self.rain = data[\"now\"][\"h24r\"]\n        self.uvi = data[\"now\"][\"uviValue\"]\n        self.max_temperature = data[\"now\"][\"maxT\"]\n        self.min_temperature = data[\"now\"][\"minT\"]\n        self.wx = data[\"now\"][\"wx\"]\n        self.aqi = data[\"aqi\"][\"content\"][\"aqiValue\"]\n", "sub_path": "project/weather/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1932, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "622528561", "text": "\"\"\"\nUtility functions that operate on landlab grids.\n------------------------------------------------\n\n\"\"\"\n\n\nimport numpy as np\nfrom six.moves import range\n\n\ndef resolve_values_on_active_links(grid, active_link_values):\n \"\"\"Resolve active-link values into x and y directions.\n\n Takes a set of values defined on active links, and returns those values\n resolved into the x and y directions. Two link arrays are returned:\n x, then y.\n\n Parameters\n ----------\n grid : ModelGrid\n A ModelGrid.\n active_link_values : ndarray\n Values on active links.\n\n Returns\n -------\n tuple of ndarray\n Values resolved into x-component and y-component.\n \"\"\"\n link_lengths = grid.length_of_link[grid.active_links]\n return (\n np.multiply(((grid.node_x[grid._activelink_tonode] -\n grid.node_x[grid._activelink_fromnode]) /\n link_lengths), active_link_values),\n np.multiply(((grid.node_y[grid._activelink_tonode] -\n grid.node_y[grid._activelink_fromnode]) /\n link_lengths), active_link_values))\n\n\ndef resolve_values_on_links(grid, link_values):\n \"\"\"Resolve link values into x and y directions.\n\n Takes a set of values defined on active links, and returns those values\n resolved into the x and y directions. Two link arrays are returned:\n x, then y.\n\n Parameters\n ----------\n grid : ModelGrid\n A ModelGrid.\n link_values : ndarray\n Values on links.\n\n Returns\n -------\n tuple of ndarray\n Values resolved into x-component and y-component.\n \"\"\"\n return (\n np.multiply(((grid.node_x[grid.node_at_link_head] -\n grid.node_x[grid.node_at_link_tail]) /\n grid.length_of_link), link_values),\n np.multiply(((grid.node_y[grid.node_at_link_head] -\n grid.node_y[grid.node_at_link_tail]) /\n grid.length_of_link), link_values))\n\n\ndef calculate_flux_divergence_at_nodes(grid, active_link_flux, out=None):\n \"\"\"Calculate flux divergence at grid nodes.\n\n Same as calculate_flux_divergence_at_active_cells, but works with and\n returns a list of net unit fluxes that corresponds to all nodes, rather\n than just active cells.\n\n Note that we don't compute net unit fluxes at\n boundary nodes (which don't have active cells associated with them, and\n often don't have cells of any kind, because they are on the perimeter),\n but simply return zeros for these entries. The advantage is that the\n caller can work with node-based arrays instead of active-cell-based\n arrays.\n\n Parameters\n ----------\n grid : ModelGrid\n A ModelGrid.\n active_link_flux : ndarray\n Fluxes at active links.\n out : ndarray, optional\n Buffer to hold the result.\n\n Returns\n -------\n ndarray\n Net unit fluxes at nodes.\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> from landlab.grid.grid_funcs import calculate_flux_divergence_at_nodes\n\n >>> grid = RasterModelGrid((4, 5))\n >>> link_flux = np.ones(grid.number_of_active_links, dtype=float)\n >>> flux_at_node = calculate_flux_divergence_at_nodes(grid, link_flux)\n ... # doctest: +NORMALIZE_WHITESPACE\n >>> flux_at_node\n array([ 0., 1., 1., 1., 0.,\n 1., 0., 0., 0., -1.,\n 1., 0., 0., 0., -1.,\n 0., -1., -1., -1., 0.])\n >>> flux_at_node[grid.core_nodes]\n array([ 0., 0., 0., 0., 0., 0.])\n\n This is *deprecated*. 
Instead use ``calc_flux_div_at_node``. Notice that\n    fluxes at non-core nodes are handled differently. However, these boundary\n    nodes don't have \"flux\" anyway and so should be ignored.\n\n    >>> grid = RasterModelGrid((4, 5))\n    >>> link_flux = grid.zeros(at='link')\n    >>> link_flux[grid.active_links] = 1.\n    >>> flux_at_node = grid.calc_flux_div_at_node(link_flux)\n    >>> flux_at_node\n    array([ 0.,  0.,  0.,  0.,  0.,\n            0.,  0.,  0.,  0.,  0.,\n            0.,  0.,  0.,  0.,  0.,\n            0.,  0.,  0.,  0.,  0.])\n    >>> flux_at_node[grid.core_nodes]\n    array([ 0.,  0.,  0.,  0.,  0.,  0.])\n    \"\"\"\n    assert len(active_link_flux) == grid.number_of_active_links, (\n        \"incorrect length of active_link_flux array\")\n\n    # If needed, create net_unit_flux array\n    if out is None:\n        out = grid.empty(at='node')\n    out.fill(0.)\n    net_unit_flux = out\n\n    assert len(net_unit_flux) == grid.number_of_nodes\n\n    # Create a flux array one item longer than the number of active links.\n    # Populate it with flux times face width (so, total flux rather than\n    # unit flux). Here, face_width is an array with one entry for each\n    # active link, so we are multiplying the unit flux at each link by the\n    # width of its corresponding face.\n    flux = np.zeros(len(active_link_flux) + 1)\n    flux[:len(active_link_flux)] = active_link_flux * grid.width_of_face\n\n    # Next, we need to add up the incoming and outgoing fluxes.\n    #\n    # Notes:\n    #    1) because \"net flux\" is defined as positive outward, we add the\n    #       outflux and subtract the influx\n    #    2) the loop is over the number of rows in the inlink/outlink\n    #       matrices. This dimension is equal to the maximum number of links\n    #       attached to a node, so should be of order 6 or 7 and won't\n    #       generally increase with the number of nodes in the grid.\n    #\n    for i in range(np.size(grid._node_active_inlink_matrix, 0)):\n        net_unit_flux += flux[grid._node_active_outlink_matrix[i][:]]\n        net_unit_flux -= flux[grid._node_active_inlink_matrix[i][:]]\n\n    # Now divide by cell areas ... 
where there are core cells.\n node_at_active_cell = grid.node_at_cell[grid.core_cells]\n net_unit_flux[node_at_active_cell] /= grid.area_of_cell[grid.core_cells]\n\n return net_unit_flux\n", "sub_path": "landlab/grid/grid_funcs.py", "file_name": "grid_funcs.py", "file_ext": "py", "file_size_in_byte": 5904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.multiply", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "299554347", "text": "import os\nimport torch\nimport logging\nimport Filesystem\n\nSTART_SENTENCE_TOKEN = \"[CLS]\"\nEND_SEP_TOKEN = \"[SEP]\"\n\n\ndef compute_sentence_dBert_vector(model, tokenizer, sentence_text):\n\n toks = tokenizer.tokenize(START_SENTENCE_TOKEN + sentence_text + END_SEP_TOKEN)\n indices = tokenizer.convert_tokens_to_ids(toks)\n\n segment_ids = [1] * len(indices)# single-sentence inputs only require a series of 1s\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor(indices).unsqueeze(0)\n segment_tensor = torch.tensor(segment_ids).unsqueeze(0)\n\n with torch.no_grad():\n last_layer = model(tokens_tensor, segment_tensor)[0]\n # last_hidden_state: torch.FloatTensor of shape (batch_size, sequence_length, hidden_size) (here [1, 17, 768])\n\n # To get a single vector for our entire sentence we have multiple application-dependent choices, in terms of\n # methods (mean, max, concatenation, etc.) and layers used (last four, all, last layer, etc.).\n # A simple approach is to average the (/second-to-)last hidden layer of each token, producing one 768-length vector\n\n sentence_embedding = torch.mean(last_layer, dim=1)[0] # batch size 1\n logging.debug(sentence_embedding.shape)\n return sentence_embedding\n\n", "sub_path": "VocabularyAndEmbeddings/EmbedWithDBERT.py", "file_name": "EmbedWithDBERT.py", "file_ext": "py", "file_size_in_byte": 1253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.tensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "464627774", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Copyright © 2014 German Neuroinformatics Node (G-Node)\n\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted under the terms of the BSD License. 
See\n LICENSE file in the root of the Project.\n\n Author: Jan Grewe \n\n This tutorial shows how regulary sampled data is stored in nix-files.\n See https://github.com/G-node/nix/wiki for more information.\n\"\"\"\n\nimport nixio as nix\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\ndef create_sinewave(duration=1, freq=10, stepsize=0.01):\n x = np.arange(0, duration*2*np.pi, stepsize)\n y = np.sin(freq*x)\n return x, y\n\n\ndef plot_data(data_array):\n x_axis = data_array.dimensions[0]\n x = x_axis.axis(data_array.data.shape[0])\n y = data_array.data[:]\n plt.plot(x, y, marker=\".\", markersize=5)\n plt.xlabel(x_axis.label + \" [\" + x_axis.unit + \"]\")\n plt.ylabel(data_array.label + \" [\" + data_array.unit + \"]\")\n plt.title(data_array.name)\n plt.xlim(0, np.max(x))\n plt.ylim((1.1 * np.min(y), 1.1 * np.max(y)))\n plt.show()\n\n\nif __name__ == \"__main__\":\n # fake some data\n duration = 1.\n frequency = 5\n stepsize = 0.02\n x, y = create_sinewave(duration, frequency, stepsize)\n\n # create a new file overwriting any existing content\n file_name = 'regular_data_example.h5'\n file = nix.File.open(file_name, nix.FileMode.Overwrite)\n\n # create a 'Block' that represents a grouping object. Here, the recording session.\n # it gets a name and a type\n block = file.create_block(\"block name\", \"nix.session\")\n\n # create a 'DataArray' to take the sinewave, add some information about the signal\n data = block.create_data_array(\"sinewave\", \"nix.regular_sampled\", data=y)\n data.unit = \"mV\"\n data.label = \"voltage\"\n # add a descriptor for the xaxis\n dim = data.append_sampled_dimension(stepsize)\n dim.unit = \"s\"\n dim.label = \"time\"\n dim.offset = 0.0 # optional\n\n # let's plot the data from the stored information\n plot_data(data)\n file.close()\n", "sub_path": "docs/source/examples/regularlySampledData.py", "file_name": "regularlySampledData.py", "file_ext": "py", "file_size_in_byte": 2106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pylab.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pylab.ylabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlim", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pylab.ylim", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pylab.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 39, 
"usage_type": "name"}, {"api_name": "nixio.File.open", "line_number": 51, "usage_type": "call"}, {"api_name": "nixio.File", "line_number": 51, "usage_type": "attribute"}, {"api_name": "nixio.FileMode", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "351220660", "text": "from bs4 import BeautifulSoup\nfrom mobile_extractor import *\nimport urllib\nimport csv\nimport urllib.request\nfrom city_calc import *\n\n\ndef innerHTML(element):\n return element.decode_contents(formatter=\"html\")\n\ndef get_name(body):\n\treturn body.find('span', {'class':'jcn'}).a.string\n\ndef get_phone_number(body):\n\ttry:\n\t\tscore = str(body.find('p', {'class':'contact-info'}).span)\n\t\t# print(score)\n\t\t# print(type(score))\n\t\tph_no = number_extract(score)\n\t\t# print(ph_no)\n\t\t# print(score)\n\t\treturn ph_no\n\texcept AttributeError:\n\t\treturn ''\n\ndef get_address(body):\n\treturn body.find('span', {'class':'mrehover'}).text.strip()\n\ndef get_location(body):\n\ttext = body.find('a', {'class':'rsmap'})\n\tif text == None:\n\t\treturn\n\ttext_list = text['onclick'].split(\",\")\n\t\n\tlatitutde = text_list[3].strip().replace(\"'\", \"\")\n\tlongitude = text_list[4].strip().replace(\"'\", \"\")\n\t\n\treturn latitutde + \", \" + longitude\n\ndef contact_hospital(cityy):\n\n\tservice_count = 1\n\n\tfields = ['Name', 'Phone', 'Address', 'Location']\n\tout_file = open('Hospital_Contact.csv','w')\n\tcsvwriter = csv.DictWriter(out_file, delimiter=',', fieldnames=fields)\n\n\t# Write fields first\n\t#csvwriter.writerow(dict((fn,fn) for fn in fields))\n\n\turl=\"https://www.justdial.com/\"+cityy+\"/Hospital\"\n\tprint(url)\n\treq = urllib.request.Request(url, headers={'User-Agent' : \"Magic Browser\"}) \n\tpage = urllib.request.urlopen( req )\n\t# page=urllib2.urlopen(url)\n\n\tsoup = BeautifulSoup(page.read(), \"html.parser\")\n\tservices = soup.find_all('li', {'class': 'cntanr'})\n\n\t# Iterate through the 10 results in the page\n\tfor service_html in services:\n\n\t\t# Parse HTML to fetch data\n\t\tdict_service = {}\n\t\tname = get_name(service_html)\n\t\tphone = get_phone_number(service_html)\n\t\tif len(phone) > 10 or '-' in phone:\n\t\t\tcontinue\n\t\taddress = get_address(service_html)\n\t\tlocation = get_location(service_html)\n\t\tif name != None:\n\t\t\tdict_service['Name'] = name\n\t\tif phone != None:\n\t\t\t# print('getting phone number')\n\t\t\tdict_service['Phone'] = phone\n\t\tif address != None:\n\t\t\tdict_service['Address'] = address\n\t\tif location != None:\n\t\t\tdict_service['Address'] = location\n\n\t\t# Write row to CSV\n\t\tcsvwriter.writerow(dict_service)\n\n\t\tprint(\"#\" + str(service_count) + \" \" , dict_service)\n\t\tservice_count += 1\n\n\tout_file.close()\n\n\n\n\n\n\n", "sub_path": "SIH_Final-master/jd_scraper_hospital.py", "file_name": "jd_scraper_hospital.py", "file_ext": "py", "file_size_in_byte": 2253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "csv.DictWriter", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 54, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 54, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 55, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "51125941", "text": "#!/usr/bin/env python\n\"\"\"\nAdd seizure names to 
LR results\nInput: LR_results, name = \"obs_LRs.{species}.txt\"\n Seizure file matching sample names to seizures\n\"\"\"\n\nimport argparse\n\ndef run(input_file, seizure_file): \n with open(input_file, 'r') as infile:\n header = infile.readline().strip().split('\\t')\n file_head = header\n samps = {}\n for i, line in enumerate(infile):\n line = line.strip().split('\\t')\n samps[i] = line\n with open(seizure_file, 'r') as infile:\n header = infile.readline()\n seizures = {}\n for line in infile:\n line = line.strip().split('\\t')\n seizures[line[1]] = line[0]\n output_file = input_file.replace('.txt', '.seizures.txt')\n with open(output_file, 'w') as outfile:\n outfile.write('\\t'.join(file_head) + '\\tseizure1\\tseizure2\\n')\n for k, v in samps.iteritems():\n s1 = v[0]\n s2 = v[1]\n seizure1 = seizures[s1]\n seizure2 = seizures[s2]\n outfile.write('\\t'.join(v) + '\\t' + seizure1 + '\\t' + seizure2 + '\\n')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--input_file', \n help='Name of input LR_file')\n parser.add_argument('--seizure_file',\n help='Name of seizure master file')\n args = parser.parse_args()\n run(args.input_file, args.seizure_file)\n", "sub_path": "data_analysis/post_processing/1_add_seizures.py", "file_name": "1_add_seizures.py", "file_ext": "py", "file_size_in_byte": 1504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 37, "usage_type": "attribute"}]} +{"seq_id": "618714890", "text": "# Import utilities\r\nimport datetime\r\nfrom enum import Enum\r\n\r\n\r\nclass Person:\r\n # Define gender enum\r\n class Sex(Enum):\r\n MALE = 1\r\n FEMALE = 2\r\n\r\n # Defining a method to print cumulative data for the family\r\n def print_data(self):\r\n print(self.name.title() + \" \" + self.last_name.title() + \", \" + self.age + \" years old, \"\r\n + str(self.sex.name).lower() + \".\")\r\n\r\n # 2050 birthday calculator (by importing datetime to avoid inserting current year manually)\r\n def print_future_age(self):\r\n current_year = int(datetime.datetime.now().year)\r\n future_birthday = 2050 - (current_year - int(self.age))\r\n return str(future_birthday)\r\n\r\n # Method to print family's preferences based on their favorite seasons\r\n def preference(self):\r\n if self.season.lower() == \"spring\":\r\n print(self.name.title() + \" likes flowers. Watch out for allergies!\")\r\n elif self.season.lower() == \"summer\":\r\n print(self.name.title() + \" likes swimming, sun tanning, and Super Paradise!\")\r\n elif self.season.lower() == \"autumn\":\r\n print(self.name.title() + \" likes rain. What a nostalgic personality...\")\r\n elif self.season.lower() == \"winter\":\r\n print(self.name.title() + \" likes snow. Let's go snowboarding!\")\r\n else:\r\n print(\" - Sorry, I cannot work anymore. 
Please restart me.\")\r\n", "sub_path": "Python version/Person.py", "file_name": "Person.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "enum.Enum", "line_number": 8, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "432095531", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport time\nimport signal\nimport argparse\nimport project_root\nfrom os import path\nfrom subprocess import Popen, call\nfrom helpers.helpers import get_open_udp_port\n\n\ndef run(args):\n # run worker.py on ps and worker hosts\n for job_name in ['ps', 'worker']:\n host_list = args[job_name + '_list']\n procs = args[job_name + '_procs']\n\n for i in xrange(len(host_list)):\n ssh_cmd = ['ssh', host_list[i]]\n\n cmd = ['python', args['worker_src'],\n '--ps-hosts', args['ps_hosts'],\n '--worker-hosts', args['worker_hosts'],\n '--job-name', job_name,\n '--task-index', str(i)]\n if args['dagger']:\n cmd.append('--dagger')\n if args['driver'] is not None:\n cmd += ['--driver', args['driver']]\n\n cmd = ssh_cmd + cmd\n\n sys.stderr.write('$ %s\\n' % ' '.join(cmd))\n procs.append(Popen(cmd, preexec_fn=os.setsid))\n\n # ps will block forever\n for ps_proc in args['ps_procs']:\n ps_proc.communicate()\n\n\ndef cleanup(args):\n all_procs = args['ps_procs'] + args['worker_procs']\n for proc in all_procs:\n try:\n os.killpg(os.getpgid(proc.pid), signal.SIGTERM)\n except OSError as e:\n sys.stderr.write('%s\\n' % e)\n\n host_set = set(args['ps_list'] + args['worker_list'])\n pkill_script = path.join(args['rlcc_dir'], 'helpers', 'pkill.py')\n\n for host in host_set:\n kill_cmd = ['ssh', host, 'python', pkill_script, args['rlcc_dir']]\n sys.stderr.write('$ %s\\n' % ' '.join(kill_cmd))\n call(kill_cmd)\n\n sys.stderr.write('\\nAll cleaned up.\\n')\n\n\ndef construct_args(prog_args):\n # construct a dictionary of arguments\n args = {}\n\n # file paths\n args['rlcc_dir'] = prog_args.rlcc_dir\n args['worker_src'] = path.join(args['rlcc_dir'], 'a3c', 'worker.py')\n\n # hostnames and processes\n args['ps_hosts'] = prog_args.ps_hosts\n args['worker_hosts'] = prog_args.worker_hosts\n\n args['ps_list'] = prog_args.ps_hosts.split(',')\n args['worker_list'] = prog_args.worker_hosts.split(',')\n args['username'] = prog_args.username\n\n for i, host in enumerate(args['ps_list']):\n args['ps_list'][i] = args['username'] + '@' + host.split(':')[0]\n\n for i, host in enumerate(args['worker_list']):\n args['worker_list'][i] = args['username'] + '@' + host.split(':')[0]\n\n args['ps_procs'] = []\n args['worker_procs'] = []\n args['dagger'] = prog_args.dagger\n args['driver'] = prog_args.driver\n\n return args\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--ps-hosts', required=True, metavar='[HOSTNAME:PORT, ...]',\n help='comma-separated list of hostname:port of parameter servers')\n parser.add_argument(\n '--worker-hosts', required=True, metavar='[HOSTNAME:PORT, ...]',\n help='comma-separated list of hostname:port of workers')\n parser.add_argument(\n '--username', default='ubuntu',\n help='username used in ssh connection (default: ubuntu)')\n parser.add_argument(\n '--rlcc-dir', metavar='DIR', default='/home/ubuntu/RLCC',\n help='absolute path to RLCC/ (default: /home/ubuntu/RLCC)')\n parser.add_argument('--dagger', action='store_true',\n 
help='run Dagger rather than A3C')\n    parser.add_argument('--driver', help='hostname of the driver')\n    prog_args = parser.parse_args()\n    args = construct_args(prog_args)\n\n    # run worker.py on ps and worker hosts\n    try:\n        run(args)\n    except KeyboardInterrupt:\n        pass\n    finally:\n        cleanup(args)\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "a3c/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stderr.write", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 35, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 36, "usage_type": "call"}, {"api_name": "os.setsid", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.killpg", "line_number": 47, "usage_type": "call"}, {"api_name": "os.getpgid", "line_number": 47, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 56, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "591538766", "text": "#coding=utf-8\n# from selenium import webdriver\n# import time\n# browser=webdriver.Chrome()\n# browser.get(\"http://www.baidu.com\")\n# time.sleep(5)\n# browser.quit()\n# from selenium import webdriver\n# driver=webdriver.Chrome()\n# driver.get(\"http://www.baidu.com\")\n# print(\"maximize the browser window\")\n# driver.maximize_window()#maximize the browser window\n# driver.quit()\n# from selenium import webdriver\n# driver=webdriver.Chrome()\n# driver.get(\"http://www.baidu.com\")\n# #the numeric arguments are in pixels\n# print(\"display the browser at width 480, height 800\")\n# driver.set_window_size(480,800)#set the browser's width and height\n# driver.quit()\nfrom selenium import webdriver\nimport time\ndriver=webdriver.Chrome()\nfirst_url='http://www.baidu.com'\nprint(\"now access %s\"%first_url)\ndriver.get(first_url) #visit the Baidu home page\ntime.sleep(5)\nsecond_url='http://news.baidu.com'\nprint(\"now access %s\"%second_url)\ndriver.get(second_url)#visit the news page\ntime.sleep(5)\nprint(\"back to %s\"%first_url)\ndriver.back()#go back in the browser\ntime.sleep(5)\nprint(\"forward to %s\"%second_url)\ndriver.forward()#go forward in the browser\ntime.sleep(5)\ndriver.quit()", "sub_path": "Webtest/practice.py", "file_name": "practice.py", "file_ext": "py", "file_size_in_byte": 1087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 23, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 
34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "389166434", "text": "# usage `python3 data_migration.py [optional \"loop\"] [optional project to resume (must include [loop] or [file] for arg 1)]`\n# example `python3 data_migration.py loop`\n# example `python2 data_migration.py file 350`\nimport datetime\nimport json\nimport psycopg2\nimport shutil\nimport os\nimport glob\nimport csv\nimport re\nimport subprocess\nimport sys\nfrom config import config\n\nparams = config()\n\n# gets the right path depending upon whether developer uses gradle or maven\n# compile dir for maven is target and for gradle it's build\nisdir = os.path.isdir\njsonpath = r\"../../../../target/classes/json\"\nif not isdir(jsonpath):\n jsonpath = r\"../../../../build/resources/main/json\"\n\ncsvpath = r\"../../../../target/classes/csv\"\nif not isdir(csvpath):\n csvpath = r\"../../../../build/resources/main/csv\"\ncsvScriptPath = r\"../csv\"\n\nshppath = r\"../../../../target/classes/shp\"\nif not isdir(shppath):\n shppath = r\"../../../../build/resources/main/shp\"\nshpScriptPath = r\"../shp\"\n\ndef truncate_all_tables():\n conn = None\n try:\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"TRUNCATE TABLE users RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE institutions RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE institution_users RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE imagery RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE projects RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE project_widgets RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE plots RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE samples RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE sample_values RESTART IDENTITY CASCADE\")\n cur.execute(\"TRUNCATE TABLE roles RESTART IDENTITY CASCADE\")\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\ndef insert_users():\n conn = None\n try:\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n dirname = os.path.dirname(os.path.realpath(__file__))\n user_list_json = open(os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"user-list.json\"))), \"r\")\n users = json.load(user_list_json)\n print(len(users))\n for user in users:\n try:\n cur.execute(\"select * from add_user_migration(%s, %s::text, %s::text)\", (user[\"id\"], user[\"email\"], user[\"password\"]))\n except: pass\n conn.commit()\n cur.execute(\"select * from add_user_migration(%s, %s::text, %s::text)\", (-1, \"guest\", \"dkh*&jlkjadfjk&^58342bmdjkjhf(*&0984\"))\n cur.execute(\"SELECT * FROM set_admin()\")\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\ndef insert_institutions():\n conn = None\n try:\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur1 = conn.cursor()\n dirname = os.path.dirname(os.path.realpath(__file__))\n institution_list_json = open(os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"institution-list.json\"))), \"r\")\n institutions = json.load(institution_list_json)\n print(len(institutions))\n for institution in institutions:\n members=institution[\"members\"]\n admins=institution[\"admins\"]\n pendingUsers=institution[\"pending\"]\n cur.execute(\"select * from add_institution_migration(%s, 
%s::text, %s::text, %s::text, %s::text, %s)\",\n (institution[\"id\"], institution[\"name\"], institution[\"logo\"], institution[\"description\"], institution[\"url\"], institution[\"archived\"]))\n conn.commit()\n role_id = -1\n user_id = -1\n for member in members:\n if member in admins:\n role_id = 1\n elif member in pendingUsers:\n role_id = 3\n else:\n role_id = 2\n if isinstance(member , int):\n try:\n cur1.execute(\"select * from add_institution_user(%s, %s, %s)\", (institution[\"id\"], member, role_id))\n except: pass\n conn.commit()\n for pending in pendingUsers:\n if pending not in members and pending not in admins:\n try:\n cur1.execute(\"select * from add_institution_user(%s, %s, %s)\", (institution[\"id\"], pending, 3))\n except: pass\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\ndef insert_imagery():\n conn = None\n try:\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n dirname = os.path.dirname(os.path.realpath(__file__))\n imagery_list_json = open(os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"imagery-list.json\"))), \"r\")\n imageryArr = json.load(imagery_list_json)\n print(len(imageryArr))\n for imagery in imageryArr:\n if imagery[\"institution\"] > 0:\n try:\n cur.execute(\"select * from add_institution_imagery_migration(%s, %s, %s::text, %s::text, %s::text, %s::jsonb, %s::jsonb)\",\n (imagery[\"id\"], imagery[\"institution\"], imagery[\"visibility\"], imagery[\"title\"],\n imagery[\"attribution\"], json.dumps(imagery[\"extent\"]), json.dumps(imagery[\"sourceConfig\"])))\n except: pass\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\ndef insert_project_widgets(project_id, dash_id, conn):\n try:\n cur = conn.cursor()\n dirname = os.path.dirname(os.path.realpath(__file__))\n dash_json = open(os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"dash-\" + dash_id + \".json\"))), \"r\")\n widget = json.load(dash_json)\n if widget[\"projectID\"] is not None and int(project_id)==int(widget[\"projectID\"]) and len(str(widget[\"widgets\"]))>2:\n for awidget in widget[\"widgets\"]:\n cur.execute(\"select * from add_project_widget(%s, %s::uuid, %s::jsonb)\",\n (widget[\"projectID\"], widget[\"dashboardID\"], json.dumps(awidget)))\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"project widgets: \"+ str(error))\n\ndef insert_projects():\n conn = None\n try:\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n dirname = os.path.dirname(os.path.realpath(__file__))\n project_list_json = open(os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"project-list.json\"))), \"r\")\n projectArr = json.load(project_list_json)\n project_dash_list_json = open(os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"proj.json\"))), \"r\")\n dashArr = json.load(project_dash_list_json)\n print(len(projectArr))\n for project in projectArr:\n try:\n if project[\"id\"] > (int(sys.argv[2]) if len(sys.argv) > 2 else 0):\n print(\"inserting project with project id \" + str(project[\"id\"]))\n if project[\"numPlots\"] is None: project[\"numPlots\"]=0\n if project[\"plotSpacing\"] is None: project[\"plotSpacing\"]=0\n if project[\"plotSize\"] is None: project[\"plotSize\"]=0\n if project[\"samplesPerPlot\"] is None: project[\"samplesPerPlot\"]=0\n if project[\"sampleResolution\"] 
is None: project[\"sampleResolution\"]=0\n if not (\"projectTemplate\" in project): project[\"projectTemplate\"]=0\n if not (\"surveyRules\" in project): project[\"surveyRules\"]=[]\n if not (\"created_date\" in project): project[\"created_date\"]=None\n if not (\"published_date\" in project): project[\"published_date\"]=None\n if not (\"closed_date\" in project): project[\"closed_date\"]=None\n if not (\"archived_date\" in project): project[\"archived_date\"]=None\n if not (\"projectOptions\" in project): project[\"projectOptions\"]=None\n\n cur.execute(\"select * from create_project_migration(%s, %s, %s::text, %s::text, %s::text, %s::text, \"\n + \"ST_SetSRID(ST_GeomFromGeoJSON(%s), 4326), %s, %s::text, %s, %s, %s::text, %s, %s::text, \"\n + \"%s, %s, %s::jsonb, %s::jsonb, %s::jsonb, %s::date, %s::date, %s::date, %s::date, %s::jsonb)\",\n (project[\"id\"], project[\"institution\"], project[\"availability\"],\n project[\"name\"], project[\"description\"], project[\"privacyLevel\"], project[\"boundary\"].replace(\"\\\\\", \"\"),\n project[\"imageryId\"], project[\"plotDistribution\"], project[\"numPlots\"],\n project[\"plotSpacing\"], project[\"plotShape\"], project[\"plotSize\"], project[\"sampleDistribution\"],\n project[\"samplesPerPlot\"], project[\"sampleResolution\"], json.dumps(project[\"sampleValues\"]),\n json.dumps(project[\"surveyRules\"]),None, project[\"created_date\"], project[\"published_date\"],\n project[\"closed_date\"], project[\"archived_date\"], json.dumps(project[\"projectOptions\"])))\n\n project_id = project[\"id\"]\n for dash in dashArr:\n dash_id = dash[\"dashboard\"]\n try:\n if int(dash[\"projectID\"]) == int(project_id):\n insert_project_widgets(project_id,dash_id,conn)\n except: pass\n\n if len(sys.argv) > 1 and sys.argv[1] == \"loop\":\n ## insert data by row with loops\n insert_plots(project_id, conn)\n else:\n ## insert data the entire json file at a time\n conn.commit()\n insert_plots_samples_by_file(project_id, conn)\n\n conn.commit()\n # merge in external files\n merge_files(project, project_id, conn)\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"project for loop: \"+ str(error))\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"project outer: \"+ str(error))\n finally:\n if conn is not None:\n conn.close()\n\ndef insert_plots_samples_by_file(project_id, mainconn):\n conn = psycopg2.connect(**params)\n cur_plot = conn.cursor()\n try:\n print(\"insert by file\")\n dirname = os.path.dirname(os.path.realpath(__file__))\n filename = os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"plot-data-\" + str(project_id) + \".json\")))\n if os.path.isfile(filename):\n filedata = open(filename, \"r\").read()\n cur_plot.execute(\"select * from add_plots_by_json(%s, %s::text)\", (project_id, filedata))\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"plot file error: \"+ str(error))\n conn.commit()\n cur_plot.close()\n conn.close()\n insert_plots(project_id, mainconn)\n cur_plot.close()\n conn.close()\n\ndef insert_plots(project_id, conn):\n print(\"inserting plot the old way\")\n cur_plot = conn.cursor()\n user_plot_id=-1\n dirname = os.path.dirname(os.path.realpath(__file__))\n filename = os.path.abspath(os.path.realpath(os.path.join(dirname, jsonpath , \"plot-data-\" + str(project_id) + \".json\")))\n if os.path.isfile(filename):\n plot_list_json = open(filename, \"r\")\n plotArr = json.load(plot_list_json)\n for plot in plotArr:\n try:\n 
boolean_Flagged=plot[\"flagged\"]\n if plot[\"flagged\"] == False:\n plot[\"flagged\"] = 0\n else:\n plot[\"flagged\"] = 1\n\n if not (\"collectionStart\" in plot): plot[\"collectionStart\"] = None\n if not (\"collectionTime\" in plot) or re.search(\"^\\d+$\", plot[\"collectionTime\"]) is None : plot[\"collectionTime\"]=None\n\n cur_plot.execute(\"select * from create_project_plot(%s, ST_SetSRID(ST_GeomFromGeoJSON(%s), 4326))\",\n (project_id, plot[\"center\"]))\n\n plot_id = cur_plot.fetchone()[0]\n if plot[\"user\"] is not None:\n user_plot_id = insert_user_plots(plot_id, plot, boolean_Flagged, conn)\n insert_samples(plot_id, plot[\"samples\"], user_plot_id, conn)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"plots error: \"+ str(error))\n cur_plot.close()\n\ndef insert_user_plots(plot_id, plot, flagged, conn):\n user_plot_id = -1\n cur_up = conn.cursor()\n cur_user = conn.cursor()\n cur_user.execute(\"select user_uid from users where email=%s;\", [plot[\"user\"]])\n rows = cur_user.fetchall()\n if len(rows)>0:\n try:\n cur_up.execute(\"select * from add_user_plots_migration(%s, %s::text, %s, to_timestamp(%s/1000.0)::timestamp, to_timestamp(%s/1000.0)::timestamp)\",\n (plot_id, plot[\"user\"], flagged, plot[\"collectionStart\"], plot[\"collectionTime\"]))\n user_plot_id = cur_up.fetchone()[0]\n conn.commit()\n user_plot_id = 1\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"user plots error: \"+ str(error))\n cur_up.close()\n cur_user.close()\n return user_plot_id\n\ndef insert_samples(plot_id, samples, user_plot_id, conn):\n cur_sample = conn.cursor()\n for sample in samples:\n try:\n cur_sample.execute(\"select * from create_project_plot_sample(%s, ST_SetSRID(ST_GeomFromGeoJSON(%s), 4326))\",\n (plot_id, sample[\"point\"] ))\n\n sample_id = cur_sample.fetchone()[0]\n if user_plot_id != -1 and \"value\" in sample:\n if not (\"userImage\" in sample):\n sample[\"image_id\"] = None\n sample[\"image_attributes\"] = None\n else:\n sample[\"image_id\"] = sample[\"userImage\"][\"id\"]\n sample[\"image_attributes\"] = sample[\"userImage\"][\"attributes\"]\n insert_sample_values(user_plot_id, sample_id, sample[\"value\"], sample[\"image_id\"], sample[\"image_attributes\"], conn)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"samples error: \"+ str(error))\n cur_sample.close()\n\ndef insert_sample_values(user_plot_id, sample_id, sample_value, image_id, image_value, conn):\n cur_sv = conn.cursor()\n try:\n cur_sv.execute(\"select * from add_sample_values_migration(%s, %s, %s::jsonb, %s, %s::jsonb)\",\n (user_plot_id, sample_id, json.dumps(sample_value), image_id, json.dumps(image_value)))\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"sample values error: \"+ str(error))\n conn.commit()\n cur_sv.close()\n\n# builds list of old name to become lon, lat\ndef csvColRenameList(csvCols):\n renameCol = []\n renameCol.append([csvCols[0].replace(\" \", \"\"), \"lon\"])\n renameCol.append([csvCols[1].replace(\" \", \"\"), \"lat\"])\n return renameCol\n\n# files should have lat lon in the first 2 columns, returns string\ndef csvHeaderToCol(csvCols):\n colsStr = \"\"\n for i, h in enumerate(csvCols):\n h = h.replace(\" \", \"\")\n if len(h) > 0:\n if i <=1:\n colsStr += h + \" float,\"\n elif (i == 2 and h.upper() == \"PLOTID\") or (i == 3 and h.upper() == \"SAMPLEID\"):\n colsStr += h + \" integer,\"\n else:\n colsStr += h + \" text,\"\n return colsStr[:-1]\n\ndef checkRequiredCols(actualCols, reqCols):\n 
for col in reqCols:\n        if not (col.upper() in (acol.upper() for acol in actualCols)):\n            return []\n    return actualCols\n\ndef isFloat(string):\n    try:\n        float(string)\n        return True\n    except ValueError:\n        return False\n\n# returns list\ndef loadCsvHeaders(csvfile):\n    with open(csvfile, \"r\") as fin:\n        csvin = csv.reader(fin)\n        headers = next(csvin, [])\n        firstRow = next(csvin, [])\n        if headers[0].upper() == \"PLOTID\" or headers[0].upper() == \"ID\" or (not isFloat(firstRow[0]) or -180.0 > float(firstRow[0]) > 180.0 or\n            not isFloat(firstRow[1]) or -90.0 > float(firstRow[1]) > 90.0):\n            print(\"Error with columns in file \" + csvfile)\n            return []\n        else:\n            return headers\n\ndef merge_files(project, project_id, conn):\n    try:\n        print(\"merging external files\")\n        cur = conn.cursor()\n        plots_table = \"\"\n        samples_table = \"\"\n        dirname = os.path.dirname(os.path.realpath(__file__))\n        need_to_update = 0\n        fileprefix = \"project-\" + str(project_id)\n        tableprefix = \"project_\" + str(project_id)\n\n        # old files do not have a plotId, all columns are extra\n        oldFilename = os.path.abspath(os.path.realpath(os.path.join(dirname, csvpath , fileprefix + \".csv\")))\n        filename = os.path.abspath(os.path.realpath(os.path.join(dirname, csvpath , fileprefix + \"-plots.csv\")))\n\n        if project[\"plotDistribution\"] == \"csv\" and (os.path.isfile(oldFilename)):\n            csv_headers = loadCsvHeaders(oldFilename)\n            if len(csv_headers) > 0:\n                plots_table = tableprefix + \"_plots_csv\"\n                need_to_update = 1\n                cur.execute(\"DROP TABLE IF EXISTS ext_tables.\" + plots_table)\n                cur.execute(\"SELECT * FROM create_new_table(%s, %s)\",\n                            [plots_table, csvHeaderToCol(csv_headers)])\n                conn.commit()\n\n\n                # run sh to upload csv to postgres\n                dirname = os.path.dirname(os.path.realpath(__file__))\n                shpath = os.path.abspath(os.path.realpath(os.path.join(dirname, csvpath)))\n                subprocess.run([\"bash\", \"csv2postgres-alt.sh\", fileprefix, fileprefix + \"-plots\"], cwd=shpath, stdout=subprocess.PIPE)\n\n                # add index\n                cur.execute(\"SELECT * FROM add_index_col(%s)\" , [plots_table])\n                conn.commit()\n\n                #rename cols\n                try:\n                    colList = csvColRenameList(csv_headers)\n                    if colList[0][0].upper() != colList[0][1].upper():\n                        cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [plots_table, colList[0][0], colList[0][1]])\n                        conn.commit()\n\n                    if colList[1][0].upper() != colList[1][1].upper():\n                        cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [plots_table, colList[1][0], colList[1][1]])\n                        conn.commit()\n                except:\n                    pass\n\n            # if column plotid does exist, it is not the same as the newer plotId field\n            try:\n                cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [plots_table, \"plotId\", \"plotId2\"])\n                conn.commit()\n            except:\n                conn.commit()\n                pass\n            # no requirement for plotId means that even if the field exists it's not valid. 
Add an empty column for\n            # backwards compatibility\n            cur.execute(\"SELECT * FROM add_plotId_col(%s)\" , [plots_table])\n            conn.commit()\n\n        elif project[\"plotDistribution\"] == \"csv\" and (os.path.isfile(filename)):\n            csv_headers = loadCsvHeaders(filename)\n            if len(checkRequiredCols(csv_headers, [\"plotId\"])) > 0:\n                plots_table = tableprefix + \"_plots_csv\"\n                need_to_update = 2\n                cur.execute(\"DROP TABLE IF EXISTS ext_tables.\" + plots_table)\n                cur.execute(\"SELECT * FROM create_new_table(%s, %s)\",\n                            [plots_table, csvHeaderToCol(csv_headers)])\n                conn.commit()\n\n                # run sh to upload csv to postgres\n                dirname = os.path.dirname(os.path.realpath(__file__))\n                shpath = os.path.abspath(os.path.realpath(os.path.join(dirname, csvpath)))\n                subprocess.run([\"bash\", \"csv2postgres.sh\", fileprefix + \"-plots\"], cwd=shpath, stdout=subprocess.PIPE)\n\n                # add index\n                cur.execute(\"SELECT * FROM add_index_col(%s)\" , [plots_table])\n                conn.commit()\n\n                #rename cols\n                try:\n                    colList = csvColRenameList(csv_headers)\n                    if colList[0][0].upper() != colList[0][1].upper():\n                        cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [plots_table, colList[0][0], colList[0][1]])\n                        conn.commit()\n\n                    if colList[1][0].upper() != colList[1][1].upper():\n                        cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [plots_table, colList[1][0], colList[1][1]])\n                        conn.commit()\n                except:\n                    pass\n\n            else:\n                print (csv_headers)\n\n        if project[\"plotDistribution\"] == \"shp\":\n            filename = os.path.abspath(os.path.realpath(os.path.join(dirname, shppath , fileprefix + \"-plots.zip\")))\n            templateFilename = os.path.abspath(os.path.realpath(os.path.join(dirname, shppath , \"project-\" + str(project[\"projectTemplate\"]) + \"-plots.zip\")))\n            if (not os.path.isfile(filename)) and os.path.isfile(templateFilename):\n                shutil.copy(templateFilename, filename)\n\n            if os.path.isfile(filename):\n                need_to_update = 2\n                plots_table = \"project_\" + str(project_id) + \"_plots_shp\"\n                cur.execute(\"DROP TABLE IF EXISTS ext_tables.\" + plots_table)\n                conn.commit()\n                # run sh\n                dirname = os.path.dirname(os.path.realpath(__file__))\n                shpath = os.path.abspath(os.path.realpath(os.path.join(dirname, shppath)))\n                subprocess.run([\"bash\", \"shp2postgres.sh\", fileprefix + \"-plots\"], cwd=shpath, stdout=subprocess.PIPE)\n\n        # ### Samples\n        filename = os.path.abspath(os.path.realpath(os.path.join(dirname, csvpath , fileprefix + \"-samples.csv\")))\n        if project[\"sampleDistribution\"] == \"csv\" and os.path.isfile(filename):\n            csv_headers = loadCsvHeaders(filename)\n            if len(checkRequiredCols(csv_headers, [\"plotId\", \"sampleId\"])) > 0:\n                samples_table = \"project_\" + str(project_id) + \"_samples_csv\"\n                need_to_update = 3\n                cur.execute(\"DROP TABLE IF EXISTS ext_tables.\" + samples_table)\n                cur.execute(\"SELECT * FROM create_new_table(%s, %s)\",\n                            [samples_table, csvHeaderToCol(csv_headers)])\n                conn.commit()\n\n                # run sh to upload csv to postgres\n                dirname = os.path.dirname(os.path.realpath(__file__))\n                shpath = os.path.abspath(os.path.realpath(os.path.join(dirname, csvpath)))\n                subprocess.run([\"bash\", \"csv2postgres.sh\", fileprefix + \"-samples\"], cwd=shpath, stdout=subprocess.PIPE)\n\n                # add index\n                cur.execute(\"SELECT * FROM add_index_col(%s)\" , [samples_table])\n\n                #rename cols\n                try:\n                    colList = csvColRenameList(csv_headers)\n                    if colList[0][0].upper() != colList[0][1].upper():\n                        cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [samples_table, colList[0][0], colList[0][1]])\n                        conn.commit()\n\n                    if colList[1][0].upper() != 
colList[1][1].upper():\n cur.execute(\"SELECT * FROM rename_col(%s, %s, %s)\" , [samples_table, colList[1][0], colList[1][1]])\n conn.commit()\n except:\n pass\n\n if project[\"sampleDistribution\"] == \"shp\":\n filename = os.path.abspath(os.path.realpath(os.path.join(dirname, shppath , fileprefix + \"-samples.zip\")))\n templateFilename = os.path.abspath(os.path.realpath(os.path.join(dirname, shppath , \"project-\" + str(project[\"projectTemplate\"]) + \"-samples.zip\")))\n if (not os.path.isfile(filename)) and os.path.isfile(templateFilename):\n shutil.copy(templateFilename, filename)\n\n if os.path.isfile(filename):\n need_to_update = 3\n\n samples_table = \"project_\" + str(project_id) + \"_samples_shp\"\n cur.execute(\"DROP TABLE IF EXISTS ext_tables.\" + samples_table)\n conn.commit()\n # run sh\n dirname = os.path.dirname(os.path.realpath(__file__))\n shpath = os.path.abspath(os.path.realpath(os.path.join(dirname, shppath)))\n subprocess.run([\"bash\", \"shp2postgres.sh\", fileprefix + \"-samples\"], cwd=shpath, stdout=subprocess.PIPE)\n\n if need_to_update > 0:\n try:\n # add table names to project\n cur.execute(\"SELECT * FROM update_project_tables(%s, %s, %s)\" , [project_id, plots_table, samples_table])\n conn.commit()\n\n if need_to_update >= 2:\n # clean up project tables\n cur.execute(\"SELECT * FROM cleanup_project_tables(%s, %s)\" , [project_id, project[\"plotSize\"]])\n conn.commit()\n\n if need_to_update == 3:\n # merge files into plots and samples\n cur.execute(\"SELECT * FROM merge_plot_and_file(%s)\" , [project_id])\n conn.commit()\n else:\n # merge files into plots\n cur.execute(\"SELECT * FROM merge_plots_only(%s)\" , [project_id])\n conn.commit()\n finally:\n pass\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"merge file error: \"+ str(error))\n conn.commit()\n cur.close()\n\ndef insert_roles():\n conn = None\n try:\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"INSERT INTO roles VALUES (%s, %s::text)\", (1, \"admin\"))\n cur.execute(\"INSERT INTO roles VALUES (%s, %s::text)\", (2, \"member\"))\n cur.execute(\"INSERT INTO roles VALUES (%s, %s::text)\", (3, \"pending\"))\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\ndef update_sequence():\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"select * from update_sequence('users', 'user_uid')\")\n cur.execute(\"select * from update_sequence('institutions', 'institution_uid')\")\n cur.execute(\"select * from update_sequence('imagery', 'imagery_uid')\")\n cur.execute(\"select * from update_sequence('projects', 'project_uid')\")\n cur.execute(\"select * from update_sequence('user_plots', 'user_plot_uid')\")\n cur.execute(\"select * from update_sequence('sample_values', 'sample_value_uid')\")\n cur.execute(\"select * from update_sequence('plots', 'plot_uid')\")\n\nif __name__ == \"__main__\":\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n csvScripts = os.listdir(csvScriptPath)\n for f in csvScripts:\n if f.endswith(\".sh\"):\n shutil.copy(csvScriptPath+\"/\"+f, csvpath)\n\n shpScripts = os.listdir(shpScriptPath)\n for f in shpScripts:\n if f.endswith(\".sh\"):\n shutil.copy(shpScriptPath+\"/\"+f, shppath)\n\n if (len(sys.argv) <= 2):\n truncate_all_tables()\n print(\"inserting users\")\n insert_users()\n print(\"inserting roles\")\n insert_roles()\n print(\"inserting institutions\")\n insert_institutions()\n print(\"inserting 
imagery\")\n insert_imagery()\n\n print(\"inserting projects\")\n insert_projects()\n print(\"Done with projects\")\n update_sequence()\n print(\"Done migration\")\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n", "sub_path": "src/main/resources/scripts/data_migration.py", "file_name": "data_migration.py", "file_ext": "py", "file_size_in_byte": 28687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "config.config", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 52, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 76, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "json.load", "line_number": 90, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 120, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "json.load", "line_number": 133, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 140, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 154, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "json.load", "line_number": 155, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 159, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 162, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "json.load", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "json.load", "line_number": 174, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 178, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 200, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 201, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 202, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 212, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 223, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 227, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path", "line_number": 238, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 244, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 261, "usage_type": "call"}, {"api_name": "re.search", "line_number": 271, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 281, "usage_type": 
"attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 298, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 321, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 329, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 330, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 372, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path", "line_number": 388, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path", "line_number": 394, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path", "line_number": 395, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 397, "usage_type": "call"}, {"api_name": "os.path", "line_number": 397, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path", "line_number": 409, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 410, "usage_type": "call"}, {"api_name": "os.path", "line_number": 410, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 410, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 410, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 411, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 411, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path", "line_number": 453, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 454, "usage_type": "call"}, {"api_name": "os.path", "line_number": 454, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 454, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 454, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 455, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 455, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 478, "usage_type": "call"}, {"api_name": "os.path", "line_number": 478, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 478, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 478, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path", "line_number": 479, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 480, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 480, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 481, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 483, "usage_type": "call"}, {"api_name": "os.path", "line_number": 483, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 489, "usage_type": "call"}, {"api_name": "os.path", "line_number": 489, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 489, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path", "line_number": 490, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 490, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 491, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 491, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 494, "usage_type": "call"}, {"api_name": "os.path", "line_number": 494, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 494, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 494, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 495, "usage_type": "call"}, {"api_name": "os.path", "line_number": 495, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 506, "usage_type": "call"}, {"api_name": "os.path", "line_number": 506, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 506, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path", "line_number": 507, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 507, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 508, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 508, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path", "line_number": 527, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 528, "usage_type": "call"}, {"api_name": "os.path", "line_number": 528, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 528, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 528, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path", "line_number": 529, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 532, "usage_type": "call"}, {"api_name": "os.path", "line_number": 532, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 539, "usage_type": "call"}, {"api_name": "os.path", "line_number": 539, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 539, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 540, "usage_type": "call"}, {"api_name": "os.path", "line_number": 540, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 540, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 540, "usage_type": "call"}, {"api_name": "subprocess.run", 
"line_number": 541, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 541, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 565, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 573, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 580, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 586, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 597, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 597, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 598, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 601, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 603, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 606, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 608, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 624, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 624, "usage_type": "attribute"}]} +{"seq_id": "49902436", "text": "#!/usr/bin/env python\n\"\"\"\nTesting k-means clustering\nfor purely random and normally distributed data\n\"\"\"\nimport os\nimport math\nimport random\nfrom numpy import array, random as numpy_random\nfrom ase.data import chemical_symbols\n\nfrom kmeans import Point, kmeans, k_from_n\nfrom element_groups import get_element_group\nfrom set_path import VIS_PATH\n\n\nDISTRIB = 'GAUSSIAN'\ndata, ref = [], []\nN = 200\n\ndef gaussian_distribution(N, k):\n n = float(N)/k\n X = []\n for i in range(k):\n init = (random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1))\n s = random.uniform(0.05, 0.5)\n x = []\n while len(x) < n:\n a, b, c = array([numpy_random.normal(init[0], s), numpy_random.normal(init[1], s), numpy_random.normal(init[2], s)])\n if abs(a) < 1 and abs(b) < 1 and abs(c) < 1:\n x.append([a,b,c])\n X.extend(x)\n X = array(X)[:N]\n return X\n\nif DISTRIB == 'RANDOM':\n set_x, set_y = [random.choice(chemical_symbols) for i in range(N)], [random.choice(chemical_symbols) for i in range(N)]\n set_z = [round(random.uniform(0.1, 15.0), 2) for i in range(N)]\n data, ref = [], []\n for i in range(N):\n formula = set_x[i] + set_y[i]\n set_x[i] = get_element_group(chemical_symbols.index(set_x[i]))\n set_y[i] = get_element_group(chemical_symbols.index(set_y[i]))\n data.append(Point([set_x[i], set_y[i], set_z[i]], formula))\n ref.append([set_x[i], set_y[i], set_z[i]])\n\nelse:\n nte = len(chemical_symbols)\n G = gaussian_distribution(N, k_from_n(N))\n\n set_x = (G[:,0] + 1)/2*nte\n set_x = map(lambda x: int(math.floor(x)), set_x.tolist())\n\n set_y = (G[:,1] + 1)/2*nte\n set_y = map(lambda x: int(math.floor(x)), set_y.tolist())\n\n set_z = (G[:,2] + 1)/2*15\n set_z = map(lambda x: round(x, 2), set_z.tolist())\n\nfor i in range(N):\n formula = chemical_symbols[set_x[i]] + chemical_symbols[set_y[i]]\n set_x[i] = get_element_group(set_x[i])\n set_y[i] = get_element_group(set_y[i])\n data.append(Point([set_x[i], set_y[i], set_z[i]]))\n ref.append([set_x[i], set_y[i], set_z[i], formula])\n\nclusters = kmeans(data, k_from_n(len(data)))\n\npoints_file = os.path.join(VIS_PATH, \"points.csv\")\ncluster_file = os.path.join(VIS_PATH, \"clusters.csv\")\n\nwith open(points_file, \"w\") as s:\n s.write(\"x,y,z,label\\n\")\n for n, i in enumerate(ref):\n s.write(\",\".join(map(str, i)) + \"\\n\")\n\nwith open(cluster_file, \"w\") as s:\n 
s.write(\"x,y,z\\n\")\n for n, c in enumerate(clusters, 1):\n for p in c.points:\n s.write(\",\".join(map(str, p.coords)) + \"\\n\")\n s.write(\"-,-,-\\n\")\n\nprint(points_file)\nprint(cluster_file)", "sub_path": "tutorials/simple_data_mining/sample_kmeans.py", "file_name": "sample_kmeans.py", "file_ext": "py", "file_size_in_byte": 2672, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "random.uniform", "line_number": 25, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 37, "usage_type": "argument"}, {"api_name": "random.uniform", "line_number": 38, "usage_type": "call"}, {"api_name": "element_groups.get_element_group", "line_number": 42, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols.index", "line_number": 42, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 42, "usage_type": "name"}, {"api_name": "element_groups.get_element_group", "line_number": 43, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols.index", "line_number": 43, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 43, "usage_type": "name"}, {"api_name": "kmeans.Point", "line_number": 44, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 48, "usage_type": "argument"}, {"api_name": "kmeans.k_from_n", "line_number": 49, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 52, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 55, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 61, "usage_type": "name"}, {"api_name": "element_groups.get_element_group", "line_number": 62, "usage_type": "call"}, {"api_name": "element_groups.get_element_group", "line_number": 63, "usage_type": "call"}, {"api_name": "kmeans.Point", "line_number": 64, "usage_type": "call"}, {"api_name": "kmeans.kmeans", "line_number": 67, "usage_type": "call"}, {"api_name": "kmeans.k_from_n", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "set_path.VIS_PATH", "line_number": 69, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "set_path.VIS_PATH", "line_number": 70, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "196535454", "text": "# License: Apache 2.0. See LICENSE file in root directory.\n# Copyright(c) 2020 Intel Corporation. 
All Rights Reserved.\n\n#test:device L500*\n#test:device D400*\n\nimport platform\nimport pyrealsense2 as rs\nfrom rspy import test\nfrom rspy import log\nimport time\n\ndev = test.find_first_device_or_exit()\ndepth_sensor = dev.first_depth_sensor()\ncolor_sensor = dev.first_color_sensor()\n\nprevious_depth_frame_number = -1\nprevious_color_frame_number = -1\nafter_set_option = False\n\n\ndef get_allowed_drops(): \n global after_set_option\n # On Linux, there is a known issue (RS5-7148) where up to 4 frame drops can occur\n # sequentially after setting control values during streaming... on Windows this\n # does not occur.\n if platform.system() == 'Linux' and after_set_option:\n return 4\n # Our KPI is to prevent sequential frame drops, therefore single frame drop is allowed.\n return 1\n\ndef set_new_value(sensor, option, value): \n global after_set_option\n after_set_option = True\n sensor.set_option(option, value)\n time.sleep( 0.5 ) # collect frames\n after_set_option = False\n\ndef check_depth_frame_drops(frame):\n global previous_depth_frame_number\n allowed_drops = get_allowed_drops()\n test.check_frame_drops(frame, previous_depth_frame_number, allowed_drops)\n previous_depth_frame_number = frame.get_frame_number()\n\ndef check_color_frame_drops(frame):\n global previous_color_frame_number\n allowed_drops = get_allowed_drops()\n test.check_frame_drops(frame, previous_color_frame_number, allowed_drops)\n previous_color_frame_number = frame.get_frame_number()\n\n# Use a profile that's common to both L500 and D400\ndepth_profile = next(p for p in\n depth_sensor.profiles if p.fps() == 30\n and p.stream_type() == rs.stream.depth\n and p.format() == rs.format.z16\n and p.as_video_stream_profile().width() == 640\n and p.as_video_stream_profile().height() == 480)\n\ncolor_profile = next(p for p in color_sensor.profiles if p.fps() == 30\n and p.stream_type() == rs.stream.color\n and p.format() == rs.format.yuyv\n and p.as_video_stream_profile().width() == 640\n and p.as_video_stream_profile().height() == 480)\n\ndepth_sensor.open( depth_profile )\ndepth_sensor.start( check_depth_frame_drops )\ncolor_sensor.open( color_profile )\ncolor_sensor.start( check_color_frame_drops )\n\n\n#############################################################################################\n# Test #1\n\nlaser_power = rs.option.laser_power\ncurrent_laser_control = 10\n\ntest.start(\"Checking for frame drops when setting laser power several times\")\n\nfor i in range(1,5): \n new_value = current_laser_control + 10*i\n set_new_value(depth_sensor, laser_power, new_value)\n\ntest.finish()\n\n# reset everything back\ndepth_sensor.set_option( rs.option.visual_preset, int(rs.l500_visual_preset.max_range) )\n\n\n\n#############################################################################################\n# Test #2\n\ntime.sleep(0.5) # jic\n\ndepth_options = depth_sensor.get_supported_options()\ncolor_options = color_sensor.get_supported_options()\n\ntest.start(\"Checking for frame drops when setting any option\")\n\nfor option in depth_options:\n try:\n if depth_sensor.is_option_read_only(option): \n continue\n old_value = depth_sensor.get_option( option )\n range = depth_sensor.get_option_range( option )\n new_value = range.min\n if old_value == new_value:\n new_value = range.max\n if not log.d( str(option), old_value, '->', new_value ):\n test.info( str(option), new_value, persistent = True )\n set_new_value( depth_sensor, option, new_value )\n depth_sensor.set_option( option, old_value )\n except: \n 
test.unexpected_exception()\n test.abort()\n finally:\n test.reset_info( persistent = True )\n\nfor option in color_options:\n try:\n if color_sensor.is_option_read_only(option): \n continue\n new_value = color_sensor.get_option_range(option).min\n set_new_value(color_sensor, option, new_value)\n except: \n option_name = \"Color sensor - \" + str(option)\n test.info(option_name, new_value)\n test.unexpected_exception()\n test.abort()\n\ntest.finish()\n\n\n#############################################################################################\ndepth_sensor.stop()\ndepth_sensor.close()\n\ncolor_sensor.stop()\ncolor_sensor.close()\n\ntest.print_results_and_exit()\n", "sub_path": "unit-tests/func/test-set-option.py", "file_name": "test-set-option.py", "file_ext": "py", "file_size_in_byte": 4565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rspy.test.find_first_device_or_exit", "line_number": 13, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 13, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "rspy.test.check_frame_drops", "line_number": 42, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 42, "usage_type": "name"}, {"api_name": "rspy.test.check_frame_drops", "line_number": 48, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 48, "usage_type": "name"}, {"api_name": "pyrealsense2.stream", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pyrealsense2.option", "line_number": 74, "usage_type": "attribute"}, {"api_name": "rspy.test.start", "line_number": 77, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 77, "usage_type": "name"}, {"api_name": "rspy.test.finish", "line_number": 83, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 83, "usage_type": "name"}, {"api_name": "pyrealsense2.option", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pyrealsense2.l500_visual_preset", "line_number": 86, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 93, "usage_type": "call"}, {"api_name": "rspy.test.start", "line_number": 98, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 98, "usage_type": "name"}, {"api_name": "rspy.log.d", "line_number": 109, "usage_type": "call"}, {"api_name": "rspy.log", "line_number": 109, "usage_type": "name"}, {"api_name": "rspy.test.info", "line_number": 110, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 110, "usage_type": "name"}, {"api_name": "rspy.test.unexpected_exception", "line_number": 114, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 114, "usage_type": "name"}, {"api_name": "rspy.test.abort", "line_number": 115, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 115, "usage_type": "name"}, {"api_name": "rspy.test.reset_info", "line_number": 117, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 117, "usage_type": "name"}, {"api_name": "rspy.test.info", "line_number": 127, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 127, "usage_type": "name"}, {"api_name": "rspy.test.unexpected_exception", 
"line_number": 128, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 128, "usage_type": "name"}, {"api_name": "rspy.test.abort", "line_number": 129, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 129, "usage_type": "name"}, {"api_name": "rspy.test.finish", "line_number": 131, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 131, "usage_type": "name"}, {"api_name": "rspy.test.print_results_and_exit", "line_number": 141, "usage_type": "call"}, {"api_name": "rspy.test", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "481478560", "text": "#守护进程\n#守护进程会随着主进程的代码执行结束而结束\n#正常的子进程没有执行完的时候主进程要一直等着\n#守护进程不能再开户子进程\n\nimport time\nfrom multiprocessing import Process\ndef cal_time():\n while True:\n time.sleep(1)\n print(\"过去了1s\")\n\nif __name__ == '__main__':\n p = Process(target=cal_time)\n p.daemon = True # 一定在开启进程之前设置\n p.start()\n for i in range(100):\n time.sleep(0.1)\n print('*' * i)\n", "sub_path": "process/process_daemon.py", "file_name": "process_daemon.py", "file_ext": "py", "file_size_in_byte": 517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.sleep", "line_number": 10, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "389482296", "text": "import notifications\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom app.views import ProjectViewSet, TaskViewSet, UserViewSet, ChatRoomViewSet, ChatViewSet\nfrom rest_framework import routers\nfrom django.conf import settings\nadmin.autodiscover()\n\nrouter = routers.DefaultRouter()\nrouter.register(r'project', ProjectViewSet)\nrouter.register(r'task', TaskViewSet)\nrouter.register(r'users', UserViewSet)\nrouter.register(r'chat-room', ChatRoomViewSet)\n#router.register(r'chat', ChatViewSet)\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'mybeez.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url('^inbox/notifications/', include(notifications.urls)),\n url(r'', include('user_sessions.urls', 'user_sessions')),\n url(r'^api/', include(router.urls)),\n url(r'^$', 'app.views.home', name='home'),\n url(r'^login$', 'app.views.loginAngular', name='loginAngular'),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^new_project$', 'app.views.new_project', name='new_project'),\n url(r'^profile$', 'app.views.profile', name='profile'),\n url(r'^new_task$', 'app.views.new_task', name='new_task'),\n url(r'^node_api$', 'app.views.node_api', name='node_api'),\n url(r'^task_post$', 'app.views.task_post', name='task_post'),\n url(r'^save_message$', 'app.views.save_message', name='save_message'),\n url(r'^chat$', 'app.views.ChatViewSet', name='chat'),\n url(r'^project$', 'app.views.get_project', name='get_project'),\n url(r'^room_check$', 'app.views.room_check', name='room_check'),\n url(r'^logout$', 'django.contrib.auth.views.logout', {'next_page': '/'})\n \n)\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n )\n", "sub_path": "mybeez/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2033, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": 
"django.contrib.admin.autodiscover", "line_number": 7, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 9, "usage_type": "name"}, {"api_name": "app.views.ProjectViewSet", "line_number": 10, "usage_type": "argument"}, {"api_name": "app.views.TaskViewSet", "line_number": 11, "usage_type": "argument"}, {"api_name": "app.views.UserViewSet", "line_number": 12, "usage_type": "argument"}, {"api_name": "app.views.ChatRoomViewSet", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.urls.patterns", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 20, "usage_type": "call"}, {"api_name": "notifications.urls", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "425628398", "text": "import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\n\nEPOCHS=2000 \ntraining_data = np.array([[0,0],[0,1],[1,0],[1,1]], \"float32\")\ntarget_data = np.array([[0],[1],[1],[0]], \"float32\")\n\nmodel = 
Sequential()\nmodel.add(Dense(16, input_shape=(2,), activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\nprint(\"-----------not train\")\nprint(model.predict(training_data))\n\nhistory = model.fit(training_data, target_data, \n epochs=EPOCHS, verbose=0)\n\nprint(\"----------- train\")\nprint(model.predict(training_data[3:]))\n# model.evaluate(training_data, target_data, steps=2)", "sub_path": "day16/mykeras02.py", "file_name": "mykeras02.py", "file_ext": "py", "file_size_in_byte": 693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 9, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "63775064", "text": "import json, sys, time\n\n# const\ndevice_keys = ['name', 'brand', 'codename', 'specs']\nteam_keys = ['full_name', 'country', 'github_username']\n\ntry:\n devices = json.loads(open('../../devices.json').read())\nexcept:\n print('Cannot load devices.json properly, Try again after correcting the format.')\n time.sleep(5)\n sys.exit(1)\n\ntry:\n teams = json.loads(open('../../teams.json').read())\nexcept:\n print('Cannot load teams.json properly, Try again after correcting the format.')\n time.sleep(5)\n sys.exit(1)\n\nfor device in devices:\n keys = list(device.keys())\n check = all(x in keys for x in device_keys) and len(device_keys) MAX_LENGTH:\n continue\n if len(tokens_tgt) < MIN_LENGTH or len(tokens_tgt) > MAX_LENGTH:\n continue\n seq_src = []\n seq_tgt = []\n for word in tokens_src:\n if word not in vocab_src:\n vocab_src[word] = len(vocab_src)\n seq_src.append(str(vocab_src[word]))\n for word in tokens_tgt:\n if word not in vocab_tgt:\n vocab_tgt[word] = len(vocab_tgt)\n seq_tgt.append(str(vocab_tgt[word]))\n data.append((seq_src, seq_tgt))\n data.sort(key = lambda x: len(x[0]), reverse = True) # sort by source sequence length\n fo.close()\n return data, vocab_src, vocab_tgt\n\ndef save_data(data):\n fo = open(sys.argv[1] + \".csv\", \"w\")\n for seq in data:\n fo.write(\" \".join(seq[0]) + \"\\t\" + \" \".join(seq[1]) + \"\\n\")\n fo.close()\n\ndef save_vocab(vocab, ext):\n fo = open(sys.argv[1] + \".vocab.\" + ext, \"w\")\n for word, _ in sorted(vocab.items(), key = lambda x: x[1]):\n fo.write(\"%s\\n\" % word)\n fo.close()\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n sys.exit(\"Usage: %s training_data\" % sys.argv[0])\n data, vocab_src, vocab_tgt= load_data()\n save_data(data)\n save_vocab(vocab_src, \"src\")\n save_vocab(vocab_tgt, \"tgt\")\n", "sub_path": "prepare.py", "file_name": "prepare.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "model.PAD", "line_number": 10, "usage_type": "name"}, {"api_name": "model.EOS", "line_number": 10, "usage_type": "name"}, {"api_name": "model.SOS", "line_number": 10, "usage_type": "name"}, {"api_name": "model.PAD", "line_number": 11, "usage_type": "name"}, {"api_name": "model.EOS", "line_number": 11, "usage_type": "name"}, {"api_name": "model.SOS", "line_number": 11, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 
12, "usage_type": "attribute"}, {"api_name": "utils.tokenize", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.tokenize", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "230453446", "text": "#!/usr/bin/python3\n\"\"\"List all State objects from db\"\"\"\nimport sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nfrom model_state import Base, State\n\n\ndef first_state():\n \"\"\" Arguments argv to connect to database\n argv[1]: mysql username\n argv[2]: mysql password\n argv[3]: database name\n \"\"\"\n engine = create_engine(\"mysql+mysqldb://{}:{}@localhost/{}\"\n .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n pool_pre_ping=True)\n\n Base.metadata.create_all(engine)\n session = Session(engine)\n var = session.query(State).first()\n if var:\n print(\"{}: {}\".format(var.__dict__['id'], var.__dict__['name']))\n else:\n print(\"Nothing\")\n\n session.close()\n\nif __name__ == \"__main__\":\n first_state()\n", "sub_path": "0x0F-python-object_relational_mapping/8-model_state_fetch_first.py", "file_name": "8-model_state_fetch_first.py", "file_ext": "py", "file_size_in_byte": 813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "model_state.Base.metadata.create_all", "line_number": 19, "usage_type": "call"}, {"api_name": "model_state.Base.metadata", "line_number": 19, "usage_type": "attribute"}, {"api_name": "model_state.Base", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "model_state.State", "line_number": 21, "usage_type": "argument"}]} +{"seq_id": "361776652", "text": "\"\"\"\nCopyright (c) 2018-2020, Jairus Martin.\n\nDistributed under the terms of the MIT License.\n\nThe full license is in the file LICENSE.txt, distributed with this software.\n\nCreated on Aug 2, 2018\n\n@author: jrm\n\"\"\"\nimport os\nimport logging\nimport datetime\nimport weakref\nimport asyncio\nimport sqlalchemy as sa\nfrom decimal import Decimal\nfrom atom import api\nfrom atom.atom import AtomMeta\nfrom atom.api import (\n Atom, Subclass, ContainerList, Int, Dict, Instance, Typed, Property, Str,\n ForwardInstance, Value, Bool, List\n)\nfrom functools import wraps\nfrom sqlalchemy.engine import ddl, strategies\nfrom sqlalchemy.sql import schema\nfrom sqlalchemy.orm.query import Query\nfrom sqlalchemy import func\n\n\nfrom .base import (\n ModelManager, ModelSerializer, Model, ModelMeta, find_subclasses,\n JSONModel, JSONSerializer\n)\n\n\n# kwargs reserved for sqlalchemy table columns\nCOLUMN_KWARGS = (\n 'autoincrement', 'default', 'doc', 'key', 'index', 'info', 'nullable',\n 'onupdate', 'primary_key', 'server_default', 'server_onupdate',\n 'quote', 'unique', 'system', 'comment'\n)\nFK_TYPES = (api.Instance, api.Typed, api.ForwardInstance, api.ForwardTyped)\n\n# ops that can be used with django-style queries\nQUERY_OPS = {\n 'eq': '__eq__',\n 'gt': '__gt__',\n 'gte': '__ge__',\n 'ge': '__ge__',\n 
'lt': '__lt__',\n 'le': '__le__',\n 'lte': '__le__',\n 'all': 'all_',\n 'any': 'any_',\n 'ne': '__ne__',\n 'not': '__ne__',\n 'contains': 'contains',\n 'endswith': 'endswith',\n 'ilike': 'ilike',\n 'in': 'in_',\n 'is': 'is_',\n 'is_distinct_from': 'is_distinct_from',\n 'isnot': 'isnot',\n 'isnot_distinct_from': 'isnot_distinct_from',\n 'like': 'like',\n 'match': 'match',\n 'notilike': 'notilike',\n 'notlike': 'notlike',\n 'notin': 'notin_',\n 'startswith': 'startswith',\n}\n\n# Fields supported on the django style Meta class of a model\nVALID_META_FIELDS = (\n 'db_table', 'unique_together', 'abstract', 'constraints', 'triggers'\n)\n\n# Constraint naming conventions\nCONSTRAINT_NAMING_CONVENTIONS = {\n \"ix\": \"ix_%(column_0_label)s\",\n \"uq\": \"uq_%(table_name)s_%(column_0_N_name)s\",\n # Using \"ck_%(table_name)s_%(constraint_name)s\" is preferred but it causes\n # issues using Bool on mysql\n \"ck\": \"ck_%(table_name)s_%(column_0_N_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\"\n}\n\nlog = logging.getLogger('atomdb.sql')\n\n\ndef find_sql_models():\n \"\"\" Finds all non-abstract imported SQLModels by looking up subclasses\n of the SQLModel.\n\n Yields\n ------\n cls: SQLModel\n\n \"\"\"\n for model in find_subclasses(SQLModel):\n # Get model Meta class\n meta = getattr(model, 'Meta', None)\n if meta:\n # If this is marked as abstract ignore it\n if getattr(meta, 'abstract', False):\n continue\n yield model\n\n\nclass Relation(ContainerList):\n \"\"\" A member which serves as a fk relation backref\n\n \"\"\"\n __slots__ = ('_to',)\n\n def __init__(self, item, default=None):\n super(Relation, self).__init__(ForwardInstance(item), default=None)\n self._to = None\n\n def resolve(self):\n return self.to\n\n @property\n def to(self):\n to = self._to\n if not to:\n to = self._to = resolve_member_type(self.validate_mode[-1])\n return to\n\n\ndef py_type_to_sql_column(model, member, cls, **kwargs):\n \"\"\" Convert the python type to an alchemy table column type\n\n \"\"\"\n if issubclass(cls, JSONModel):\n return sa.JSON(**kwargs)\n elif issubclass(cls, Model):\n name = f'{cls.__model__}.{cls.__pk__}'\n cls.__backrefs__.add((model, member))\n\n # Determine the type of the foreign key\n column = create_table_column(cls, cls._id)\n return (column.type, sa.ForeignKey(name, **kwargs))\n elif issubclass(cls, str):\n return sa.String(**kwargs)\n elif issubclass(cls, int):\n return sa.Integer(**kwargs)\n elif issubclass(cls, float):\n return sa.Float(**kwargs)\n elif issubclass(cls, dict):\n return sa.JSON(**kwargs)\n elif issubclass(cls, (tuple, list)):\n return sa.ARRAY(**kwargs)\n elif issubclass(cls, datetime.datetime):\n return sa.DateTime(**kwargs)\n elif issubclass(cls, datetime.date):\n return sa.Date(**kwargs)\n elif issubclass(cls, datetime.time):\n return sa.Time(**kwargs)\n elif issubclass(cls, datetime.timedelta):\n return sa.Interval(**kwargs)\n elif issubclass(cls, (bytes, bytearray)):\n return sa.LargeBinary(**kwargs)\n elif issubclass(cls, Decimal):\n return sa.Numeric(**kwargs)\n raise NotImplementedError(\n f\"A type for {member.name} of {model} ({cls}) could not be \"\n f\"determined automatically, please specify it manually by tagging it \"\n f\"with .tag(column=<type>) or set `store=False`\")\n\n\ndef resolve_member_type(member):\n \"\"\" Determine the type specified on a member to determine ForeignKey\n relations.\n\n Parameters\n ----------\n member: atom.catom.Member\n The member to retrieve the type from\n 
Returns\n -------\n object: Model or object\n The type specified.\n\n \"\"\"\n if hasattr(member, 'resolve'):\n return member.resolve()\n return member.validate_mode[-1]\n\n\ndef resolve_member_column(model, field, related_clauses=None):\n \"\"\" Get the sqlalchemy column for the given model and field.\n\n Parameters\n ----------\n model: atomdb.sql.Model\n The model to lookup\n field: String\n The field name\n\n Returns\n -------\n result: Column\n The sqlalchemy column for the given field.\n\n \"\"\"\n if model is None or not field:\n raise ValueError(\"Invalid field %s on %s\" % (field, model))\n\n # Walk the relations\n if '__' in field:\n path = field\n *related_parts, field = field.split(\"__\")\n clause = \"__\".join(related_parts)\n if related_clauses is not None and clause not in related_clauses:\n related_clauses.append(clause)\n\n # Follow the FK lookups\n # Rename so the original lookup path is retained if an error occurs\n rel_model = model\n for part in related_parts:\n m = rel_model.members().get(part)\n if m is None:\n raise ValueError(\"Invalid field %s on %s\" % (path, model))\n rel_model = resolve_member_type(m)\n if rel_model is None:\n raise ValueError(\"Invalid field %s on %s\" % (path, model))\n model = rel_model\n\n # Lookup the member\n m = model.members().get(field)\n if m is not None:\n if m.metadata:\n # If the field has a different name assigned use that\n field = m.metadata.get('name', field)\n if isinstance(m, Relation):\n # Support looking up columns through a relation by the pk\n model = m.to\n\n # Add the through table to the related clauses if needed\n if related_clauses is not None and field not in related_clauses:\n related_clauses.append(field)\n\n field = model.__pk__\n\n # Finally get the column from the table\n col = model.objects.table.columns.get(field)\n if col is None:\n raise ValueError(\"Invalid field %s on %s\" % (field, model))\n return col\n\n\ndef atom_member_to_sql_column(model, member, **kwargs):\n \"\"\" Convert the atom member type to an sqlalchemy table column type\n See https://docs.sqlalchemy.org/en/latest/core/type_basics.html\n\n \"\"\"\n if hasattr(member, 'get_column_type'):\n # Allow custom members to define the column type programmatically\n return member.get_column_type(model)\n elif isinstance(member, api.Str):\n return sa.String(**kwargs)\n elif hasattr(api, 'Unicode') and isinstance(member, api.Unicode):\n return sa.Unicode(**kwargs)\n elif isinstance(member, api.Bool):\n return sa.Boolean()\n elif isinstance(member, api.Int):\n return sa.Integer()\n elif hasattr(api, 'Long') and isinstance(member, api.Long):\n return sa.BigInteger()\n elif isinstance(member, api.Float):\n return sa.Float()\n elif isinstance(member, api.Range):\n # TODO: Add min / max\n return sa.Integer()\n elif isinstance(member, api.FloatRange):\n # TODO: Add min / max\n return sa.Float()\n elif isinstance(member, api.Enum):\n return sa.Enum(*member.items, name=member.name)\n elif isinstance(member, api.IntEnum):\n return sa.SmallInteger()\n elif isinstance(member, FK_TYPES):\n value_type = resolve_member_type(member)\n if value_type is None:\n raise TypeError(\"Instance and Typed members must specify types\")\n return py_type_to_sql_column(model, member, value_type, **kwargs)\n elif isinstance(member, Relation):\n # Relations are for backrefs\n item_type = member.validate_mode[-1]\n if item_type is None:\n raise TypeError(\"Relation members must specify types\")\n\n # Resolve the item type\n value_type = 
resolve_member_type(item_type)\n if value_type is None:\n raise TypeError(\"Relation members must specify types\")\n return None # Relations are just syntactic sugar\n elif isinstance(member, (api.List, api.ContainerList, api.Tuple)):\n item_type = member.validate_mode[-1]\n if item_type is None:\n raise TypeError(\"List and Tuple members must specify types\")\n\n # Resolve the item type\n value_type = resolve_member_type(item_type)\n if value_type is None:\n raise TypeError(\"List and Tuple members must specify types\")\n if issubclass(value_type, JSONModel):\n return sa.JSON(**kwargs)\n t = py_type_to_sql_column(model, member, value_type, **kwargs)\n if isinstance(t, tuple):\n t = t[0] # Use only the value type\n return sa.ARRAY(t)\n elif isinstance(member, api.Bytes):\n return sa.LargeBinary(**kwargs)\n elif isinstance(member, api.Dict):\n return sa.JSON(**kwargs)\n raise NotImplementedError(\n f\"A column for {member.name} of {model} could not be determined \"\n f\"automatically, please specify it manually by tagging it \"\n f\"with .tag(column=<type>)\")\n\n\ndef create_table_column(model, member):\n \"\"\" Converts an Atom member into a sqlalchemy data type.\n\n Parameters\n ----------\n model: Model\n The model which owns this member\n member: Member\n The atom member\n\n Returns\n -------\n column: Column\n An sqlalchemy column\n\n References\n ----------\n 1. https://docs.sqlalchemy.org/en/latest/core/types.html\n\n \"\"\"\n if hasattr(member, 'get_column'):\n # Allow custom members to define the column programmatically\n return member.get_column(model)\n\n # Copy the metadata as we modify it\n metadata = member.metadata.copy() if member.metadata else {}\n\n # If a column is specified use that\n if 'column' in metadata:\n return metadata['column']\n\n metadata.pop('store', None)\n column_name = metadata.pop('name', member.name)\n column_type = metadata.pop('type', None)\n\n # Extract column kwargs from member metadata\n kwargs = {}\n for k in COLUMN_KWARGS:\n if k in metadata:\n kwargs[k] = metadata.pop(k)\n\n if column_type is None:\n args = atom_member_to_sql_column(model, member, **metadata)\n if args is None:\n return None\n if not isinstance(args, (tuple, list)):\n args = (args,)\n elif isinstance(column_type, (tuple, list)):\n args = column_type\n else:\n args = (column_type,)\n return sa.Column(column_name, *args, **kwargs)\n\n\ndef create_table(model, metadata):\n \"\"\" Create an sqlalchemy table by inspecting the Model and generating\n a column for each member.\n\n Parameters\n ----------\n model: SQLModel\n The atom model\n\n References\n ----------\n 1. 
https://docs.sqlalchemy.org/en/latest/core/metadata.html\n\n \"\"\"\n name = model.__model__\n members = model.members()\n args = []\n\n # Add columns\n for f in model.__fields__:\n column = create_table_column(model, members[f])\n if column is not None:\n args.append(column)\n\n # Add table metadata\n meta = getattr(model, 'Meta', None)\n if meta:\n # Abstract field\n abstract = getattr(meta, 'abstract', False)\n if abstract:\n raise NotImplementedError(\n f\"Tables cannot be created for abstract models: {model}\")\n\n # Unique constraints\n unique_together = getattr(meta, 'unique_together', None)\n if unique_together is not None:\n if not isinstance(unique_together, (tuple, list)):\n raise TypeError(\"Meta unique_together must be a tuple or list\")\n if isinstance(unique_together[0], str):\n unique_together = [unique_together]\n for constraint in unique_together:\n if isinstance(constraint, (tuple, list)):\n constraint = sa.UniqueConstraint(*constraint)\n args.append(constraint)\n\n # Check constraints\n constraints = getattr(meta, 'constraints', None)\n if constraints is not None:\n if not isinstance(constraints, (tuple, list)):\n raise TypeError(\"Meta constraints must be a tuple or list\")\n args.extend(constraints)\n\n # Create table\n table = sa.Table(name, metadata, *args)\n\n # Hook up any database triggers defined\n triggers = getattr(meta, 'triggers', None)\n if triggers is not None:\n if isinstance(triggers, dict):\n triggers = list(triggers.items())\n elif not isinstance(triggers, (tuple, list)):\n raise TypeError(\"Meta triggers must be a dict, tuple, or list\")\n for event, trigger in triggers:\n # Allow triggers to be a lambda that generates one\n if not isinstance(trigger, sa.schema.DDL) and callable(trigger):\n trigger = trigger()\n sa.event.listen(table, event, trigger)\n\n return table\n\n\nclass SQLModelSerializer(ModelSerializer):\n \"\"\" Uses sqlalchemy to lookup the model.\n\n \"\"\"\n def flatten_object(self, obj, scope):\n \"\"\" Serialize a model for entering into the database\n\n Parameters\n ----------\n obj: Model\n The object to flatten\n scope: Dict\n The scope of references available for circular lookups\n\n Returns\n -------\n result: Object\n The flattened object\n\n \"\"\"\n return obj._id\n\n async def get_object_state(self, obj, state, scope):\n \"\"\" Load the object state if needed. Since the __model__ is not saved\n to the db tables with SQL we know that it's \"probably\" there\n because a query was used.\n \"\"\"\n ModelType = obj.__class__\n if '__model__' in state:\n return state # Joined already\n q = ModelType.objects.query(None, _id=state['_id'])\n return await ModelType.objects.fetchone(q)\n\n def _default_registry(self):\n \"\"\" Add all sql and json models to the registry\n \"\"\"\n registry = JSONSerializer.instance().registry.copy()\n registry.update({m.__model__: m for m in find_sql_models()})\n return registry\n\n\nclass SQLModelManager(ModelManager):\n \"\"\" Manages models via aiopg, aiomysql, or similar libraries supporting\n SQLAlchemy tables. 
It stores a table for each class and when accessed\n on a Model subclass it returns a table proxy binding.\n\n \"\"\"\n\n #: Constraint naming conventions\n conventions = Dict(default=CONSTRAINT_NAMING_CONVENTIONS)\n\n #: Metadata\n metadata = Instance(sa.MetaData)\n\n #: Table proxy cache\n proxies = Dict()\n\n #: Cache results\n cache = Bool(True)\n\n def _default_metadata(self):\n return sa.MetaData(\n SQLBinding(manager=self),\n naming_convention=self.conventions)\n\n def create_tables(self):\n \"\"\" Create sqlalchemy tables for all registered SQLModels\n\n \"\"\"\n tables = {}\n for cls in find_sql_models():\n table = cls.__table__\n if table is None:\n table = cls.__table__ = create_table(cls, self.metadata)\n if not table.metadata.bind:\n table.metadata.bind = SQLBinding(manager=self, table=table)\n tables[cls] = table\n return tables\n\n def __get__(self, obj, cls=None):\n \"\"\" Retrieve the table for the requested object or class.\n\n \"\"\"\n cls = cls or obj.__class__\n if not issubclass(cls, Model):\n return self # Only return the client when used from a Model\n proxy = self.proxies.get(cls)\n if proxy is None:\n table = cls.__table__\n if table is None:\n table = cls.__table__ = create_table(cls, self.metadata)\n proxy = self.proxies[cls] = SQLTableProxy(table=table, model=cls)\n return proxy\n\n def _default_database(self):\n raise EnvironmentError(\"No database engine has been set. Use \"\n \"SQLModelManager.instance().database = \")\n\n\nclass ConnectionProxy(Atom):\n \"\"\" A wrapper for a connection to be used with async with syntax that\n does nothing but passes the existing connection when entered.\n\n \"\"\"\n connection = Value()\n\n async def __aenter__(self):\n return self.connection\n\n async def __aexit__(self, exc_type, exc, tb):\n pass\n\n\nclass SQLTableProxy(Atom):\n #: Table this is a proxy to\n table = Instance(sa.Table)\n\n #: Model which owns the table\n model = Subclass(Model)\n\n #: Cache of pk: obj using weakrefs\n cache = Typed(weakref.WeakValueDictionary, ())\n\n #: Key used to pull the connection out of filter kwargs\n connection_kwarg = Str('connection')\n\n @property\n def engine(self):\n return self.table.bind.manager.database\n\n def connection(self, connection=None):\n \"\"\" Create a new connection or return the given connection as an async\n contextual object.\n\n Parameters\n ----------\n connection: Database connection or None\n The connection to return\n\n Returns\n -------\n connection: Database connection\n The database connection or one that may be used with async with\n\n \"\"\"\n if connection is None:\n return self.engine.acquire()\n return ConnectionProxy(connection=connection)\n\n def create_table(self):\n \"\"\" A wrapper for create which catches the create queries then executes\n them\n \"\"\"\n table = self.table\n table.create()\n return table.bind.wait()\n\n def drop_table(self):\n table = self.table\n table.drop()\n return table.bind.wait()\n\n async def execute(self, *args, **kwargs):\n connection = kwargs.pop(self.connection_kwarg, None)\n async with self.connection(connection) as conn:\n return await conn.execute(*args, **kwargs)\n\n async def fetchall(self, query, connection=None):\n \"\"\" Fetch all results for the query.\n\n Parameters\n ----------\n query: String or Query\n The query to execute\n connection: Database connection\n The connection to use or a new one will be created\n\n Returns\n -------\n rows: List\n List of rows returned, NOT objects\n\n \"\"\"\n async with self.connection(connection) as conn:\n r = 
await conn.execute(query)\n return await r.fetchall()\n\n async def fetchmany(self, query, size=None, connection=None):\n \"\"\" Fetch size results for the query.\n\n Parameters\n ----------\n query: String or Query\n The query to execute\n size: Int or None\n The number of results to fetch\n connection: Database connection\n The connection to use or a new one will be created\n\n Returns\n -------\n rows: List\n List of rows returned, NOT objects\n\n \"\"\"\n async with self.connection(connection) as conn:\n r = await conn.execute(query)\n return await r.fetchmany(size)\n\n async def fetchone(self, query, connection=None):\n \"\"\" Fetch a single result for the query.\n\n Parameters\n ----------\n query: String or Query\n The query to execute\n connection: Database connection\n The connection to use or a new one will be created\n\n Returns\n -------\n rows: Object or None\n The row returned or None\n \"\"\"\n async with self.connection(connection) as conn:\n r = await conn.execute(query)\n return await r.fetchone()\n\n async def scalar(self, query, connection=None):\n \"\"\" Fetch the scalar result for the query.\n\n Parameters\n ----------\n query: String or Query\n The query to execute\n connection: Database connection\n The connection to use or a new one will be created\n\n Returns\n -------\n result: Object or None\n The the first column of the first row or None\n \"\"\"\n async with self.connection(connection) as conn:\n r = await conn.execute(query)\n return await r.scalar()\n\n async def get_or_create(self, **filters):\n \"\"\" Get or create a model matching the given criteria\n\n Parameters\n ----------\n filters: Dict\n The filters to use to retrieve the object\n\n Returns\n -------\n result: Tuple[Model, Bool]\n A tuple of the object and a bool indicating if it was just created\n\n \"\"\"\n obj = await self.get(**filters)\n if obj is not None:\n return (obj, False)\n connection_kwarg = self.connection_kwarg\n connection = filters.get(connection_kwarg)\n state = {k: v for k, v in filters.items()\n if '__' not in k and k != connection_kwarg}\n obj = self.model(**state)\n await obj.save(force_insert=True, connection=connection)\n return (obj, True)\n\n async def create(self, **state):\n \"\"\" Create a and save model with the given state.\n\n The connection parameter is popped from this state.\n\n Parameters\n ----------\n state: Dict\n The state to use to initialize the object.\n\n Returns\n -------\n result: Tuple[Model, Bool]\n A tuple of the object and a bool indicating if it was just created\n\n \"\"\"\n connection = state.pop(self.connection_kwarg, None)\n obj = self.model(**state)\n await obj.save(force_insert=True, connection=connection)\n return obj\n\n def __getattr__(self, name):\n \"\"\" All other fields are delegated to the query set\n\n \"\"\"\n return getattr(SQLQuerySet(proxy=self), name)\n\n\nclass SQLQuerySet(Atom):\n #: Proxy\n proxy = Instance(SQLTableProxy)\n connection = Value()\n\n filter_clauses = List()\n related_clauses = List()\n outer_join = Bool()\n order_clauses = List()\n limit_count = Int()\n query_offset = Int()\n\n def clone(self, **kwargs):\n state = self.__getstate__()\n state.update(kwargs)\n return self.__class__(**state)\n\n def query(self, query_type='select', *columns, **kwargs):\n if kwargs:\n return self.filter(**kwargs).query(query_type)\n p = self.proxy\n tables = [p.table]\n from_table = p.table\n model = p.model\n members = model.members()\n use_labels = bool(self.related_clauses)\n outer_join = self.outer_join\n for clause in 
self.related_clauses:\n from_table = p.table\n for part in clause.split(\"__\"):\n m = members.get(part)\n rel_model = resolve_member_type(m)\n table = rel_model.objects.table\n from_table = sa.join(from_table, table, isouter=outer_join)\n tables.append(table)\n\n if query_type == 'select':\n q = sa.select(columns or tables, use_labels=use_labels)\n q = q.select_from(from_table)\n elif query_type == 'delete':\n q = sa.delete(from_table)\n elif query_type == 'update':\n q = sa.update(from_table)\n else:\n raise ValueError(\"Unsupported query type\")\n\n if self.filter_clauses:\n if len(self.filter_clauses) == 1:\n q = q.where(self.filter_clauses[0])\n else:\n q = q.where(sa.and_(*self.filter_clauses))\n\n if self.order_clauses:\n q = q.order_by(*self.order_clauses)\n\n if self.limit_count:\n q = q.limit(self.limit_count)\n\n if self.query_offset:\n q = q.offset(self.query_offset)\n\n return q\n\n def select_related(self, *related, outer_join=None):\n \"\"\" Define related fields to join in the query.\n\n Parameters\n ----------\n args: List[str]\n List of related fields to join.\n outer_join: Bool\n If given set whether or not a left outer join is used.\n\n Returns\n -------\n query: SQLQuerySet\n A clone of this queryset with the related field terms added.\n\n \"\"\"\n outer_join = self.outer_join if outer_join is None else outer_join\n return self.clone(\n related_clauses=self.related_clauses + list(related),\n outer_join=outer_join)\n\n def order_by(self, *args):\n \"\"\" Order the query by the given fields.\n\n Parameters\n ----------\n args: List[str or column]\n Fields to order by. A \"-\" prefix denotes decending.\n\n Returns\n -------\n query: SQLQuerySet\n A clone of this queryset with the ordering terms added.\n\n \"\"\"\n order_clauses = self.order_clauses[:]\n related_clauses = self.related_clauses[:]\n model = self.proxy.model\n for arg in args:\n if isinstance(arg, str):\n # Convert django-style to sqlalchemy ordering column\n if arg[0] == '-':\n field = arg[1:]\n ascending = False\n else:\n field = arg\n ascending = True\n\n col = resolve_member_column(model, field, related_clauses)\n\n if ascending:\n clause = col.asc()\n else:\n clause = col.desc()\n else:\n clause = arg\n if clause not in order_clauses:\n order_clauses.append(clause)\n return self.clone(order_clauses=order_clauses,\n related_clauses=related_clauses)\n\n def filter(self, *args, **kwargs):\n \"\"\" Filter the query by the given parameters. 
This accepts sqlalchemy\n filters by arguments and django-style parameters as kwargs.\n\n Parameters\n ----------\n args: List\n List of sqlalchemy filters\n kwargs: Dict[str, object]\n Django style filters to use\n\n Returns\n -------\n query: SQLQuerySet\n A clone of this queryset with the filter terms added.\n\n \"\"\"\n p = self.proxy\n filter_clauses = self.filter_clauses + list(args)\n related_clauses = self.related_clauses[:]\n\n connection_kwarg = p.connection_kwarg\n connection = self.connection\n\n # Build the filter operations\n for k, v in kwargs.items():\n # Ignore connection parameter\n if k == connection_kwarg:\n connection = v\n continue\n model = p.model\n op = \"eq\"\n if \"__\" in k:\n parts = k.split(\"__\")\n if parts[-1] in QUERY_OPS:\n op = parts[-1]\n k = \"__\".join(parts[:-1])\n col = resolve_member_column(model, k, related_clauses)\n\n # Support lookups by model\n if isinstance(v, Model):\n v = v.serializer.flatten_object(v, scope=None)\n elif op in ('in', 'notin'):\n # Flatten lists when using in or notin ops\n v = model.serializer.flatten(v, scope=None)\n\n clause = getattr(col, QUERY_OPS[op])(v)\n filter_clauses.append(clause)\n\n return self.clone(\n connection=connection,\n filter_clauses=filter_clauses,\n related_clauses=related_clauses)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n offset = key.start or 0\n limit = key.stop - key.start if key.stop else 0\n elif isinstance(key, int):\n limit = 1\n offset = key\n else:\n raise TypeError(\"Invalid key\")\n if offset < 0:\n raise ValueError(\"Cannot use a negative offset\")\n if limit < 0:\n raise ValueError(\"Cannot use a negative limit\")\n return self.clone(limit_count=limit, query_offset=offset)\n\n def limit(self, limit):\n return self.clone(limit_count=limit)\n\n def offset(self, offset):\n return self.clone(query_offset=offset)\n\n # -------------------------------------------------------------------------\n # Query execution API\n # -------------------------------------------------------------------------\n async def values(self, *args, distinct=False, flat=False, group_by=None):\n \"\"\" Returns the results as a list of dict instead of models.\n\n Parameters\n ----------\n args: List[str or column]\n List of columns to select\n distinct: Bool\n Return only distinct rows\n flat: Bool\n Requires exactly one arg and will flatten the result int a single\n list of values.\n group_by: List[str or column]\n Optional Columns to group by\n\n Returns\n -------\n results: List\n List of results depending on the parameters described above\n\n \"\"\"\n if flat and len(args) != 1:\n raise ValueError(\n \"Values with flat=True can only have one param\")\n if args:\n model = self.proxy.model\n columns = []\n for col in args:\n if isinstance(col, str):\n col = resolve_member_column(model, col)\n columns.append(col)\n q = self.query('select', *columns)\n else:\n q = self.query('select')\n if group_by is not None:\n q = q.group_by(group_by)\n if distinct:\n q = q.distinct()\n cursor = await self.proxy.fetchall(q, connection=self.connection)\n if flat:\n return [row[0] for row in cursor]\n return cursor\n\n async def count(self, *args, **kwargs):\n if args or kwargs:\n return await self.filter(*args, **kwargs).count()\n subq = self.query('select').alias('subquery')\n q = sa.func.count().select().select_from(subq)\n return await self.proxy.scalar(q, connection=self.connection)\n\n def max(self, *columns):\n return self.aggregate(*columns, func=sa.func.max)\n\n def min(self, *columns):\n return 
self.aggregate(*columns, func=sa.func.min)\n\n def mode(self, *columns):\n return self.aggregate(*columns, func=sa.func.mode)\n\n def sum(self, *columns):\n return self.aggregate(*columns, func=sa.func.sum)\n\n def aggregate(self, *args, func=None):\n model = self.proxy.model\n columns = []\n for col in args:\n if isinstance(col, str):\n col = resolve_member_column(model, col)\n columns.append(func(col) if func is not None else col)\n subq = self.query('select').alias('subquery')\n q = sa.select(columns).select_from(subq)\n return self.proxy.fetchone(q, connection=self.connection)\n\n async def exists(self, *args, **kwargs):\n if args or kwargs:\n return await self.filter(*args, **kwargs).exists()\n q = sa.exists(self.query('select')).select()\n return await self.proxy.scalar(q, connection=self.connection)\n\n async def delete(self, *args, **kwargs):\n if args or kwargs:\n return await self.filter(*args, **kwargs).delete()\n q = self.query('delete')\n return await self.proxy.execute(q, connection=self.connection)\n\n async def update(self, **values):\n q = self.query('update').values(**values)\n return await self.proxy.execute(q, connection=self.connection)\n\n def __await__(self):\n # So await Model.objects.filter() works\n f = asyncio.ensure_future(self.all())\n yield from f\n return f.result()\n\n async def all(self, *args, **kwargs):\n if args or kwargs:\n return await self.filter(*args, **kwargs).all()\n q = self.query('select')\n restore = self.proxy.model.restore\n cursor = await self.proxy.fetchall(q, connection=self.connection)\n return [await restore(row) for row in cursor]\n\n async def get(self, *args, **kwargs):\n if args or kwargs:\n return await self.filter(*args, **kwargs).get()\n q = self.query('select')\n row = await self.proxy.fetchone(q, connection=self.connection)\n if row is not None:\n return await self.proxy.model.restore(row)\n\n\nclass SQLBinding(Atom):\n #: Model Manager\n manager = Instance(SQLModelManager)\n\n #: The queue\n queue = ContainerList()\n\n engine = property(lambda s: s)\n\n @property\n def name(self):\n return self.dialect.name\n\n @property\n def dialect(self):\n return self.manager.database.dialect\n\n def schema_for_object(self, obj):\n return obj.schema\n\n def contextual_connect(self, **kwargs):\n return self\n\n def connect(self, **kwargs):\n return self\n\n def execution_options(self, **kw):\n return self\n\n def compiler(self, statement, parameters, **kwargs):\n return self.dialect.compiler(\n statement, parameters, engine=self, **kwargs)\n\n def create(self, entity, **kwargs):\n kwargs[\"checkfirst\"] = False\n node = ddl.SchemaGenerator(self.dialect, self, **kwargs)\n node.traverse_single(entity)\n\n def drop(self, entity, **kwargs):\n kwargs[\"checkfirst\"] = False\n node = ddl.SchemaDropper(self.dialect, self, **kwargs)\n node.traverse_single(entity)\n\n def _run_ddl_visitor(\n self, visitorcallable, element, connection=None, **kwargs):\n kwargs[\"checkfirst\"] = False\n visitorcallable(self.dialect, self, **kwargs).traverse_single(element)\n\n def _run_visitor(\n self, visitorcallable, element, connection=None, **kwargs):\n kwargs[\"checkfirst\"] = False\n node = visitorcallable(self.dialect, self, **kwargs)\n node.traverse_single(element)\n\n def execute(self, object_, *multiparams, **params):\n self.queue.append((object_, multiparams, params))\n\n async def wait(self):\n engine = self.manager.database\n result = None\n async with engine.acquire() as conn:\n try:\n while self.queue:\n op, args, kwargs = self.queue.pop(0)\n result = 
await conn.execute(op, args)\n finally:\n self.queue = [] # Wipe queue on error\n return result\n\n\nclass SQLMeta(ModelMeta):\n \"\"\" Both the pk and _id are aliases to the primary key\n\n \"\"\"\n def __new__(meta, name, bases, dct):\n cls = ModelMeta.__new__(meta, name, bases, dct)\n\n members = cls.members()\n\n # If a pk field is defined use that instead of _id\n pk = None\n for m in members.values():\n if m.name == '_id':\n continue\n if m.metadata and m.metadata.get('primary_key'):\n if pk is not None:\n raise NotImplementedError(\n \"Using multiple primary keys is not yet supported.\")\n pk = m\n\n if pk:\n cls._id = pk\n members['_id'] = pk\n cls.__fields__ = tuple((f for f in cls.__fields__ if f != '_id'))\n\n # Set the pk name\n cls.__pk__ = (cls._id.metadata or {}).get('name', cls._id.name)\n\n # Set to the sqlalchemy Table\n cls.__table__ = None\n\n # Will be set to the table model by manager, not done here to avoid\n # import errors that may occur\n cls.__backrefs__ = set()\n\n # If a Meta class is defined check its validity and if extending\n # do not inherit the abstract attribute\n Meta = dct.get('Meta', None)\n if Meta is not None:\n for f in dir(Meta):\n if f.startswith('_'):\n continue\n if f not in VALID_META_FIELDS:\n raise TypeError(\n f'{f} is not a valid Meta field on {cls}.')\n\n db_table = getattr(Meta, 'db_table', None)\n if db_table:\n cls.__model__ = db_table\n\n # If this inherited from an abstract model but didn't specify\n # Meta info make the subclass not abstract unless it was redefined\n base_meta = getattr(cls, 'Meta', None)\n if base_meta and getattr(base_meta, 'abstract', None):\n if not Meta:\n class Meta(base_meta):\n abstract = False\n cls.Meta = Meta\n elif getattr(Meta, 'abstract', None) is None:\n Meta.abstract = False\n\n # Create a set of fields to remove from state before saving to the db\n # this removes Relation instances and several fields needed for json\n excluded_fields = cls.__excluded_fields__ = {\n '__model__', '__ref__', '_id'\n }\n\n for name, member in cls.members().items():\n if isinstance(member, Relation):\n excluded_fields.add(name)\n\n # Cache the mapping of any renamed fields\n renamed_fields = cls.__renamed_fields__ = {}\n for old_name, member in cls.members().items():\n if member.metadata:\n new_name = member.metadata.get('name')\n if new_name is not None:\n renamed_fields[old_name] = new_name\n\n return cls\n\n\nclass SQLModel(Model, metaclass=SQLMeta):\n \"\"\" A model that can be saved and restored to and from a database supported\n by sqlalchemy.\n\n \"\"\"\n\n #: If no other member is tagged with primary_key=True this is used\n _id = Typed(int).tag(store=True, primary_key=True)\n\n #: Use SQL serializer\n serializer = SQLModelSerializer.instance()\n\n #: Use SQL object manager\n objects = SQLModelManager.instance()\n\n @classmethod\n async def restore(cls, state, force=None):\n \"\"\" Restore an object from the database using the primary key. Save\n a ref in the table's object cache. If force is True, update\n the cache if it exists.\n \"\"\"\n try:\n pk = state[f'{cls.__model__}_{cls.__pk__}']\n except KeyError:\n pk = state[cls.__pk__]\n\n # Check the default for force reloading\n if force is None:\n force = not cls.objects.table.bind.manager.cache\n\n # Check if this is in the cache\n cache = cls.objects.cache\n obj = cache.get(pk)\n if obj is None:\n # Create and cache it\n obj = cls.__new__(cls)\n cache[pk] = obj\n\n # This ideally should only be done if created\n await obj.__restorestate__(state)\n elif force or not obj.__restored__:\n await obj.__restorestate__(state)\n\n return obj\n\n async def __restorestate__(self, state, scope=None):\n # Holds cleaned state extracted for this model which may come from\n # a DB row using labels or renamed columns\n cleaned_state = {}\n\n # Check if the state is using labels by looking for the pk field\n pk_label = f'{self.__model__}_{self.__pk__}'\n\n if pk_label in state:\n # Convert row to dict because it speeds up lookups\n state = dict(state)\n # Convert the joined tables into nested states\n table_name = self.__table__.name\n pk = state[pk_label]\n\n # Pull known fields\n for name, m in self.members().items():\n metadata = (m.metadata or {})\n field_name = metadata.get('name', name)\n field_label = f'{table_name}_{field_name}'\n\n if isinstance(m, FK_TYPES):\n RelModel = resolve_member_type(m)\n if issubclass(RelModel, SQLModel):\n # If the related model was joined, the pk field should\n # exist so automatically restore that as well\n rel_pk_name = f'{RelModel.__model__}_{RelModel.__pk__}'\n try:\n rel_id = state[field_label]\n except KeyError:\n rel_id = state.get(rel_pk_name)\n if rel_id:\n # Lookup in cache first to avoid recursion errors\n cache = RelModel.objects.cache\n obj = cache.get(rel_id)\n if obj is None:\n if rel_pk_name in state:\n obj = await RelModel.restore(state)\n else:\n # Create an unloaded model\n obj = RelModel.__new__(RelModel)\n cache[rel_id] = obj\n obj._id = rel_id\n cleaned_state[name] = obj\n continue\n\n elif isinstance(m, Relation):\n # Through must be a callable which returns a tuple of\n # the through table model\n through_factory = metadata.get('through')\n if through_factory:\n M2M, this_attr, rel_attr = through_factory()\n cleaned_state[name] = [\n getattr(r, rel_attr)\n for r in await M2M.objects.filter(\n **{this_attr: pk})\n ]\n else:\n # Skip relations\n continue\n\n # Regular fields\n try:\n cleaned_state[name] = state[field_label]\n except KeyError:\n continue\n\n else:\n # If any column names were redefined use those instead\n for name, m in self.members().items():\n field_name = (m.metadata or {}).get('name', name)\n\n try:\n v = state[field_name]\n except KeyError:\n continue\n\n # Attempt to lookup related fields from the cache\n if v is not None and isinstance(m, FK_TYPES):\n RelModel = resolve_member_type(m)\n if issubclass(RelModel, SQLModel):\n cache = RelModel.objects.cache\n obj = cache.get(v)\n if obj is None:\n # Create an unloaded model\n obj = RelModel.__new__(RelModel)\n cache[v] = obj\n obj._id = v\n v = obj\n elif issubclass(RelModel, JSONModel):\n v = await RelModel.restore(v)\n\n cleaned_state[name] = v\n await super().__restorestate__(cleaned_state, scope)\n\n async def load(self, connection=None, reload=False, fields=None):\n \"\"\" Alias to load this object from the database\n\n Parameters\n ----------\n connection: Connection\n The connection instance to use in a transaction\n reload: Bool\n If True force reloading the state even if the state has\n already been loaded.\n fields: Iterable[str]\n Optional list of field names to load. Use this to refresh\n specific fields from the database.\n\n \"\"\"\n skip = self.__restored__ and not reload and not fields\n if skip or self._id is None:\n return # Already loaded or won't do anything\n db = self.objects\n t = db.table\n if fields is not None:\n renamed = self.__renamed_fields__\n columns = (t.c[renamed.get(f, f)] for f in fields)\n q = sa.select(columns).select_from(t)\n else:\n q = t.select()\n q = q.where(t.c[self.__pk__] == self._id)\n state = await db.fetchone(q, connection=connection)\n await self.__restorestate__(state)\n\n async def save(self, force_insert=False, force_update=False,\n update_fields=None, connection=None):\n \"\"\" Alias to save this object to the database\n\n Parameters\n ----------\n force_insert: Bool\n Ensure that save performs an insert\n force_update: Bool\n Ensure that save performs an update\n update_fields: Iterable[str]\n If given, only update the given fields\n connection: Connection\n The connection instance to use in a transaction\n\n Returns\n -------\n result: Value\n Update or save result\n\n \"\"\"\n if force_insert and force_update:\n raise ValueError(\n 'Cannot use force_insert and force_update together')\n\n db = self.objects\n state = self.__getstate__()\n\n # Remove any fields that are in the state but should not go into the db\n for f in self.__excluded_fields__:\n state.pop(f, None)\n\n # Replace any renamed fields\n for old_name, new_name in self.__renamed_fields__.items():\n state[new_name] = state.pop(old_name)\n\n table = db.table\n async with db.connection(connection) as conn:\n if force_update or (self._id and not force_insert):\n\n # If update fields was given, only pass those\n if update_fields is not None:\n # Replace any update fields with the appropriate name\n renamed = self.__renamed_fields__\n update_fields = (renamed.get(f, f) for f in update_fields)\n\n # Replace update fields with only those given\n state = {f: state[f] for f in update_fields}\n\n q = table.update().where(\n table.c[self.__pk__] == self._id).values(**state)\n r = await conn.execute(q)\n if not r.rowcount:\n log.warning(\n f'Did not update \"{self}\", either no rows with '\n f'pk={self._id} exist or it has not changed.')\n else:\n if not self._id:\n # Postgres errors if using None for the pk\n state.pop(self.__pk__, None)\n q = table.insert().values(**state)\n r = await conn.execute(q)\n\n # Don't overwrite if force inserting\n if not self._id:\n if hasattr(r, 'lastrowid'):\n self._id = r.lastrowid # MySQL\n else:\n self._id = await r.scalar() # Postgres\n\n # Save a ref to the object in the model cache\n db.cache[self._id] = self\n self.__restored__ = True\n return r\n\n async def delete(self, connection=None):\n \"\"\" Alias to delete this object in the database \"\"\"\n pk = self._id\n if not pk:\n return\n db = self.objects\n table = db.table\n q = table.delete().where(table.c[self.__pk__] == pk)\n async with db.connection(connection) as conn:\n r = await conn.execute(q)\n if not r.rowcount:\n log.warning(\n f'Did not delete \"{self}\", no rows with '\n f'pk={self._id} exist.')\n del db.cache[pk]\n del self._id\n return r\n", "sub_path": "atomdb/sql.py", "file_name": "sql.py", "file_ext": "py", "file_size_in_byte": 48769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "atom.api.Instance", "line_number": 44, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 44, "usage_type": 
"name"}, {"api_name": "atom.api.Typed", "line_number": 44, "usage_type": "attribute"}, {"api_name": "atom.api.ForwardInstance", "line_number": 44, "usage_type": "attribute"}, {"api_name": "atom.api.ForwardTyped", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 91, "usage_type": "call"}, {"api_name": "base.find_subclasses", "line_number": 103, "usage_type": "call"}, {"api_name": "atom.api.ContainerList", "line_number": 113, "usage_type": "name"}, {"api_name": "atom.api.ForwardInstance", "line_number": 120, "usage_type": "call"}, {"api_name": "base.JSONModel", "line_number": 138, "usage_type": "argument"}, {"api_name": "sqlalchemy.JSON", "line_number": 139, "usage_type": "call"}, {"api_name": "base.Model", "line_number": 140, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 146, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 148, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 150, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 152, "usage_type": "call"}, {"api_name": "sqlalchemy.JSON", "line_number": 154, "usage_type": "call"}, {"api_name": "sqlalchemy.ARRAY", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 157, "usage_type": "attribute"}, {"api_name": "sqlalchemy.DateTime", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Date", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 161, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Time", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 163, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Interval", "line_number": 164, "usage_type": "call"}, {"api_name": "sqlalchemy.LargeBinary", "line_number": 166, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 167, "usage_type": "argument"}, {"api_name": "sqlalchemy.Numeric", "line_number": 168, "usage_type": "call"}, {"api_name": "atom.api.Str", "line_number": 265, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 265, "usage_type": "name"}, {"api_name": "sqlalchemy.String", "line_number": 266, "usage_type": "call"}, {"api_name": "atom.api", "line_number": 267, "usage_type": "argument"}, {"api_name": "atom.api.Unicode", "line_number": 267, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Unicode", "line_number": 268, "usage_type": "call"}, {"api_name": "atom.api.Bool", "line_number": 269, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 269, "usage_type": "name"}, {"api_name": "sqlalchemy.Boolean", "line_number": 270, "usage_type": "call"}, {"api_name": "atom.api.Int", "line_number": 271, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 271, "usage_type": "name"}, {"api_name": "sqlalchemy.Integer", "line_number": 272, "usage_type": "call"}, {"api_name": "atom.api", "line_number": 273, "usage_type": "argument"}, {"api_name": "atom.api.Long", "line_number": 273, "usage_type": "attribute"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 274, "usage_type": "call"}, {"api_name": "atom.api.Float", "line_number": 275, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 275, "usage_type": "name"}, {"api_name": "sqlalchemy.Float", "line_number": 276, "usage_type": "call"}, {"api_name": "atom.api.Range", "line_number": 277, 
"usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 277, "usage_type": "name"}, {"api_name": "sqlalchemy.Integer", "line_number": 279, "usage_type": "call"}, {"api_name": "atom.api.FloatRange", "line_number": 280, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 280, "usage_type": "name"}, {"api_name": "sqlalchemy.Float", "line_number": 282, "usage_type": "call"}, {"api_name": "atom.api.Enum", "line_number": 283, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 283, "usage_type": "name"}, {"api_name": "sqlalchemy.Enum", "line_number": 284, "usage_type": "call"}, {"api_name": "atom.api.IntEnum", "line_number": 285, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 285, "usage_type": "name"}, {"api_name": "sqlalchemy.SmallInteger", "line_number": 286, "usage_type": "call"}, {"api_name": "atom.api.List", "line_number": 303, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 303, "usage_type": "name"}, {"api_name": "atom.api.ContainerList", "line_number": 303, "usage_type": "attribute"}, {"api_name": "atom.api.Tuple", "line_number": 303, "usage_type": "attribute"}, {"api_name": "base.JSONModel", "line_number": 312, "usage_type": "argument"}, {"api_name": "sqlalchemy.JSON", "line_number": 313, "usage_type": "call"}, {"api_name": "sqlalchemy.ARRAY", "line_number": 317, "usage_type": "call"}, {"api_name": "atom.api.Bytes", "line_number": 318, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 318, "usage_type": "name"}, {"api_name": "sqlalchemy.LargeBinary", "line_number": 319, "usage_type": "call"}, {"api_name": "atom.api.Dict", "line_number": 320, "usage_type": "attribute"}, {"api_name": "atom.api", "line_number": 320, "usage_type": "name"}, {"api_name": "sqlalchemy.JSON", "line_number": 321, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 379, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 424, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 435, "usage_type": "call"}, {"api_name": "sqlalchemy.schema", "line_number": 446, "usage_type": "attribute"}, {"api_name": "sqlalchemy.event.listen", "line_number": 448, "usage_type": "call"}, {"api_name": "sqlalchemy.event", "line_number": 448, "usage_type": "attribute"}, {"api_name": "base.ModelSerializer", "line_number": 453, "usage_type": "name"}, {"api_name": "base.JSONSerializer.instance", "line_number": 489, "usage_type": "call"}, {"api_name": "base.JSONSerializer", "line_number": 489, "usage_type": "name"}, {"api_name": "base.ModelManager", "line_number": 494, "usage_type": "name"}, {"api_name": "atom.api.Dict", "line_number": 502, "usage_type": "call"}, {"api_name": "atom.api.Instance", "line_number": 505, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 505, "usage_type": "attribute"}, {"api_name": "atom.api.Dict", "line_number": 508, "usage_type": "call"}, {"api_name": "atom.api.Bool", "line_number": 511, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 514, "usage_type": "call"}, {"api_name": "base.Model", "line_number": 537, "usage_type": "argument"}, {"api_name": "atom.api.Atom", "line_number": 552, "usage_type": "name"}, {"api_name": "atom.api.Value", "line_number": 557, "usage_type": "call"}, {"api_name": "atom.api.Atom", "line_number": 566, "usage_type": "name"}, {"api_name": "atom.api.Instance", "line_number": 568, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 568, "usage_type": 
"attribute"}, {"api_name": "atom.api.Subclass", "line_number": 571, "usage_type": "call"}, {"api_name": "base.Model", "line_number": 571, "usage_type": "argument"}, {"api_name": "atom.api.Typed", "line_number": 574, "usage_type": "call"}, {"api_name": "weakref.WeakValueDictionary", "line_number": 574, "usage_type": "attribute"}, {"api_name": "atom.api.Str", "line_number": 577, "usage_type": "call"}, {"api_name": "atom.api.Atom", "line_number": 753, "usage_type": "name"}, {"api_name": "atom.api.Instance", "line_number": 755, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 756, "usage_type": "call"}, {"api_name": "atom.api.List", "line_number": 758, "usage_type": "call"}, {"api_name": "atom.api.List", "line_number": 759, "usage_type": "call"}, {"api_name": "atom.api.Bool", "line_number": 760, "usage_type": "call"}, {"api_name": "atom.api.List", "line_number": 761, "usage_type": "call"}, {"api_name": "atom.api.Int", "line_number": 762, "usage_type": "call"}, {"api_name": "atom.api.Int", "line_number": 763, "usage_type": "call"}, {"api_name": "sqlalchemy.join", "line_number": 786, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 790, "usage_type": "call"}, {"api_name": "sqlalchemy.delete", "line_number": 793, "usage_type": "call"}, {"api_name": "sqlalchemy.update", "line_number": 795, "usage_type": "call"}, {"api_name": "sqlalchemy.and_", "line_number": 803, "usage_type": "call"}, {"api_name": "base.Model", "line_number": 917, "usage_type": "argument"}, {"api_name": "sqlalchemy.func.count", "line_number": 1002, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 1002, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func", "line_number": 1009, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func", "line_number": 1015, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func", "line_number": 1023, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 1025, "usage_type": "call"}, {"api_name": "sqlalchemy.exists", "line_number": 1031, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 1046, "usage_type": "call"}, {"api_name": "atom.api.Atom", "line_number": 1067, "usage_type": "name"}, {"api_name": "atom.api.Instance", "line_number": 1069, "usage_type": "call"}, {"api_name": "atom.api.ContainerList", "line_number": 1072, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.ddl.SchemaGenerator", "line_number": 1102, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.ddl", "line_number": 1102, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.ddl.SchemaDropper", "line_number": 1107, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.ddl", "line_number": 1107, "usage_type": "name"}, {"api_name": "base.ModelMeta", "line_number": 1137, "usage_type": "name"}, {"api_name": "base.ModelMeta.__new__", "line_number": 1142, "usage_type": "call"}, {"api_name": "base.ModelMeta", "line_number": 1142, "usage_type": "name"}, {"api_name": "base.Model", "line_number": 1219, "usage_type": "name"}, {"api_name": "atom.api.Typed", "line_number": 1226, "usage_type": "call"}, {"api_name": "base.JSONModel", "line_number": 1353, "usage_type": "argument"}, {"api_name": "sqlalchemy.select", "line_number": 1382, "usage_type": "call"}]} +{"seq_id": "304514189", "text": "\"\"\"\nGeneral utility functions.\n\"\"\"\nimport datetime\nimport 
json\nfrom pathlib import Path\nfrom typing import Callable\n\nimport numpy as np\nimport talib\n\nfrom .object import BarData, TickData\nfrom .constant import Exchange, Interval, KlinePattern\nfrom .algorithm import Algorithm\nfrom talib import abstract\nfrom typing import List\n\nclass IntervalGen:\n\n def __init__(self, process, interval):\n self.process = process\n self.interval = interval\n # Instance attributes (previously class-level mutable attributes,\n # which would have been shared across all instances)\n self.data = []\n self.temp_data = []\n self.current = None\n \n def update(self, data):\n if len(self.temp_data) < self.interval:\n self.temp_data.append(data)\n self.current = self.process(self.temp_data)\n else:\n self.temp_data.append(data)\n self.current = self.process(self.temp_data)\n self.data.append(self.current * 1000)\n self.temp_data = []\n \n return self.current * 1000\n\n\ndef extract_vt_symbol(vt_symbol: str):\n \"\"\"\n :return: (symbol, exchange)\n \"\"\"\n symbol, exchange_str = vt_symbol.split(\".\")\n return symbol, Exchange(exchange_str)\n\n\ndef generate_vt_symbol(symbol: str, exchange: Exchange):\n \"\"\"\n return vt_symbol\n \"\"\"\n return f\"{symbol}.{exchange.value}\"\n\n\ndef _get_trader_dir(temp_name: str):\n \"\"\"\n Get path where trader is running in.\n \"\"\"\n cwd = Path.cwd()\n temp_path = cwd.joinpath(temp_name)\n\n # If .vntrader folder exists in current working directory,\n # then use it as trader running path.\n if temp_path.exists():\n return cwd, temp_path\n\n # Otherwise use home path of system.\n home_path = Path.home()\n temp_path = home_path.joinpath(temp_name)\n\n # Create .vntrader folder under home path if not exist.\n if not temp_path.exists():\n temp_path.mkdir()\n\n return home_path, temp_path\n\n\nTRADER_DIR, TEMP_DIR = _get_trader_dir(\".vntrader\")\n\n\ndef get_file_path(filename: str):\n \"\"\"\n Get path for temp file with filename.\n \"\"\"\n return TEMP_DIR.joinpath(filename)\n\n\ndef get_folder_path(folder_name: str):\n \"\"\"\n Get path for temp folder with folder name.\n \"\"\"\n folder_path = TEMP_DIR.joinpath(folder_name)\n if not folder_path.exists():\n folder_path.mkdir()\n return folder_path\n\n\ndef get_icon_path(filepath: str, ico_name: str):\n \"\"\"\n Get path for icon file with ico name.\n \"\"\"\n ui_path = Path(filepath).parent\n icon_path = ui_path.joinpath(\"ico\", ico_name)\n return str(icon_path)\n\n\ndef load_json(filename: str):\n \"\"\"\n Load data from json file in temp path.\n \"\"\"\n filepath = get_file_path(filename)\n\n if filepath.exists():\n with open(filepath, mode=\"r\", encoding=\"UTF-8\") as f:\n data = json.load(f)\n return data\n else:\n save_json(filename, {})\n return {}\n\n\ndef save_json(filename: str, data: dict):\n \"\"\"\n Save data into json file in temp path.\n \"\"\"\n filepath = get_file_path(filename)\n with open(filepath, mode=\"w+\", encoding=\"UTF-8\") as f:\n json.dump(\n data,\n f,\n indent=4,\n ensure_ascii=False\n )\n\n\ndef round_to(value: float, target: float):\n \"\"\"\n Round price to price tick value.\n \"\"\"\n rounded = int(round(value / target)) * target\n return rounded\n\n\nclass BarGenerator:\n \"\"\"\n For:\n 1. generating 1 minute bar data from tick data\n 2. generating x minute bar/x hour bar data from 1 minute data\n\n Notice:\n 1. for x minute bar, x must be able to divide 60: 2, 3, 5, 6, 10, 15, 20, 30\n 2. for x hour bar, x can be any number\n \"\"\"\n\n def __init__(\n self,\n on_bar: Callable,\n window: int = 0,\n on_window_bar: Callable = None,\n interval: Interval = Interval.MINUTE,\n tz_info = None\n ):\n \"\"\"Constructor\"\"\"\n self.bar = None\n self.on_bar = on_bar\n\n self.interval = interval\n self.interval_count = 0\n\n self.window = window\n self.window_bar = None\n self.on_window_bar = on_window_bar\n\n self.last_tick = None\n self.last_bar = None\n self.tz_info = tz_info\n\n def local_to_timezone(self, dt:datetime.datetime):\n if self.tz_info is None:\n return dt\n else:\n return datetime.datetime.fromtimestamp(dt.timestamp(), self.tz_info)\n\n def update_tick(self, tick: TickData):\n \"\"\"\n Update new tick data into generator.\n \"\"\"\n new_minute = False\n\n # Filter tick data with 0 last price\n if not tick.last_price:\n return\n\n if not self.bar:\n new_minute = True\n elif self.bar.datetime.minute != tick.datetime.minute:\n self.bar.datetime = self.bar.datetime.replace(\n second=0, microsecond=0\n )\n self.bar.datetime = self.local_to_timezone(self.bar.datetime)\n self.on_bar(self.bar)\n\n new_minute = True\n\n if new_minute:\n self.bar = BarData(\n symbol=tick.symbol,\n exchange=tick.exchange,\n interval=Interval.MINUTE,\n datetime=tick.datetime,\n gateway_name=tick.gateway_name,\n open_price=tick.last_price,\n high_price=tick.last_price,\n low_price=tick.last_price,\n close_price=tick.last_price,\n open_interest=tick.open_interest\n )\n else:\n self.bar.high_price = max(self.bar.high_price, tick.last_price)\n self.bar.low_price = min(self.bar.low_price, tick.last_price)\n self.bar.close_price = tick.last_price\n self.bar.open_interest = tick.open_interest\n self.bar.datetime = tick.datetime\n\n if self.last_tick:\n volume_change = tick.volume - self.last_tick.volume\n self.bar.volume += max(volume_change, 0)\n\n self.last_tick = tick\n\n def update_bar(self, bar: BarData):\n \"\"\"\n Update 1 minute bar into generator\n \"\"\"\n # If not inited, create window bar object\n if not self.window_bar:\n # Generate timestamp for bar data\n if self.interval == Interval.MINUTE:\n dt = bar.datetime.replace(second=0, microsecond=0)\n else:\n dt = bar.datetime.replace(minute=0, second=0, microsecond=0)\n dt = self.local_to_timezone(dt)\n self.window_bar = BarData(\n symbol=bar.symbol,\n exchange=bar.exchange,\n datetime=dt,\n gateway_name=bar.gateway_name,\n open_price=bar.open_price,\n high_price=bar.high_price,\n low_price=bar.low_price\n )\n # Otherwise, update high/low price into window bar\n else:\n self.window_bar.high_price = max(\n self.window_bar.high_price, bar.high_price)\n self.window_bar.low_price = min(\n self.window_bar.low_price, bar.low_price)\n\n # Update close price/volume into window bar\n self.window_bar.close_price = bar.close_price\n self.window_bar.volume += int(bar.volume)\n self.window_bar.open_interest = bar.open_interest\n\n # Check if window bar completed\n finished = False\n\n if self.interval == Interval.MINUTE:\n # x-minute bar\n if not (bar.datetime.minute + 1) % self.window:\n finished = True\n elif self.interval == Interval.HOUR:\n if self.last_bar and bar.datetime.hour != self.last_bar.datetime.hour:\n # 1-hour bar\n if self.window == 1:\n finished = True\n # x-hour bar\n else:\n self.interval_count += 1\n\n if not self.interval_count % self.window:\n finished = True\n self.interval_count = 0\n\n if finished:\n self.on_window_bar(self.window_bar)\n self.window_bar = None\n\n # Cache last bar object\n self.last_bar = bar\n\n def generate(self):\n \"\"\"\n Generate the bar data and call callback immediately.\n \"\"\"\n self.bar.datetime = self.bar.datetime.replace(\n second=0, microsecond=0\n )\n self.bar.datetime = self.local_to_timezone(self.bar.datetime)\n self.on_bar(self.bar)\n self.bar = None\n\n\nclass ArrayManager(object):\n \"\"\"\n For:\n 1. time series container of bar data\n 2. calculating technical indicator value\n \"\"\"\n\n def __init__(self, size=100):\n \"\"\"Constructor\"\"\"\n self.count = 0\n self.size = size\n self.inited = False\n\n self.open_array = np.zeros(size)\n self.high_array = np.zeros(size)\n self.low_array = np.zeros(size)\n self.close_array = np.zeros(size)\n self.volume_array = np.zeros(size)\n self.time_array = [1] * size\n self.range_array = np.zeros(size)\n self.extra_array = []\n self.pattern_init = False\n for i in range(size):\n self.extra_array.append({\"pattern\":[]})\n \n \n # def maxmin(data,fastk_period):\n # close_prices = np.nan_to_num(np.array([v['close'] for v in data]))\n # max_prices = np.nan_to_num(np.array([v['high'] for v in data]))\n # min_prices = np.nan_to_num(np.array([v['low'] for v in data]))\n \n # max_close = talib.MAX(self.high_array, timeperiod=fastk_period)\n # min_close = talib.MIN(self.low_array, timeperiod=fastk_period)\n \n # for k in range(len(min_prices)):\n # if k>1:\n # aaa = talib.MIN(min_prices,timeperiod=k)\n # bbb = talib.MAX(max_prices,timeperiod=k)\n # min_close[k]= aaa[k]\n # max_close[k]= bbb[k]\n # elif k==1 or k==0:\n # min_close[k]=min_prices[k]\n # max_close[k]=max_prices[k]\n \n # indicators= {\n # 'close': close_prices,\n # 'max': max_close,\n # 'min': min_close\n # }\n # return indicators\n def pattern(self, pattern_tags, start = -20):\n\n inputs = {\n 'open': self.open_array[start:],\n 'high': self.high_array[start:],\n 'low': self.low_array[start:],\n 'close': self.close_array[start:],\n 'volume': self.volume_array[start:]\n }\n\n result = []\n for tag in pattern_tags:\n func = abstract.Function(tag.value)\n r = func(inputs)\n w_r = np.where(r != 0)\n if len(w_r) > 0 and w_r[0].size > 0:\n for i in w_r[0]:\n index = i + start\n if tag not in self.extra_array[index][\"pattern\"]:\n self.extra_array[index][\"pattern\"].append(tag)\n result.append((tag,r[i]))\n\n # The first call returns an empty list\n if not self.pattern_init:\n self.pattern_init = True\n return []\n \n return result\n\n def wave(self, window = 0.0003):\n\n data = self.close_array\n \n return Algorithm.wave(data, window)\n\n def kdj(self, fastk_period=9, slowk_period=3, slowd_period=3):\n # Calculate the KDJ indicator\n return Algorithm.kdj(self.high_array, self.low_array, self.close_array, fastk_period, slowk_period, slowd_period)\n\n def update_bar(self, bar):\n \"\"\"\n Update new bar data into array manager.\n \"\"\"\n self.count += 1\n if not self.inited and self.count >= self.size:\n self.inited = True\n\n self.open_array[:-1] = self.open_array[1:]\n self.high_array[:-1] = self.high_array[1:]\n self.low_array[:-1] = self.low_array[1:]\n self.close_array[:-1] = self.close_array[1:]\n self.volume_array[:-1] = self.volume_array[1:]\n self.time_array[:-1] = self.time_array[1:]\n self.extra_array[:-1] = self.extra_array[1:]\n self.range_array[:-1] = self.range_array[1:]\n\n self.open_array[-1] = bar.open_price\n self.high_array[-1] = bar.high_price\n self.low_array[-1] = bar.low_price\n self.close_array[-1] = bar.close_price\n self.volume_array[-1] = bar.volume\n self.time_array[-1] = bar.datetime\n self.extra_array[-1] = {\"pattern\":[]}\n if self.count > 1:\n # range_array was already shifted above with the other arrays\n self.range_array[-1] = round(self.close_array[-1] / self.close_array[-2] - 1, 6)\n else:\n self.range_array[-1] = 0\n\n @property\n def time(self):\n \"\"\"\n Get bar datetime time series.\n \"\"\"\n return self.time_array\n\n @property\n def open(self):\n \"\"\"\n Get open price time series.\n \"\"\"\n return self.open_array\n\n @property\n def high(self):\n \"\"\"\n Get high price time series.\n \"\"\"\n return self.high_array\n\n @property\n def range(self):\n \"\"\"\n Get bar-to-bar price change ratio time series.\n \"\"\"\n return self.range_array\n\n @property\n def low(self):\n \"\"\"\n Get low price time series.\n \"\"\"\n return self.low_array\n\n @property\n def close(self):\n \"\"\"\n Get close price time series.\n \"\"\"\n return self.close_array\n\n @property\n def volume(self):\n \"\"\"\n Get trading volume time series.\n \"\"\"\n return self.volume_array\n\n def sma(self, n, array=False, length=None):\n \"\"\"\n Simple moving average.\n \"\"\"\n if array:\n if length is not None:\n result = talib.SMA(self.close[-length:], n)\n else:\n result = talib.SMA(self.close, n)\n return result\n else:\n length = n + 1\n result = talib.SMA(self.close[-length:], n)\n return result[-1]\n\n def std(self, n, array=False, length=None):\n \"\"\"\n Standard deviation\n \"\"\"\n if array:\n if length is not None:\n result = talib.STDDEV(self.close[-length:], n)\n else:\n result = talib.STDDEV(self.close, n)\n return result\n else:\n length = n + 1\n result = talib.STDDEV(self.close[-length:], n)\n return result[-1]\n\n def cci(self, n, array=False, length=None):\n \"\"\"\n Commodity Channel Index (CCI).\n \"\"\"\n \n if array:\n if length is not None:\n result = talib.CCI(self.high[-length:], self.low[-length:], self.close[-length:], n)\n else:\n result = talib.CCI(self.high, self.low, self.close, n)\n return result\n else:\n l = n+1\n result = talib.CCI(self.high[-l:], self.low[-l:], self.close[-l:], n)\n return result[-1]\n \n\n def atr(self, n, array=False, length=None):\n \"\"\"\n Average True Range (ATR).\n \"\"\"\n if array:\n if length is not None:\n result = talib.ATR(self.high[-length:], self.low[-length:], self.close[-length:], n)\n else:\n result = talib.ATR(self.high, self.low, self.close, n)\n return result\n else:\n l = n+1\n result = talib.ATR(self.high[-l:], self.low[-l:], self.close[-l:], n)\n return result[-1]\n\n def rsi(self, n, array=False, length=None):\n \"\"\"\n Relative Strength Index (RSI).\n \"\"\"\n if array:\n if length is not None:\n result = talib.RSI(self.close[-length:], n)\n else:\n result = talib.RSI(self.close, n)\n return result\n else:\n l = n+1\n result = talib.RSI(self.close[-l:], n)\n return result[-1]\n\n def macd(self, fast_period, slow_period, signal_period, array=False, length=None):\n \"\"\"\n MACD.\n \"\"\"\n if length is not None:\n macd, signal, hist = talib.MACD(\n self.close[-length:], fast_period, slow_period, signal_period\n )\n else:\n macd, signal, hist = talib.MACD(\n self.close, fast_period, slow_period, signal_period\n ) \n if array:\n return macd, signal, hist\n return macd[-1], signal[-1], hist[-1]\n\n def adx(self, n, array=False, length=None):\n \"\"\"\n ADX.\n \"\"\"\n if array:\n if length is not None:\n result = talib.ADX(self.high[-length:], self.low[-length:], self.close[-length:], n)\n else:\n result = talib.ADX(self.high, self.low, self.close, n)\n return result\n else:\n l = n * 2\n result = talib.ADX(self.high[-l:], self.low[-l:], self.close[-l:], n)\n return result[-1]\n\n def boll(self, n, dev, array=False, length=None):\n \"\"\"\n Bollinger Channel.\n \"\"\"\n if length is not None:\n mid = self.sma(n, array, length=length)\n std = self.std(n, array, length=length)\n else:\n mid = self.sma(n, array)\n std = self.std(n, array) \n\n up = mid + std * dev\n down = mid - std * dev\n\n return up, down\n\n def keltner(self, n, dev, array=False):\n \"\"\"\n Keltner Channel.\n \"\"\"\n mid = self.sma(n, array)\n atr = self.atr(n, array)\n\n up = mid + atr * dev\n down = mid - atr * dev\n\n return up, down\n\n def donchian(self, n, array=False):\n \"\"\"\n Donchian Channel.\n \"\"\"\n up = talib.MAX(self.high, n)\n down = talib.MIN(self.low, n)\n\n if array:\n return up, down\n return up[-1], down[-1]\n\n\ndef virtual(func: \"callable\"):\n \"\"\"\n mark a function as \"virtual\", which means that this function can be overridden.\n any base class should use this or @abstractmethod to decorate all functions\n that can be (re)implemented by subclasses.\n \"\"\"\n return func\n", "sub_path": "vnpy/trader/utility.py", "file_name": "utility.py", "file_ext": "py", "file_size_in_byte": 18195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "constant.Exchange", "line_number": 45, "usage_type": "call"}, {"api_name": "constant.Exchange", "line_number": 48, "usage_type": "name"}, {"api_name": "pathlib.Path.cwd", "line_number": 59, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 59, "usage_type": "name"}, {"api_name": "pathlib.Path.home", "line_number": 68, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 102, "usage_type": "call"}, {"api_name": "json.load", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 128, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 157, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 159, "usage_type": "name"}, {"api_name": "constant.Interval", "line_number": 160, "usage_type": "name"}, {"api_name": "constant.Interval.MINUTE", "line_number": 160, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 178, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 182, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 182, "usage_type": "attribute"}, {"api_name": "object.TickData", "line_number": 184, "usage_type": "name"}, {"api_name": "object.BarData", "line_number": 206, "usage_type": "call"}, {"api_name": "constant.Interval.MINUTE", "line_number": 209, "usage_type": "attribute"}, {"api_name": "constant.Interval", "line_number": 209, "usage_type": "name"}, {"api_name": "object.BarData", "line_number": 231, "usage_type": "name"}, {"api_name": "constant.Interval.MINUTE", "line_number": 238, "usage_type": "attribute"}, {"api_name": "constant.Interval", "line_number": 238, "usage_type": "name"}, {"api_name": "object.BarData", "line_number": 243, "usage_type": "call"}, {"api_name": "constant.Interval.MINUTE", "line_number": 267, "usage_type": "attribute"}, {"api_name": "constant.Interval", "line_number": 267, "usage_type": "name"}, {"api_name": "constant.Interval.HOUR", "line_number": 271, "usage_type": "attribute"}, {"api_name": "constant.Interval", "line_number": 271, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 318, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 322, "usage_type": "call"}, {"api_name": "talib.abstract.Function", "line_number": 365, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 365, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 367, "usage_type": "call"}, {"api_name": "algorithm.Algorithm.wave", "line_number": 386, "usage_type": "call"}, {"api_name": "algorithm.Algorithm", "line_number": 386, "usage_type": "name"}, {"api_name": "algorithm.Algorithm.kdj", "line_number": 390, "usage_type": "call"}, {"api_name": "algorithm.Algorithm", "line_number": 390, "usage_type": "name"}, {"api_name": "talib.SMA", "line_number": 477, "usage_type": "call"}, {"api_name": "talib.SMA", "line_number": 479, "usage_type": "call"}, {"api_name": "talib.SMA", "line_number": 483, "usage_type": "call"}, {"api_name": "talib.STDDEV", "line_number": 492, "usage_type": "call"}, {"api_name": "talib.STDDEV", "line_number": 494, "usage_type": "call"}, {"api_name": "talib.STDDEV", "line_number": 498, "usage_type": "call"}, {"api_name": "talib.CCI", "line_number": 508, "usage_type": "call"}, {"api_name": "talib.CCI", "line_number": 510, "usage_type": "call"}, {"api_name": "talib.CCI", "line_number": 514, "usage_type": "call"}, {"api_name": "talib.ATR", "line_number": 524, "usage_type": "call"}, {"api_name": "talib.ATR", "line_number": 526, "usage_type": "call"}, {"api_name": "talib.ATR", "line_number": 530, "usage_type": "call"}, {"api_name": "talib.RSI", "line_number": 539, "usage_type": "call"}, {"api_name": "talib.RSI", "line_number": 541, "usage_type": "call"}, {"api_name": "talib.RSI", "line_number": 545, "usage_type": "call"}, {"api_name": "talib.MACD", "line_number": 553, "usage_type": "call"}, {"api_name": "talib.MACD", "line_number": 557, "usage_type": "call"}, {"api_name": "talib.ADX", "line_number": 570, "usage_type": "call"}, {"api_name": "talib.ADX", "line_number": 572, "usage_type": "call"}, {"api_name": "talib.ADX", "line_number": 576, "usage_type": "call"}, {"api_name": "talib.MAX", "line_number": 611, "usage_type": "call"}, {"api_name": "talib.MIN", "line_number": 612, "usage_type": "call"}]} +{"seq_id": "571550198", "text": "import unittest\nfrom torch.distributions import Normal, Exponential, Independent, LogNormal\nfrom pyfilter.filters import UKF, APF\nfrom pyfilter.timeseries import AffineProcess, LinearGaussianObservations\nfrom pyfilter.utils import concater\nfrom pyfilter.normalization import normalize\nimport torch\nfrom pyfilter.inference.sequential import NESSMC2, NESS, SMC2FW, SMC2\n\n\ndef f(x, alpha, sigma):\n return alpha * x\n\n\ndef g(x, alpha, sigma):\n return sigma\n\n\ndef fo(x, alpha, sigma):\n return alpha * x\n\n\ndef go(x, alpha, sigma):\n return sigma\n\n\ndef fmvn(x, alpha, sigma):\n x1 = alpha * x[..., 0]\n x2 = x[..., 1]\n return concater(x1, x2)\n\n\ndef gmvn(x, alpha, sigma):\n return concater(sigma, sigma)\n\n\nclass MyTestCase(unittest.TestCase):\n def test_Inference(self):\n # ===== Distributions ===== #\n dist = Normal(0., 1.)\n mvn = Independent(Normal(torch.zeros(2), torch.ones(2)), 1)\n\n # ===== Define model ===== #\n linear = AffineProcess((f, g), (0.99, 0.25), dist, dist)\n model = LinearGaussianObservations(linear, scale=0.1)\n\n mv_linear = AffineProcess((fmvn, gmvn), (0.5, 0.25), mvn, mvn)\n mvnmodel = LinearGaussianObservations(mv_linear, torch.eye(2), scale=0.1)\n\n # ===== 
Test for multiple models ===== #\n priors = Exponential(1.), LogNormal(0., 1.)\n\n hidden1d = AffineProcess((f, g), priors, dist, dist)\n oned = LinearGaussianObservations(hidden1d, 1., scale=0.1)\n\n hidden2d = AffineProcess((fmvn, gmvn), priors, mvn, mvn)\n twod = LinearGaussianObservations(hidden2d, torch.eye(2), scale=0.1 * torch.ones(2))\n\n particles = 1000\n # ====== Run inference ===== #\n for trumod, model in [(model, oned), (mvnmodel, twod)]:\n x, y = trumod.sample_path(1000)\n\n algs = [\n (NESS, {'particles': particles, 'filter_': APF(model.copy(), 200)}),\n (NESS, {'particles': particles, 'filter_': UKF(model.copy())}),\n (SMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)}),\n (SMC2FW, {'particles': particles, 'filter_': APF(model.copy(), 200)}),\n (NESSMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)})\n ]\n\n for alg, props in algs:\n alg = alg(**props)\n state = alg.fit(y)\n\n w = normalize(state.w)\n\n zipped = zip(\n trumod.hidden.theta + trumod.observable.theta, # True parameter values\n alg.filter.ssm.hidden.theta + alg.filter.ssm.observable.theta # Inferred\n )\n\n for trup, p in zipped:\n if not p.trainable:\n continue\n\n kde = p.get_kde(weights=w)\n\n transed = p.bijection.inv(trup)\n densval = kde.logpdf(transed.numpy().reshape(-1, 1))\n priorval = p.distr.log_prob(trup)\n\n assert (densval > priorval.numpy()).all()\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "test/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 3115, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pyfilter.utils.concater", "line_number": 30, "usage_type": "call"}, {"api_name": "pyfilter.utils.concater", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.distributions.Normal", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.distributions.Independent", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.distributions.Normal", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 41, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.AffineProcess", "line_number": 44, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.LinearGaussianObservations", "line_number": 45, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.AffineProcess", "line_number": 47, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.LinearGaussianObservations", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.distributions.Exponential", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.distributions.LogNormal", "line_number": 51, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.AffineProcess", "line_number": 53, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.LinearGaussianObservations", "line_number": 54, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.AffineProcess", "line_number": 56, "usage_type": "call"}, {"api_name": "pyfilter.timeseries.LinearGaussianObservations", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 57, "usage_type": "call"}, {"api_name": "pyfilter.inference.sequential.NESS", "line_number": 65, 
"usage_type": "name"}, {"api_name": "pyfilter.filters.APF", "line_number": 65, "usage_type": "call"}, {"api_name": "pyfilter.inference.sequential.NESS", "line_number": 66, "usage_type": "name"}, {"api_name": "pyfilter.filters.UKF", "line_number": 66, "usage_type": "call"}, {"api_name": "pyfilter.inference.sequential.SMC2", "line_number": 67, "usage_type": "name"}, {"api_name": "pyfilter.filters.APF", "line_number": 67, "usage_type": "call"}, {"api_name": "pyfilter.inference.sequential.SMC2FW", "line_number": 68, "usage_type": "name"}, {"api_name": "pyfilter.filters.APF", "line_number": 68, "usage_type": "call"}, {"api_name": "pyfilter.inference.sequential.NESSMC2", "line_number": 69, "usage_type": "name"}, {"api_name": "pyfilter.filters.APF", "line_number": 69, "usage_type": "call"}, {"api_name": "pyfilter.normalization.normalize", "line_number": 76, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "122682778", "text": "import collections \nclass Graph: \n def __init__(self,v): \n self.nv=v \n self.graph=collections.defaultdict(list) \n self.count=0 \n def checkbc(self):\n ss=self.nv\n visited=[False]*ss \n d=[float('inf')]*ss \n low=[float('inf')]*ss \n parent=[-1]*ss \n if self.biconfunc(0,visited,parent,low,d): \n return False\n for i in visited:\n if not(i):\n return False\n return True\n def biconfunc(self,u,visited,parent,low,d): \n children=0\n visited[u]=True\n d[u]=self.count \n low[u]=self.count \n self.count+=1\n for v in self.graph[u]: \n if not(visited[v]): \n parent[v]=u \n children+=1\n if self.biconfunc(v,visited,parent,low,d): \n return True\n low[u]=min(low[u],low[v]) \n if parent[u]==-1 and children>1: \n return True\n if parent[u]!=-1 and low[v]>=d[u]: \n return True\t\n elif v!=parent[u]: \n low[u]=min(low[u],d[v]) \n return False\nn=int(input(\"Enter the no of vertices\"))\ng=Graph(n)\ne=int(input(\"Enter the no of edges\"))\nprint(\"Enter the vertex pairs to which edges must be constructed\")\nfor i in range(e):\n a,b=map(int,input().split())\n g.graph[a-1].append(b-1) \n g.graph[b-1].append(a-1) \ndic={True:\"It is biconnected\",False:\"Not biconnected\"}\nprint(dic[g.checkbc()])\n", "sub_path": "bicon.py", "file_name": "bicon.py", "file_ext": "py", "file_size_in_byte": 1514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "7425359", "text": "from .config import logger\nimport uuid\nfrom .child_filter import get_filter_values, apply_filter\nfrom .config import drug_like_params\nfrom rdkit import Chem\nfrom .selfies_methods import (\n selfies_substitution,\n selfies_deletion,\n selfies_insertion,\n random_selfies_generator,\n selfies_scanner,\n)\nfrom typing import List, Set\nfrom collections import defaultdict\nfrom .fragment import libgen\nfrom peewee import SqliteDatabase\nfrom .lib_read import lib_read\nimport re\nfrom .fragment_index import frag_index\nfrom .mate import mate\nimport os\nimport random\nfrom .jensen_crossover import crossover as crossover_gb\nfrom .jensen_mutate import mutate as mutate_gb\nfrom .jensen_selfies_crossover import crossover as selfies_crossover_gb\nfrom .jensen_selfies_mutate import mutate as selfies_mutate_gb\nfrom crem.crem import grow_mol as crem_grow\nfrom crem.crem import mutate_mol as crem_mutate\nfrom copy import deepcopy\n\n\nclass Deriver(object):\n def __init__(self):\n\n logger.info(\"Initializing a new Deriver 
object!\")\n self.data = self._Data()\n\n class _Data(object):\n\n \"\"\"\n This object is meant to be a home for all the data and parameters used by deriver.\n It should be fine to read things from here, but do not set anything directly. Instead,\n use the methods that begin with \"set_\" in order to change these values. This\n prevents incompatible changes from being made silently.\n \"\"\"\n\n def __init__(self):\n self.seed_smiles = None\n self.seed_mols = None\n self.filter_params = deepcopy(drug_like_params)\n self.filter = False\n self.child_db = None\n self.all_good_selfies_children = None\n self.all_good_scanner_children = None\n self.all_good_selfies_gb_children = None\n self.all_good_smiles_gb_children = None\n self.all_good_local_children = None\n self.filter_molecules = None\n self.must_have_patterns = None\n self.must_not_have_patterns = None\n self.heritage = defaultdict(list)\n self.track_heritage = True\n # BRICS specific\n self.seed_frags = None # these are the fragments of the seed molecules\n self.fragment_source_db = None # this is the location of the fragment DB\n self.seed_frag_db = None # the is the DB where the seed_frags are stored and info about them\n self.all_good_brics_children = (\n None # this is where the good (filtered) BRICS children are saved\n )\n self.crem_source_db = None # the location of the crem fragment database used by the local space methods\n\n def set_seeds(self, seeds: list):\n\n \"\"\"\n set the seeds that are used to generate new molecules\n :param seeds:\n :return:\n \"\"\"\n\n if isinstance(seeds[0], Chem.rdchem.Mol):\n self.data.seed_smiles = []\n self.data.seed_mols = []\n for seed in seeds:\n smile = Chem.MolToSmiles(seed, isomericSmiles=True)\n self.data.seed_smiles.append(smile)\n self.data.seed_mols.append(seed)\n elif isinstance(seeds[0], str):\n self.data.seed_smiles = []\n self.data.seed_mols = []\n for smile in seeds:\n seed = Chem.MolFromSmiles(smile, sanitize=True)\n iso_smile = Chem.MolToSmiles(seed, isomericSmiles=True)\n self.data.seed_smiles.append(iso_smile)\n self.data.seed_mols.append(seed)\n else:\n logger.error(\n \"Seeds must be provided as an iterable of Mol objects, or SMILES strings.\"\n )\n return 0\n\n return 1\n\n def set_must_have_patterns(self, must_have_patterns: List[str]):\n if isinstance(must_have_patterns, list):\n assert isinstance(must_have_patterns[0], str)\n elif must_have_patterns is None:\n pass\n else:\n raise TypeError(\n \"must_have_patterns must be None or a list of SMARTS strings\"\n )\n self.data.must_have_patterns = must_have_patterns\n\n def set_must_not_have_patterns(self, must_not_have_patterns: List[str]):\n if isinstance(must_not_have_patterns, list):\n assert isinstance(must_not_have_patterns[0], str)\n elif must_not_have_patterns is None:\n pass\n else:\n raise TypeError(\n \"must_have_patterns must be None or a list of SMARTS strings\"\n )\n self.data.must_not_have_patterns = must_not_have_patterns\n\n def set_filter_molecules(self, filter_molecules: Set[str]):\n assert isinstance(filter_molecules, set)\n assert isinstance(iter(filter_molecules).__next__(), str)\n self.data.filter_molecules = filter_molecules\n return 1\n\n def toggle_heritage_tracking(self):\n self.data.track_heritage = not self.data.track_heritage\n logger.info(f\"Heritage tracking is now {self.data.track_heritage}\")\n return 1\n\n def enable_and_expand_filter(self, seeds: list = None):\n\n \"\"\"\n Turn on the filtering of produced molecules, and expand the ranges to include the provided\n seed molecules, or the 
stored seed molecules.\n :param seeds:\n :return:\n \"\"\"\n\n if seeds is None:\n if self.data.seed_smiles is None:\n logger.info(\"Turning on filter using default filter values:\")\n logger.info(self.data.filter_params)\n self.data.filter = True\n return 1\n else:\n logger.info(\n f\"Expanding filter ranges using current seed set: {self.data.seed_smiles}\"\n )\n seeds = self.data.seed_mols\n for seed_mol in seeds:\n parent_values = get_filter_values(seed_mol)\n self._update_filter_params(parent_values)\n logger.info(\"Done! Turning on filter using new filter values:\")\n logger.info(self.data.filter_params)\n self.data.filter = True\n return 1\n\n def manual_filter_set(self, var: str, val1, val2=None):\n\n \"\"\"\n Use this function to change the filter values safely.\n :param var:\n :param val1:\n :param val2:\n :return:\n \"\"\"\n\n if var not in self.data.filter_params:\n logger.error(f\"{var} is not a valid filter parameter, try again.\")\n raise Exception\n\n logger.info(\n f\"Updating {var} from {self.data.filter_params[var]} to {val1}, {val2}\"\n )\n\n if isinstance(self.data.filter_params[var], tuple):\n\n if val2 is None:\n logger.error(\n f\"{var} requires TWO values, lower and upper bounds, to be set.\"\n )\n raise Exception\n\n elif val2 <= val1:\n logger.error(\n f\"{var} requires TWO values, lower and upper bounds, to be set. \"\n f\"The second must be LARGER than the first.\"\n )\n raise Exception\n\n else:\n # actually set the values!\n if isinstance(self.data.filter_params[var][0], int):\n val1 = int(val1)\n val2 = int(val2)\n self.data.filter_params[var] = (val1, val2)\n\n else:\n # here set these values\n if isinstance(self.data.filter_params[var], int):\n val1 = int(val1)\n self.data.filter_params[var] = val1\n\n logger.info(f\"Done! 
Current filter parameters are: {self.data.filter_params}\")\n\n return 1\n\n def _update_filter_params(self, parent_values):\n\n filter_params = {\n # ranges\n \"MW\": (\n min(parent_values[\"MW\"], self.data.filter_params[\"MW\"][0]),\n max(parent_values[\"MW\"], self.data.filter_params[\"MW\"][1]),\n ),\n \"num_heavy_atoms\": (\n min(\n parent_values[\"num_heavy_atoms\"],\n self.data.filter_params[\"num_heavy_atoms\"][0],\n ),\n max(\n parent_values[\"num_heavy_atoms\"],\n self.data.filter_params[\"num_heavy_atoms\"][1],\n ),\n ),\n \"num_carbons\": (\n min(\n parent_values[\"num_carbons\"],\n self.data.filter_params[\"num_carbons\"][0],\n ),\n max(\n parent_values[\"num_carbons\"],\n self.data.filter_params[\"num_carbons\"][1],\n ),\n ),\n \"num_hetero_atoms\": (\n min(\n parent_values[\"num_hetero_atoms\"],\n self.data.filter_params[\"num_hetero_atoms\"][0],\n ),\n max(\n parent_values[\"num_hetero_atoms\"],\n self.data.filter_params[\"num_hetero_atoms\"][1],\n ),\n ),\n \"hc_ratio\": (\n min(parent_values[\"hc_ratio\"], self.data.filter_params[\"hc_ratio\"][0]),\n max(parent_values[\"hc_ratio\"], self.data.filter_params[\"hc_ratio\"][1]),\n ),\n \"charge\": (\n min(parent_values[\"charge\"], self.data.filter_params[\"charge\"][0]),\n max(parent_values[\"charge\"], self.data.filter_params[\"charge\"][1]),\n ),\n \"logP\": (\n min(parent_values[\"logP\"], self.data.filter_params[\"logP\"][0]),\n max(parent_values[\"logP\"], self.data.filter_params[\"logP\"][1]),\n ),\n \"fSP3\": (\n min(parent_values[\"fSP3\"], self.data.filter_params[\"fSP3\"][0]),\n max(parent_values[\"fSP3\"], self.data.filter_params[\"fSP3\"][1]),\n ),\n # upper limits\n \"HBA\": max(parent_values[\"HBA\"], self.data.filter_params[\"HBA\"]),\n \"HBD\": max(parent_values[\"HBD\"], self.data.filter_params[\"HBD\"]),\n \"tPSA\": max(parent_values[\"tPSA\"], self.data.filter_params[\"tPSA\"]),\n \"rot_bonds\": max(\n parent_values[\"rot_bonds\"], self.data.filter_params[\"rot_bonds\"]\n ),\n \"rigid_bonds\": max(\n parent_values[\"rigid_bonds\"], self.data.filter_params[\"rigid_bonds\"]\n ),\n \"num_rings\": max(\n parent_values[\"num_rings\"], self.data.filter_params[\"num_rings\"]\n ),\n \"max_ring_size\": max(\n parent_values[\"max_ring_size\"], self.data.filter_params[\"max_ring_size\"]\n ),\n \"num_charges\": max(\n parent_values[\"num_charges\"], self.data.filter_params[\"num_charges\"]\n ),\n \"num_chiral_centers\": max(\n parent_values[\"num_chiral_centers\"],\n self.data.filter_params[\"num_chiral_centers\"],\n ),\n }\n\n self.data.filter_params = filter_params\n\n def derive_selfies(\n self,\n n_children: int = 100,\n mut_rate: float = 0.03,\n mut_min: int = 1,\n mut_max: int = 2,\n ):\n\n good_children = []\n all_filtered_children = {}\n self.data.all_good_selfies_children = []\n n_seeds = len(self.data.seed_smiles)\n if n_children < n_seeds:\n n_children_per_seed = 1\n else:\n n_children_per_seed = round(n_children / n_seeds) + 1\n\n logger.info(\n f\"Mutating SELFIES to create {n_children_per_seed} children per seed.\"\n )\n\n if self.data.filter:\n filter_params = self.data.filter_params\n else:\n filter_params = None\n\n for seed in self.data.seed_smiles:\n children = selfies_substitution(\n parent_smiles=seed,\n n_children=round(n_children_per_seed * 0.7), # three internal methods\n mut_rate=mut_rate,\n mut_min=mut_min,\n mut_max=mut_max,\n )\n if self.data.track_heritage:\n self.data.heritage[seed] += children\n child_mols = [\n Chem.MolFromSmiles(child, sanitize=True) for child in children\n ]\n\n 
children = selfies_insertion(\n parent_smiles=seed,\n n_children=round(n_children_per_seed * 0.15), # three internal methods\n mut_rate=mut_rate,\n mut_min=mut_min,\n mut_max=mut_max,\n )\n if self.data.track_heritage:\n self.data.heritage[seed] += children\n child_mols += [\n Chem.MolFromSmiles(child, sanitize=True) for child in children\n ]\n\n children = selfies_deletion(\n parent_smiles=seed,\n n_children=round(n_children_per_seed * 0.15), # three internal methods\n mut_rate=mut_rate,\n mut_min=mut_min,\n mut_max=mut_max,\n )\n if self.data.track_heritage:\n self.data.heritage[seed] += children\n child_mols += [\n Chem.MolFromSmiles(child, sanitize=True) for child in children\n ]\n\n # filter children\n filtered_children = apply_filter(\n filter_params,\n child_mols,\n self.data.must_have_patterns,\n self.data.must_not_have_patterns,\n )\n all_filtered_children.update(filtered_children)\n\n for child in filtered_children:\n if filtered_children[child][\"is_good\"]:\n # check the cache\n if self.data.filter_molecules:\n if child not in self.data.filter_molecules:\n good_children.append(child)\n else:\n logger.debug(f\"skipping previously seen molecule: {child}\")\n else:\n good_children.append(child)\n\n logger.info(f\"Generated {len(good_children)} 'good' children.\")\n self.data.all_good_selfies_children = good_children\n return good_children, all_filtered_children\n\n def random_selfies(self, *, n_symbols: int = 100, n_molecules: int = 100):\n\n good_children = []\n rand_selfies_gen = random_selfies_generator(n_symbols=n_symbols)\n self.data.all_good_selfies_children = []\n\n if self.data.filter:\n filter_params = self.data.filter_params\n else:\n filter_params = None\n\n while len(good_children) < n_molecules:\n child_mols = [\n Chem.MolFromSmiles(next(rand_selfies_gen))\n for i in range(n_molecules - len(good_children))\n ]\n # filter children\n filtered_children = apply_filter(\n filter_params,\n child_mols,\n self.data.must_have_patterns,\n self.data.must_not_have_patterns,\n )\n\n for child in filtered_children:\n if filtered_children[child][\"is_good\"]:\n # check the cache\n if self.data.filter_molecules:\n if child not in self.data.filter_molecules:\n good_children.append(child)\n else:\n logger.debug(f\"skipping previously seen molecule: {child}\")\n else:\n good_children.append(child)\n\n self.data.all_good_selfies_children = good_children\n return good_children\n\n def scan_selfies(self, safe_mode: bool = False):\n\n \"\"\"\n Return all possible single substitution children for all the seeds.\n \"\"\"\n if self.data.filter:\n filter_params = self.data.filter_params\n else:\n filter_params = None\n good_children = []\n self.data.all_good_scanner_children = []\n all_filtered_children = {}\n\n for seed in self.data.seed_smiles:\n children = selfies_scanner(parent_smiles=seed, safe_mode=safe_mode)\n if len(children) == 0:\n continue\n if self.data.track_heritage:\n self.data.heritage[seed] += children\n\n filtered_children = apply_filter(\n filter_params,\n [Chem.MolFromSmiles(child) for child in children],\n self.data.must_have_patterns,\n self.data.must_not_have_patterns,\n )\n all_filtered_children.update(filtered_children)\n\n for child in filtered_children:\n if filtered_children[child][\"is_good\"]:\n # check the cache\n if self.data.filter_molecules:\n if child not in self.data.filter_molecules:\n good_children.append(child)\n else:\n logger.debug(f\"skipping previously seen molecule: {child}\")\n else:\n good_children.append(child)\n\n 
self.data.all_good_scanner_children = good_children\n return good_children, all_filtered_children\n\n def derive_gb(self, n_children: int = 100, representation=\"selfies\"):\n\n assert len(self.data.seed_smiles) > 0\n children = []\n good_children = []\n if representation == \"selfies\":\n self.data.all_good_selfies_gb_children = []\n crossover_fn = selfies_crossover_gb\n mutation_fn = selfies_mutate_gb\n elif representation == \"smiles\":\n self.data.all_good_smiles_gb_children = []\n crossover_fn = crossover_gb\n mutation_fn = mutate_gb\n else:\n raise ValueError(\n 'Must specify derivation kind as one of \"smiles\" or \"selfies\"'\n )\n\n if self.data.filter:\n filter_params = self.data.filter_params\n else:\n filter_params = None\n\n # parent_a_smiles, parent_b_smiles = (None, None) # not used. Delete?\n if len(self.data.seed_smiles) > 1:\n do_crossover = True\n new_child = None\n else:\n do_crossover = False\n new_child = self.data.seed_mols[0]\n\n for _ in range(n_children):\n if do_crossover:\n parent_a, parent_b = random.sample(self.data.seed_mols, 2)\n new_child = crossover_fn(parent_a, parent_b)\n if new_child is not None:\n mutated_child = mutation_fn(new_child)\n if mutated_child is None:\n continue\n else:\n continue\n children.append(mutated_child)\n\n filtered_children = apply_filter(\n filter_params,\n children,\n self.data.must_have_patterns,\n self.data.must_not_have_patterns,\n )\n\n # bugfix for empty strings\n try:\n del filtered_children[\"\"]\n except KeyError:\n pass\n\n for child in filtered_children:\n if filtered_children[child][\"is_good\"]:\n # check the cache\n if self.data.filter_molecules:\n if child not in self.data.filter_molecules:\n good_children.append(child)\n else:\n logger.debug(f\"skipping previously seen molecule: {child}\")\n else:\n good_children.append(child)\n\n if representation == \"smiles\":\n self.data.all_good_smiles_gb_children = good_children\n else:\n self.data.all_good_selfies_gb_children = good_children\n logger.info(f\"Generated {len(good_children)} 'good' children.\")\n return good_children, filtered_children\n\n def set_fragment_source_db(self, frag_db):\n\n \"\"\"\n set the location for the fragment database that is used to mate molecules\n :param frag_db:\n :return:\n \"\"\"\n\n self.data.fragment_source_db = frag_db\n return 1\n\n def set_crem_source_db(self, crem_db: str):\n\n \"\"\"\n set the location for the fragment database that is used by crem\n :param crem_db:\n :return:\n \"\"\"\n self.data.crem_source_db = crem_db\n return 1\n\n def _process_seeds_for_brics(self):\n \"\"\"\n This function parses the seed molecules and gets the BRICS fragments they make, then cleans them\n :return:\n \"\"\"\n logger.info(\"Processing seeds to create scaffold fragments:\")\n\n self.data.seed_frag_db = f\"seed_frags_{uuid.uuid4()}.db\"\n self.data.seed_frags = []\n\n # Databases are used in lieu of alternatives (like dataframes) in order to operate on larger datasets\n # where memory may be a bottleneck, and to ensure that only one source of truth exists for performing\n # this calculation.\n libgen(\n self.data.seed_mols, self.data.seed_frag_db\n ) # libgen is the basic command that generates frag libraries\n seed_frag_db = SqliteDatabase(\n self.data.seed_frag_db\n ) # load the db we just made\n Fragment, Heritage, _, _ = lib_read(\n seed_frag_db\n ) # we only care about fragment and heritage at this point\n seed_frag_db.connect()\n\n # get all the fragments from the user molecule\n user_frags = (\n Fragment.select()\n .join(Heritage, 
on=Heritage.frag)\n .where(\n Heritage.parent != Heritage.frag\n ) # only get fragments, not intact molecules\n .order_by(Fragment.frag_coeff.desc())\n ) # largest and most complex frags first\n\n # for every fragment from the user provided parent mol\n for user_frag in user_frags:\n\n # we want to ignore really small fragments, by counting atom symbols\n smaller_smile = re.sub(\n r\"\\[[0-9]+\\*\\]\", \"\", user_frag.smile\n ) # ignore pseudoatoms\n smaller_smile = re.sub(\n r\"[0-9]\", \"\", smaller_smile\n ) # ignore numbers in general\n smaller_smile = re.sub(\n r\"[\\(\\)=#@\\-\\]\\[]+\", \"\", smaller_smile\n ) # ignore a bunch of other symbols\n if len(smaller_smile) < 4: # if there are less than four atoms\n logger.warning(f\"Skipping user_frag {user_frag.smile} due to size.\")\n continue\n\n # using this fragment and the whole parent molecule, estimate the \"missing\" FC and size\n try:\n parent = Heritage.get(Heritage.frag_id == user_frag.id).parent\n # todo: actual exception is deriver.lib_read.FragmentDoesNotExist, check if we can except just this case\n except Exception as e: # pylint: disable=broad-except\n logger.warning(f\"Encountered exception {e}\")\n logger.warning(\n \"If this exception describes a missing parent in the Heritage table, this bug\"\n \"is known and is being handled as intended.\"\n )\n continue\n\n # if parent is not None:\n missing_piece_fc = (\n parent.frag_coeff - user_frag.frag_coeff\n ) - 1.0 # -1.0 because two pieces combine\n missing_piece_len = len(parent.smile) - len(\n user_frag.smile\n ) # approximation\n # else:\n # missing_piece_fc = 3.0 # approximation\n # missing_piece_len = 40 # approximation\n\n # this is what we are going to keep\n seed_frag = (\n user_frag.smile,\n user_frag.num_pseudo_atoms,\n missing_piece_fc,\n missing_piece_len,\n parent.smile,\n )\n\n self.data.seed_frags.append(seed_frag)\n\n seed_frag_db.close()\n os.remove(\n self.data.seed_frag_db\n ) # we don't really care to keep the seed fragment database\n logger.info(\n f\"Done! There are {len(self.data.seed_frags)} seed fragments ready to be mated.\"\n )\n # it is faster to keep these in memory rather than using the database\n\n def derive_brics(self, n_children: int = 100, permissivity: float = 1.0):\n \"\"\"\n\n :param n_children: How many children do you want, in total. This is an approximation, not exact.\n :param permissivity: How unlike the parent molecules is the child allowed to be, higher is generally larger\n :return: (all_good_children [a list of smiles], all_filtered_children [a dict of values about the molecules])\n \"\"\"\n # process the seeds\n self._process_seeds_for_brics()\n\n # get the \"maximum number of children\" per fragment\n n_seed_frags = len(self.data.seed_frags)\n if n_seed_frags == 0:\n logger.warning(\"No seed fragments! 
Cannot derive brics from these seeds.\")\n return [], {}\n\n if self.data.filter:\n filter_params = self.data.filter_params\n else:\n filter_params = None\n\n if n_children < n_seed_frags:\n children_per_seed_frag = 1\n else:\n children_per_seed_frag = round(n_children / n_seed_frags) + 1\n\n logger.info(\n f\"Creating/reading a fragment index for {self.data.fragment_source_db}\"\n )\n # generate the frag index once, first, so it doesn't get generated in each pool process\n # this index serves to dramatically speed up queries\n frag_index(self.data.fragment_source_db)\n\n # again this is more of a guideline\n logger.info(\n f\"Mating to create {children_per_seed_frag} children per seed frag.\"\n )\n\n all_filtered_children = (\n dict()\n ) # the filter returns a dictionary of calculated pk values and filter status\n\n for (\n seed_frag_smile,\n seed_frag_num_pa,\n missing_p_fc,\n missing_p_len,\n parent_smile,\n ) in self.data.seed_frags:\n try:\n res = mate(\n self.data.fragment_source_db,\n seed_frag_smile,\n seed_frag_num_pa,\n missing_p_fc,\n missing_p_len,\n permissivity,\n children_per_seed_frag,\n filter_params,\n self.data.must_have_patterns,\n self.data.must_not_have_patterns,\n )\n\n (\n _,\n filter_values,\n ) = res # we only care about the filter dict, since it has everything\n all_filtered_children.update(filter_values) # update our master dict\n if self.data.track_heritage:\n self.data.heritage[parent_smile] += list(\n filter_values.keys()\n ) # this keeps track of heritage\n except IndexError as e:\n # This bug has never really been explored that much.\n logger.warning(\n f\"Error when trying to mate a molecule, ignoring this molecule. Error: {e}\"\n )\n\n all_good_children = []\n self.data.all_good_brics_children = (\n []\n ) # every time you call `derive_brics` it deletes any old results\n for child in all_filtered_children:\n if all_filtered_children[child][\"is_good\"]:\n # check the cache of previously seen molecules (which we want to avoid reproducing)\n if self.data.filter_molecules:\n if child not in self.data.filter_molecules:\n all_good_children.append(child)\n else:\n logger.debug(f\"skipping previously seen molecule: {child}\")\n else:\n # there is no provided list of molecules to skip\n all_good_children.append(child)\n\n logger.info(\n f\"Generated {len(self.data.all_good_brics_children)} 'good' children.\"\n )\n self.data.all_good_brics_children = all_good_children\n return all_good_children, all_filtered_children\n\n def derive_local_space(\n self, approx_children_per_seed: int = 1000, min_inc: int = -2, max_inc: int = 2\n ):\n\n if self.data.crem_source_db is None:\n raise AttributeError(\n \"No crem source db. Please use `.set_crem_source_db()` to provide a source db. 
\"\n \"See readme for more information.\"\n )\n\n if self.data.filter:\n filter_params = self.data.filter_params\n else:\n filter_params = None\n\n children = []\n good_children = []\n # first we make the molecules by using grow to replace hydrogens, and mutate to do everything else\n for i, seed_mol in enumerate(self.data.seed_mols):\n logger.info(f\"Growing children for {self.data.seed_smiles[i]}:\")\n grown_children_smiles = crem_grow(\n seed_mol, self.data.crem_source_db, return_mol=False\n )\n grown_children_mols = [\n Chem.MolFromSmiles(smile, sanitize=True)\n for smile in grown_children_smiles\n ]\n children.extend(grown_children_mols)\n logger.info(\"Done!\")\n\n logger.info(f\"Mutating children for {self.data.seed_smiles[i]}:\")\n mutate_children_smiles = crem_mutate(\n seed_mol,\n self.data.crem_source_db,\n return_mol=False,\n max_replacements=approx_children_per_seed,\n min_size=1,\n max_size=5,\n min_inc=min_inc,\n max_inc=max_inc,\n )\n mutate_children_mols = [\n Chem.MolFromSmiles(smile, sanitize=True)\n for smile in mutate_children_smiles\n ]\n children.extend(mutate_children_mols)\n logger.info(\"Done!\")\n\n if self.data.filter:\n logger.info(\"Applying filters to local space children:\")\n\n filtered_children = apply_filter(\n filter_params,\n children,\n self.data.must_have_patterns,\n self.data.must_not_have_patterns,\n )\n\n # bugfix for empty strings\n try:\n del filtered_children[\"\"]\n except KeyError:\n pass\n\n logger.info(\"Done!\")\n\n for child in filtered_children:\n if filtered_children[child][\"is_good\"]:\n # check the cache\n if self.data.filter_molecules:\n if child not in self.data.filter_molecules:\n good_children.append(child)\n else:\n logger.debug(f\"skipping previously seen molecule: {child}\")\n else:\n good_children.append(child)\n\n self.data.all_good_local_children = good_children\n logger.info(\n f\"Generated {len(self.data.all_good_local_children)} 'good' children.\"\n )\n\n return good_children, filtered_children\n", "sub_path": "src/deriver/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 31499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "config.logger.info", "line_number": 35, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 35, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 50, "usage_type": "call"}, {"api_name": "config.drug_like_params", "line_number": 50, "usage_type": "argument"}, {"api_name": "collections.defaultdict", "line_number": 61, "usage_type": "call"}, {"api_name": "rdkit.Chem.rdchem", "line_number": 80, "usage_type": "attribute"}, {"api_name": "rdkit.Chem", "line_number": 80, "usage_type": "name"}, {"api_name": "rdkit.Chem.MolToSmiles", "line_number": 84, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 84, "usage_type": "name"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 91, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 91, "usage_type": "name"}, {"api_name": "rdkit.Chem.MolToSmiles", "line_number": 92, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 92, "usage_type": "name"}, {"api_name": "config.logger.error", "line_number": 96, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 125, 
"usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 133, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 133, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 147, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 147, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 148, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 148, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 152, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 152, "usage_type": "name"}, {"api_name": "child_filter.get_filter_values", "line_number": 157, "usage_type": "call"}, {"api_name": "config.logger.info", "line_number": 159, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 159, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 160, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 160, "usage_type": "name"}, {"api_name": "config.logger.error", "line_number": 175, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 175, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 178, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 178, "usage_type": "name"}, {"api_name": "config.logger.error", "line_number": 185, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 185, "usage_type": "name"}, {"api_name": "config.logger.error", "line_number": 191, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 191, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 210, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 210, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 312, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 312, "usage_type": "name"}, {"api_name": "selfies_methods.selfies_substitution", "line_number": 322, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 332, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 332, "usage_type": "name"}, {"api_name": "selfies_methods.selfies_insertion", "line_number": 335, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 345, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 345, "usage_type": "name"}, {"api_name": "selfies_methods.selfies_deletion", "line_number": 348, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 358, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 358, "usage_type": "name"}, {"api_name": "child_filter.apply_filter", "line_number": 362, "usage_type": "call"}, {"api_name": "config.logger.debug", "line_number": 377, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 377, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 381, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 381, "usage_type": "name"}, {"api_name": "selfies_methods.random_selfies_generator", "line_number": 388, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 398, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 398, "usage_type": "name"}, {"api_name": "child_filter.apply_filter", "line_number": 402, "usage_type": "call"}, {"api_name": "config.logger.debug", "line_number": 416, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 416, "usage_type": "name"}, 
{"api_name": "selfies_methods.selfies_scanner", "line_number": 437, "usage_type": "call"}, {"api_name": "child_filter.apply_filter", "line_number": 443, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 445, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 445, "usage_type": "name"}, {"api_name": "config.logger.debug", "line_number": 458, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 458, "usage_type": "name"}, {"api_name": "jensen_selfies_crossover.crossover", "line_number": 472, "usage_type": "name"}, {"api_name": "jensen_selfies_mutate.mutate", "line_number": 473, "usage_type": "name"}, {"api_name": "jensen_crossover.crossover", "line_number": 476, "usage_type": "name"}, {"api_name": "jensen_mutate.mutate", "line_number": 477, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 498, "usage_type": "call"}, {"api_name": "child_filter.apply_filter", "line_number": 508, "usage_type": "call"}, {"api_name": "config.logger.debug", "line_number": 528, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 528, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 536, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 536, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 565, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 565, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 567, "usage_type": "call"}, {"api_name": "fragment.libgen", "line_number": 573, "usage_type": "call"}, {"api_name": "peewee.SqliteDatabase", "line_number": 576, "usage_type": "call"}, {"api_name": "lib_read.lib_read", "line_number": 579, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 598, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 601, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 604, "usage_type": "call"}, {"api_name": "config.logger.warning", "line_number": 608, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 608, "usage_type": "name"}, {"api_name": "config.logger.warning", "line_number": 616, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 616, "usage_type": "name"}, {"api_name": "config.logger.warning", "line_number": 617, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 617, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 646, "usage_type": "call"}, {"api_name": "config.logger.info", "line_number": 649, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 649, "usage_type": "name"}, {"api_name": "config.logger.warning", "line_number": 667, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 667, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 680, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 680, "usage_type": "name"}, {"api_name": "fragment_index.frag_index", "line_number": 685, "usage_type": "call"}, {"api_name": "config.logger.info", "line_number": 688, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 688, "usage_type": "name"}, {"api_name": "mate.mate", "line_number": 704, "usage_type": "call"}, {"api_name": "config.logger.warning", "line_number": 728, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 728, "usage_type": "name"}, {"api_name": "config.logger.debug", "line_number": 743, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 743, "usage_type": "name"}, {"api_name": 
"config.logger.info", "line_number": 748, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 748, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 773, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 773, "usage_type": "name"}, {"api_name": "crem.crem.grow_mol", "line_number": 774, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 778, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 778, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 782, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 782, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 784, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 784, "usage_type": "name"}, {"api_name": "crem.crem.mutate_mol", "line_number": 785, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 796, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 796, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 800, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 800, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 803, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 803, "usage_type": "name"}, {"api_name": "child_filter.apply_filter", "line_number": 805, "usage_type": "call"}, {"api_name": "config.logger.info", "line_number": 818, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 818, "usage_type": "name"}, {"api_name": "config.logger.debug", "line_number": 827, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 827, "usage_type": "name"}, {"api_name": "config.logger.info", "line_number": 832, "usage_type": "call"}, {"api_name": "config.logger", "line_number": 832, "usage_type": "name"}]} +{"seq_id": "166823333", "text": " #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 4 10:51:54 2018\n\n@author: matthewszhang\n\"\"\"\nimport time\nimport os\nimport os.path as osp\nimport numpy as np\nfrom baselines import logger\nfrom collections import deque\nfrom baselines.feudal.models import I2AModel\nfrom baselines.feudal.runners import I2ARunner\n\nPATH=\"tmp/build/graph\"\n\ndef package_environment(states, actions, rewards):\n train_states = []\n train_actions = []\n train_rewards = []\n train_nstates = []\n \n for (state, action, reward) in states, actions, rewards:\n train_states.append(state[:-1])\n train_nstates.append(state[1:])\n train_actions.append(action[:-1])\n train_rewards.append(reward[:-1])\n \n (np.asarray(arr).reshape((-1, arr.shape[-1])))\n\ndef pack(arr):\n try:\n arr = np.vstack(arr)\n if arr.shape[0]==1:\n return np.flatten(arr)\n else: return arr\n except:\n return np.hstack(arr)\n\ndef constfn(val):\n def f(_):\n return val\n return f\n\ndef sbi(arr, dones):\n nbatch=dones.shape[0]\n abd=[]\n si=0\n for t in range(nbatch):\n if dones[t] == 1:\n abd.append(arr[si:t+1])\n si=t+1\n elif t==nbatch-1:\n abd.append(arr[si:])\n return abd\n\ndef mcret(actions, rews, dones, vals, lam=0.95, gam=0.99):\n mb_returns = np.zeros_like(rews)\n mb_advs = np.zeros_like(rews)\n lastgaelam = 0\n nsteps = rews.shape[0]\n nextvalues=vals[-1:,]\n for t in reversed(range(nsteps)):\n if t == nsteps - 1:\n nextnonterminal = 0\n nextvalues = 0 # assume last is terminal -> won't be too significant unless tstep is large\n else:\n nextnonterminal = 1.0 - dones[t+1]\n nextvalues = vals[t+1]\n delta = 
rews[t] + gam * nextvalues * nextnonterminal - vals[t]\n mb_advs[t] = lastgaelam = delta + gam * lam * nextnonterminal * lastgaelam\n \n mb_returns = mb_advs + vals\n return mb_returns, mb_advs\n\ndef learn(*, policy, env, tsteps, nsteps, encoef, lr, cliphigh, clipinc, vcoef,\n mgn, gmax, ginc, lam, nhier, nmb, noe, ngmin, nginc, bmin, bmax, nhist,\n recurrent, val, max_len=100, save_interval=0, log_interval=1, load_path=None):\n \n if isinstance(lr, float): lr = constfn(lr)\n else: assert callable(lr)\n if isinstance(cliphigh, float):\n arr = np.asarray([cliphigh*(clipinc**i) for i in range(nhier)], dtype=np.float32) \n cliprange = constfn(arr)\n else: \n def cr(t):\n arr = [cliphigh(t)*(clipinc(t)**i) for i in range(nhier)]\n return np.asarray(arr, dtype=np.float32)\n cliprange = cr \n\n nenvs = env.num_envs\n ob_space = env.observation_space\n ac_space = env.action_space\n nbatch = (nenvs * nsteps)\n nbatch_train = nbatch // nmb\n \n make_model = lambda : I2AModel(policy, ob_space, ac_space, max_grad=mgn,\n encoef=encoef, vcoef=vcoef, klcoef=klcoef, aggregator='concat',\n traj_len = tl, nh=nh)\n if save_interval and logger.get_dir():\n import cloudpickle\n with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:\n fh.write(cloudpickle.dumps(make_model))\n model = make_model()\n if load_path is not None:\n model.load(load_path)\n \n runner = I2ARunner(env=env, model=model, nsteps=nsteps)\n epinfobuf = deque(maxlen=100)\n tfirststart = time.time()\n nupdates = tsteps//nbatch\n \n if not val:\n vre = np.zeros((nhier), dtype=np.float32)\n val_temp = 0.9\n \n for update in range(1, nupdates+1):\n tstart = time.time()\n frac = 1.0 - (update - 1.0) / nupdates\n lrnow = lr(frac)\n cliprangenow = cliprange(frac)\n obs, rewards, actions, dones, epinfos = runner.run()\n epinfobuf.extend(epinfos)\n mblossvals = []\n obs, actions, rewards, dones = (sbi(arr, dones) for arr in\n (obs, actions, rewards, dones))\n env_train_set = package_environment(obs, actions, rewards)\n if not recurrent:\n nlps, vfs = model.info(obs, actions)\n obs, actions, rewards, dones, nlps, vfs = \\\n map(pack,(obs,actions,rewards,dones,nlps,vfs))\n if not val:\n vre = vre * val_temp + np.mean(rewards, axis=0) * (1-val_temp)\n vfs = np.reshape(np.repeat(vre, nsteps), [nsteps, nhier])\n rewards, advs = mcret(actions, rewards, dones, vfs, lam=lam, gam=model.gam)\n actions = actions.flatten() #safety\n inds = np.arange(nbatch)\n for _ in range(noe):\n np.random.shuffle(inds)\n for start in range(0, nbatch, nbatch_train):\n end = start + nbatch_train\n mbinds = inds[start:end]\n slices = (arr[mbinds] for arr in (obs, actions, nlps, advs, rewards, vfs)) \n mblossvals.append(model.train(lrnow, cliprangenow, *slices))\n\n else: # recurrent version\n pass\n \n lossvals = np.mean(mblossvals, axis=0)\n tnow = time.time()\n fps = int(nbatch / (tnow - tstart))\n if update % log_interval == 0 or update == 1:\n logger.logkv(\"serial_timesteps\", update*nsteps)\n logger.logkv(\"nupdates\", update)\n logger.logkv(\"total_timesteps\", update*nbatch)\n logger.logkv(\"fps\", fps)\n logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))\n logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))\n logger.logkv('time_elapsed', tnow - tfirststart)\n for (lossval, lossname) in zip(lossvals, model.loss_names):\n logger.logkv(lossname, lossval)\n logger.dumpkvs()\n if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():\n checkdir = osp.join(logger.get_dir(), 
'checkpoints')\n os.makedirs(checkdir, exist_ok=True)\n savepath = osp.join(checkdir, '%.5i'%update)\n print('Saving to', savepath)\n model.save(savepath)\n env.close()\n\ndef safemean(xs):\n return np.nan if len(xs) == 0 else np.mean(xs)\n", "sub_path": "baselines/feudal/i2a.py", "file_name": "i2a.py", "file_ext": "py", "file_size_in_byte": 6286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.flatten", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "baselines.feudal.models.I2AModel", "line_number": 99, "usage_type": "call"}, {"api_name": "baselines.logger.get_dir", "line_number": 102, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 102, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "name"}, {"api_name": "baselines.logger.get_dir", "line_number": 104, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 104, "usage_type": "name"}, {"api_name": "cloudpickle.dumps", "line_number": 105, "usage_type": "call"}, {"api_name": "baselines.feudal.runners.I2ARunner", "line_number": 110, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 111, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 116, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 151, "usage_type": "call"}, {"api_name": "time.time", "line_number": 152, "usage_type": "call"}, {"api_name": "baselines.logger.logkv", "line_number": 155, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 155, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 156, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 156, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 157, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 157, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 158, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 158, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 159, 
"usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 159, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 160, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 160, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 161, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 161, "usage_type": "name"}, {"api_name": "baselines.logger.logkv", "line_number": 163, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 163, "usage_type": "name"}, {"api_name": "baselines.logger.dumpkvs", "line_number": 164, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 164, "usage_type": "name"}, {"api_name": "baselines.logger.get_dir", "line_number": 165, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 165, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "name"}, {"api_name": "baselines.logger.get_dir", "line_number": 166, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 166, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "338668698", "text": "from django.shortcuts import render, redirect\nfrom .models import *\nfrom df_user import user_decorator\nfrom django.http import JsonResponse\n\n\n# Create your views here.\n@user_decorator.login\ndef cart(request):\n user_id = request.session.get('user_id')\n carts = CartInfo.objects.filter(user_id=int(user_id))\n context = {'carts': carts}\n return render(request, 'df_cart/cart.html', context)\n\n\n@user_decorator.login\ndef add(request, gid, count):\n uid = request.session.get('user_id')\n gid = int(gid)\n count = int(count)\n carts = CartInfo.objects.filter(user_id=uid, goods_id=gid)\n if len(carts) >= 1:\n cart0 = carts[0]\n cart0.count += count\n else:\n cart0 = CartInfo()\n cart0.user_id = uid\n cart0.goods_id = gid\n cart0.count = count\n cart0.save()\n\n if request.is_ajax():\n count = CartInfo.objects.filter(user=request.session['user_id']).count()\n return JsonResponse({'count': count})\n else:\n return redirect('/cart/')\n\n\n@user_decorator.login\ndef edit(request, cart_id, count):\n try:\n cart0 = CartInfo.objects.get(pk=int(cart_id))\n cart0.count = int(count)\n data = {'ok': 0}\n cart0.save()\n except Exception as e:\n data = {'ok': count}\n return JsonResponse(data)\n\n\n@user_decorator.login\ndef delete(request, cart_id):\n try:\n cart0 = CartInfo.objects.get(pk=int(cart_id))\n cart0.delete()\n data = {'ok': 1}\n except Exception as e:\n data = {'ok': 0}\n return JsonResponse(data)\n", "sub_path": "df_cart/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "df_user.user_decorator.login", "line_number": 8, "usage_type": "attribute"}, {"api_name": "df_user.user_decorator", "line_number": 8, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 34, "usage_type": 
"call"}, {"api_name": "django.shortcuts.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "df_user.user_decorator.login", "line_number": 16, "usage_type": "attribute"}, {"api_name": "df_user.user_decorator", "line_number": 16, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "df_user.user_decorator.login", "line_number": 39, "usage_type": "attribute"}, {"api_name": "df_user.user_decorator", "line_number": 39, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 59, "usage_type": "call"}, {"api_name": "df_user.user_decorator.login", "line_number": 51, "usage_type": "attribute"}, {"api_name": "df_user.user_decorator", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "609285759", "text": "import logging\n\nfrom qwdeploy import exception\n\nLOG = logging.getLogger(__name__)\n\n\nclass Deploy(object):\n \"\"\"Deploy a Stack\"\"\"\n\n name = 'deploy'\n help = __doc__\n params = []\n\n def run(self):\n raise exception.QwdeployError(\"command not implemented\")\n", "sub_path": "qwdeploy/commands/deploy.py", "file_name": "deploy.py", "file_ext": "py", "file_size_in_byte": 272, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "qwdeploy.exception.QwdeployError", "line_number": 16, "usage_type": "call"}, {"api_name": "qwdeploy.exception", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "436405346", "text": "from flask_restplus import Api, Resource, fields\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom flask import Flask, url_for, jsonify\nfrom elasticsearch import Elasticsearch\nimport json\n\n### Setup elastic search connection\nes_host = {\"host\": \"elasticsearch1\", \"port\": 9200}\nes = Elasticsearch([es_host], retry_on_timeout=True, maxsize=25)\n\napp = Flask(__name__)\napi = Api(app,\n\t version='1.0', \n title='Swagger Test Page for Elasticsearch \\\"Geoname Data\\\" Search Templates', \n description='Test Page for \\\"Geoname Data\\\" Searches', \n prefix=\"/v1\",\n contact=\"john@swarmee.net\",\n contact_url=\"www.swarmee.net\"\n )\napp.wsgi_app = ProxyFix(app.wsgi_app)\n\nns = api.namespace('city', description='Simple Endpoints to Test Elastic API operations')\n\nquery1 = api.model('query1', {\n 'typeAheadText': fields.String(default='Syd', required=True, description='Type Ahead Text'),\n 'typeAheadTemplate': fields.String(default='typeAhead', required=True, description='Template for Type Ahead'),\n })\n\nquery2 = api.model('query2', {\n 'nearGeoNameId': fields.String(default='2293507', required=True, description='Search For Cities Near This GeoNameId'),\n 'nearGeoNameIdDistance': fields.String(default='100km', required=True, description='Distance From City to Include in results') \n })\n\n@ns.route('/typeAhead')\nclass typeAhead(Resource):\n @ns.expect(query1)\n def post(self):\n typeAheadText = api.payload['typeAheadText']\n typeAheadTemplate = api.payload['typeAheadTemplate']\n abc = {'id': typeAheadTemplate ,'params': {'typeAheadText': typeAheadText}}\n resp = es.search_template(index=\"city\", body=abc, filter_path=['suggest.*suggestion.options.text','suggest.*suggestion.options._id'])\n return jsonify(resp) \n\n@ns.route('/typeAhead/Full')\nclass typeAheadFull(Resource):\n @ns.expect(query1)\n def post(self):\n typeAheadText = api.payload['typeAheadText']\n typeAheadTemplate = api.payload['typeAheadTemplate']\n 
abc = {'id': typeAheadTemplate ,'params': {'typeAheadText': typeAheadText}}\n resp = es.search_template(index=\"city\", body=abc)\n## resp['matches'] = resp.pop('hits') \n## print(resp)\n return jsonify(resp)\n\n#### General search of geoname data using search term\n\n@ns.route('/search/<searchTerms>')\nclass productSearch(Resource):\n def get(self, searchTerms):\n simpleSearchResponse = es.search(index=\"city\", body=\"{\\\"query\\\": {\\\"simple_query_string\\\": {\\\"query\\\": \\\"%s\\\"}}}\" % searchTerms)\n return jsonify(simpleSearchResponse) \n\n#### Search geoname data by geonameId\n\n@ns.route('/search/<geonameId>')\nclass geonameIdSearch(Resource):\n def get(self, geonameId):\n geonameIdSearchResponse = es.search(index=\"city\", body=\"{\\\"query\\\": {\\\"match\\\": {\\\"_id\\\": \\\"%s\\\"}}}\" % geonameId)\n return jsonify(geonameIdSearchResponse) \n\n\n#### finds records near specific geo point - based on supplied distance ####\n\n@ns.route('/search/NearGeoNameId')\nclass nearGeonameId(Resource):\n @ns.expect(query2)\n def post(self):\n nearGeoNameId = api.payload['nearGeoNameId']\n nearGeoNameIdDistance = api.payload['nearGeoNameIdDistance']\n nearGeonameIdSearchResponse = es.search(index=\"city\", body=\"{\\\"query\\\": {\\\"match\\\": {\\\"_id\\\": \\\"%s\\\"}}}\" % nearGeoNameId, filter_path=['hits.hits._source.location.*'])\n for row in nearGeonameIdSearchResponse[\"hits\"][\"hits\"]:\n getLatLon = row[\"_source\"][\"location\"]\n lon = getLatLon['lon']\n lat = getLatLon['lat']\n abc = {'id': 'nearGeoNameId' ,'params': {'lon': lon, 'lat': lat, 'distance' : nearGeoNameIdDistance }}\n resp3 = es.search_template(index=\"city\", body=abc, filter_path=['hits.total', 'hits.hits._source.asciiName', 'hits.hits._source.location', 'hits.hits._source.geonameId'])\n# finalResponse = []\n# for row in resp3[\"hits\"][\"hits\"]:\n# finalResponse.append(row[\"_source\"])\n return jsonify(resp3) \n\n\n#### counts the number of city records stored in elastic ####\n@ns.route('/count')\nclass geoname(Resource):\n def get(self):\n resp = es.count(index=\"city\", filter_path=['-took','-timed_out','-_shards'])\n return resp\n\n#### provides indication if the elastic backend is healthy ####\n@ns.route('/backEndHealth')\nclass backEndHealth(Resource):\n def get(self):\n resp = es.cluster.health(filter_path=['status'])\n return resp\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "elastic-stack-geonames-cities/geonames-cities-api/geonames-cities-api-using-payload.py", "file_name": "geonames-cities-api-using-payload.py", "file_ext": "py", "file_size_in_byte": 4546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "elasticsearch.Elasticsearch", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_restplus.Api", "line_number": 12, "usage_type": "call"}, {"api_name": "werkzeug.contrib.fixers.ProxyFix", "line_number": 20, "usage_type": "call"}, {"api_name": "flask_restplus.fields.String", "line_number": 25, "usage_type": "call"}, {"api_name": "flask_restplus.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "flask_restplus.fields.String", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_restplus.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "flask_restplus.fields.String", "line_number": 30, "usage_type": "call"}, {"api_name": "flask_restplus.fields", "line_number": 30, "usage_type": "name"}, {"api_name": 
"flask_restplus.fields.String", "line_number": 31, "usage_type": "call"}, {"api_name": "flask_restplus.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 54, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 62, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 91, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 96, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "400967713", "text": "#!/usr/bin/env python\nimport rospy\nimport numpy\nimport tf\nimport tf2_ros\nimport geometry_msgs.msg\n\ndef message_from_transform(T):\n\tmsg = geometry_msgs.msg.Transform()\n\tq = tf.transformations.quaternion_from_matrix(T)\n\ttranslation = tf.transformations.translation_from_matrix(T)\n\tmsg.translation.x = translation[0]\n\tmsg.translation.y = translation[1]\n\tmsg.translation.z = translation[2]\n\tmsg.rotation.x = q[0]\n\tmsg.rotation.y = q[1]\n\tmsg.rotation.z = q[2]\n\tmsg.rotation.w = q[3]\n\treturn msg\n\ndef publish_transforms():\n\tT1 = tf.transformations.concatenate_matrices(\n\t\ttf.transformations.translation_matrix((1.0,1.0,0.0)),\n\t\ttf.transformations.quaternion_matrix(\n\t\t\ttf.transformations.quaternion_from_euler(1.0,1.0,1.0)\t\t\n\t\t)\t\t\n\t)\n\tT1_stamped = geometry_msgs.msg.TransformStamped()\n\tT1_stamped.header.stamp = rospy.Time.now()\n\tT1_stamped.header.frame_id = \"world\"\n\tT1_stamped.child_frame_id = \"F1\"\n\tT1_stamped.transform = message_from_transform(T1)\n\tbr.sendTransform(T1_stamped)\n\n\tT2 = tf.transformations.concatenate_matrices(\n\t\ttf.transformations.translation_matrix((1.0,0.0,0.0)),\n\t\ttf.transformations.quaternion_matrix(\n\t\t\ttf.transformations.quaternion_about_axis(1.57,(1,0,0))\t\t\n\t\t)\t\t\n\t)\n\tT2_stamped = geometry_msgs.msg.TransformStamped()\n\tT2_stamped.header.stamp = rospy.Time.now()\n\tT2_stamped.header.frame_id = \"F1\"\n\tT2_stamped.child_frame_id = \"F2\"\n\tT2_stamped.transform = message_from_transform(T2)\n\tbr.sendTransform(T2_stamped)\n\n\t# T2_inverse = tf.transformations.inverse_matrix(T2)\n\t# T3_stamped = geometry_msgs.msg.TransformStamped()\n\t# T3_stamped.header.stamp = rospy.Time.now()\n\t# T3_stamped.header.frame_id = \"F2\"\n\t# T3_stamped.child_frame_id = \"F3\"\n\t# T3_stamped.transform = message_from_transform(T2_inverse)\n\t# br.sendTransform(T3_stamped)\n\t\t\n\t# T1_inverse = tf.transformations.inverse_matrix(T1)\n\t# T4_stamped = geometry_msgs.msg.TransformStamped()\n\t# T4_stamped.header.stamp = rospy.Time.now()\n\t# T4_stamped.header.frame_id = \"F3\"\n\t# T4_stamped.child_frame_id = \"F4\"\n\t# T4_stamped.transform = message_from_transform(T1_inverse)\n\t# br.sendTransform(T4_stamped)\n\nif __name__ == \"__main__\":\n\trospy.init_node(\"tf2_examples\")\n\t\n\tbr = tf2_ros.TransformBroadcaster()\n\trospy.sleep(0.5)\n\n\twhile not 
rospy.is_shutdown():\n\t\tpublish_transforms()\n\t\trospy.sleep(0.5)\n", "sub_path": "myCode/catkin_ws/src/tf2_examples/scripts/tf2_examples.py", "file_name": "tf2_examples.py", "file_ext": "py", "file_size_in_byte": 2269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "geometry_msgs.msg.msg.Transform", "line_number": 9, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 9, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 9, "usage_type": "name"}, {"api_name": "tf.transformations.quaternion_from_matrix", "line_number": 10, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tf.transformations.translation_from_matrix", "line_number": 11, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tf.transformations.concatenate_matrices", "line_number": 22, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tf.transformations.translation_matrix", "line_number": 23, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_matrix", "line_number": 24, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 25, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 25, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg.msg.TransformStamped", "line_number": 28, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 28, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 28, "usage_type": "name"}, {"api_name": "rospy.Time.now", "line_number": 29, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tf.transformations.concatenate_matrices", "line_number": 35, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tf.transformations.translation_matrix", "line_number": 36, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_matrix", "line_number": 37, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_about_axis", "line_number": 38, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 38, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg.msg.TransformStamped", "line_number": 41, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 41, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 41, "usage_type": "name"}, {"api_name": "rospy.Time.now", "line_number": 42, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rospy.init_node", "line_number": 65, "usage_type": "call"}, {"api_name": "tf2_ros.TransformBroadcaster", "line_number": 67, "usage_type": "call"}, {"api_name": "rospy.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 70, "usage_type": "call"}, {"api_name": "rospy.sleep", "line_number": 72, 
"usage_type": "call"}]} +{"seq_id": "458971686", "text": "from products.models import Product\nfrom users.models import User\nfrom .models import OrderItem, Order\nfrom rest_framework import generics, status, permissions, pagination\nfrom core.permissions import *\nfrom rest_framework.response import Response\nfrom .serializers import *\n\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom cart.models import Cart\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\nfrom django.shortcuts import render\n\nfrom django.conf import settings\nimport stripe\nimport os\n\n# ?Pagination Class\nclass Pagination(pagination.PageNumberPagination):\n page_size = 100\n page_query_param = 'p'\n\n\n# ?Views\n\nclass UpdateOrder(generics.UpdateAPIView):\n queryset = Order.objects.all()\n serializer_class = UpdateOrderStatus\n permission_classes = [IsStaff]\n\n\n# *admin\nclass ListOrders(generics.ListAPIView):\n queryset = Order.objects.all()\n serializer_class = OrderSerializerListAdmin\n permission_classes = [IsStaff]\n filter_backends = [DjangoFilterBackend]\n filterset_fields = ['status']\n pagination_class = Pagination\n\n\n# *General\nclass Checkout(generics.CreateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, *args, **kwargs):\n try:\n products = Cart.objects.all().filter(user=request.user.id)\n total = 0\n\n for i in products:\n product = Product.objects.all().filter(id=i.product_id).first()\n if product.deal:\n total += product.special_price * i.quantity\n else:\n total += product.price * i.quantity\n\n stripe.api_key = settings.STR_KEY\n charge = stripe.PaymentIntent.create(\n amount=int(total * 100),\n currency='MXN',\n description=f'Order from the user {request.user.id}',\n payment_method=request.data.get('id', None),\n confirm=True\n )\n\n if(charge['status'] == 'succeeded'):\n\n user = request.user\n direction = f\"{user.calle} #{user.exterior_number} {f'#{user.interior_number}' if user.interior_number else ''}, {user.colonia} {user.postalcode} - {user.estado}\"\n order = {\n \"total\": total,\n \"user\": request.user.id,\n \"direction\": direction\n }\n\n serializer = OrderSerializer(data=order)\n if serializer.is_valid():\n serializer.save()\n\n for i in products:\n total = 0\n product = Product.objects.all().filter(id=i.product_id).first()\n if product.deal:\n total = product.special_price * i.quantity\n else:\n total = product.price * i.quantity\n data = {\n \"total\": total,\n \"order\": serializer.data['id'],\n \"quantity\": i.quantity,\n \"product\": product.id\n }\n Product.objects.filter(id=product.id).update(\n popularity=product.popularity + 20 * i.quantity, stock=product.stock - i.quantity)\n # ?validating that other carts are in order related to the stock of the product\n cartProducts = Cart.objects.all()\n for c in cartProducts:\n p = Product.objects.filter(id=product.id).first()\n if p.stock == 0:\n c.delete()\n\n if c.quantity > p.stock:\n Cart.objects.filter(id=c.id).update(\n quantity=p.stock)\n\n if product.stock > 0:\n orderItem = OrderItemSerializer(data=data)\n if orderItem.is_valid():\n orderItem.save()\n products.delete()\n\n #? 
Sending Email\n context = {\n \"UserName\": request.user.nombre,\n \"orderId\": serializer.data['id'],\n \"total\": total,\n \"url\": settings.URL_FOR_EMAIL_ORDER,\n\n\n }\n template = get_template('orderAlert.html')\n content = template.render(context)\n try:\n email = EmailMultiAlternatives(\n 'Nueva Orden',\n 'Nueva Orden',\n settings.EMAIL_HOST_USER,\n [settings.EMAIL_HOST_USER]\n\n )\n email.attach_alternative(content, 'text/html')\n email.send()\n except Exception as e:\n print(e)\n\n return Response(status=status.HTTP_200_OK)\n\n except Exception as e:\n print(e)\n return Response(data={'error': 'El pago falló, porfavor revisa que tengas suficientes fondos'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ListUserOrders(generics.ListAPIView):\n permission_classes = [IsOwner, permissions.IsAuthenticated]\n\n def get(self, request, *args, **kwargs):\n orders = Order.objects.all().filter(user=request.user.id)\n serializer = OrderSerializerList(orders, many=True)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass RetrieveOrder(generics.RetrieveAPIView):\n permission_class = [IsOwner, permissions.IsAuthenticated]\n\n def get(self, request, *args, **kwargs):\n id = self.kwargs['pk']\n order = Order.objects.all().filter(id=id).first()\n if(order.user == request.user):\n\n serializer = OrderSerializerList(order)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n\nclass SolicitarFactura(generics.CreateAPIView):\n def post(self, request, *args, **kwargs):\n id = request.data['id']\n if id:\n order = Order.objects.all().filter(id=id).first()\n\n if order is None:\n return Response({'detail':'No encontrado'}, status = status.HTTP_404_NOT_FOUND)\n\n if order.factura:\n return Response({\"status\": \"Esta orden ya a solicitado una factura, si no la haz recibido, porfavor contactate al correo de atención al cliente\"}, status=status.HTTP_400_BAD_REQUEST)\n\n if order.user != request.user:\n return Response({\"status\": \"No tienes permiso para solicitar Factura sobre esta orden\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n data = request.data\n user = request.user\n context = {\n \"orderId\": data['id'],\n \"UserName\": data['nombre'],\n \"total\":order.total,\n \"email\":data['email'],\n \"direccion\":f\"{user.calle} #{user.exterior_number} {f'#{user.interior_number}' if user.interior_number else ''}, {user.colonia} {user.postalcode} - {user.estado}\",\n \"rfc\":data['rfc'],\n \"nombre\":data['nombre'],\n \"userEmail\": user.email\n\n }\n template = get_template('correo.html')\n content = template.render(context)\n try:\n email = EmailMultiAlternatives(\n f'El usuario {request.user.nombre} solicito una factura sobre el pedido #{order.id}',\n 'Factura',\n settings.EMAIL_HOST_USER,\n ['raulemilianomirandagtz@gmail.com'],\n )\n except Exception as e:\n print(e)\n\n\n email.attach_alternative(content, 'text/html')\n email.send()\n Order.objects.all().filter(id=id).update(factura=True)\n\n return Response({\"status\": \"La solicitud de factura se realizó correctamente\"}, status=status.HTTP_200_OK)\n", "sub_path": "core/orders/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework.pagination", "line_number": 20, "usage_type": "name"}, 
{"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 28, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 35, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 45, "usage_type": "name"}, {"api_name": "products.models", "line_number": 49, "usage_type": "name"}, {"api_name": "cart.models.Cart.objects.all", "line_number": 49, "usage_type": "call"}, {"api_name": "cart.models.Cart.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cart.models.Cart", "line_number": 49, "usage_type": "name"}, {"api_name": "products.models", "line_number": 52, "usage_type": "name"}, {"api_name": "products.models.Product.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "products.models.Product.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "products.models.Product", "line_number": 53, "usage_type": "name"}, {"api_name": "stripe.api_key", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STR_KEY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 59, "usage_type": "name"}, {"api_name": "stripe.PaymentIntent.create", "line_number": 60, "usage_type": "call"}, {"api_name": "stripe.PaymentIntent", "line_number": 60, "usage_type": "attribute"}, {"api_name": "products.models", "line_number": 82, "usage_type": "name"}, {"api_name": "products.models.Product.objects.all", "line_number": 84, "usage_type": "call"}, {"api_name": "products.models.Product.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "products.models.Product", "line_number": 84, "usage_type": "name"}, {"api_name": "products.models.Product.objects.filter", "line_number": 95, "usage_type": "call"}, {"api_name": "products.models.Product.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "products.models.Product", "line_number": 95, "usage_type": "name"}, {"api_name": "cart.models.Cart.objects.all", "line_number": 98, "usage_type": "call"}, {"api_name": "cart.models.Cart.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cart.models.Cart", "line_number": 98, "usage_type": "name"}, {"api_name": "products.models.Product.objects.filter", "line_number": 100, "usage_type": "call"}, {"api_name": "products.models.Product.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "products.models.Product", "line_number": 100, "usage_type": 
"name"}, {"api_name": "cart.models.Cart.objects.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "cart.models.Cart.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "cart.models.Cart", "line_number": 105, "usage_type": "name"}, {"api_name": "products.models.delete", "line_number": 112, "usage_type": "call"}, {"api_name": "products.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.conf.settings.URL_FOR_EMAIL_ORDER", "line_number": 119, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 119, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 123, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 126, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 129, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 129, "usage_type": "name"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 130, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 130, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 138, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 138, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 138, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 142, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 142, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 142, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 145, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 145, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 146, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 146, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 149, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 151, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 151, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 151, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 154, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 154, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 155, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 155, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 159, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 159, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 163, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 163, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 163, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", 
"line_number": 165, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 165, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 165, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 168, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 168, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 172, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 172, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 175, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 175, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 175, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 178, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 178, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 178, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 181, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 181, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 181, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 196, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 199, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 202, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 202, "usage_type": "name"}, {"api_name": "models.Order.objects.all", "line_number": 211, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 211, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 213, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 213, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 213, "usage_type": "name"}]} +{"seq_id": "159965973", "text": "from collections import deque\n\ndef wiki(xs):\n current_max_len = 0\n m = [0 for x in xs]\n preds = [0 for x in xs]\n\n longest = 0\n\n for i, x in enumerate(xs):\n lo, hi = 1, current_max_len\n mid = (lo + hi) // 2\n if xs[m[mid]] < x:\n lo = mid + 1\n else:\n hi = mid - 1\n\n longest, preds[i], m[lo] = (lo if lo > longest else longest,\n m[lo - 1],\n i)\n\n rv = deque([xs[m[longest]]])\n for x in reversed(xs):\n if rv[0] > x:\n rv.appendleft(x)\n\n return rv\n\nif __name__ == '__main__':\n output = wiki([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5,\n 13, 3, 11, 4, 5, 6, 7, 15, 8, 9])\n expected = [0, 2, 3, 4, 5, 6, 7, 8, 9]\n print(output)\n assert len(output) == len(expected)\n", "sub_path": "longest_increasing_subsequence.py", "file_name": "longest_increasing_subsequence.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.deque", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "549886257", "text": "'''\nUsing Reddit's api/v1/\n'''\nimport os\nimport requests\nimport 
requests.auth\nimport sys\nimport time\nfrom .local_settings import *\nfrom ..write_joke import *\n\n\n# Files to read and write\njokes = '/Users/joannejordan/Desktop/GitHub/dad-joke-ai/dadjokes-subreddit-\\\narchive/otherrjokes.csv'\nrecords = '/Users/joannejordan/Desktop/GitHub/dad-joke-ai/dadjokes-subreddit-\\\narchive/otherrecords.txt'\n\n# Set reddit user agent\nuser_agent = f\"{USER_AGENT} by {USERNAME}\"\n\n\ndef get_auth():\n '''Get authorization to use reddit's api\n '''\n # Steps presented in reddit's docs\n client_auth = requests.auth.HTTPBasicAuth(CLIENT_ID, CLIENT_SECRET)\n post_data = {\"grant_type\": \"password\", \"username\": USERNAME,\n \"password\": PASSWORD}\n headers = {\"User-Agent\": user_agent}\n response = requests.post(\"https://www.reddit.com/api/v1/access_token\",\n auth=client_auth, data=post_data, headers=headers)\n text = response.json()\n print(text)\n authorization = text['token_type'] + ' ' + text['access_token']\n # Save authorization to file\n variables = []\n # First, read old file\n with open('local_settings.py', 'r') as local:\n for line in local:\n variables.append(line)\n # Overwrite old file\n with open('local_settings.py', 'w') as local:\n for line in variables[:-1]:\n local.write(line + '\\n')\n local.write('AUTH = {authorization}')\n return authorization\n\n\ndef get_jokes_page(after):\n '''Requests jokes through reddit's online API, sidestepping PRAW's limit\n on the history of the instance\n '''\n # See if old authorization works\n headers = {'Authorization': AUTH, 'User-Agent': user_agent}\n page = requests.get(f'https://oauth.reddit.com/r/dadjokes/new.json?\\\nlimit=100&after={after}', headers=headers).json()\n try:\n if page['error'] == 401:\n # Get new authorization\n authorization = get_auth()\n headers = {'Authorization': authorization,\n 'User-Agent': user_agent}\n page = requests.get(f'https://oauth.reddit.com/r/dadjokes/new.json?\\\nlimit=100&after={after}', headers=headers).json()\n elif page['error'] == 429:\n sys.stdout('Too Many Requests. Waiting...\\n')\n sys.stdout.flush()\n for t in range(len(75)):\n if t % 5 == 0:\n sys.stdout('{15 + (t - 75) // 15} seconds')\n sys.stdout('\\r')\n sys.stdout.flush()\n time.sleep(.2)\n print('Resuming')\n get_jokes_page(after)\n except:\n pass\n return page\n\n\ndef record_jokes(page, last):\n '''Writes joke information to files.\n '''\n # Size of original file with jokes to compare to final and return\n # error if nothing added\n orig = os.path.getsize(jokes)\n # Ensure object is indeed a listing, otherwise, check if 429 error.\n # If 429 error, wait and repeat. 
Otherwise, raise error\n try:\n listing = page['data']['children']\n after = page['data']['after']\n before = page['data']['before']\n except:\n print(page)\n raise\n else:\n with open(jokes, 'a') as joke_file:\n for submission in listing:\n sub_data = submission['data']\n write_joke(sub_data, joke_file, 'requests')\n new = os.path.getsize(jokes)\n if new == orig:\n raise ValueError('Nothing added')\n with open(records, 'a') as rec:\n rec.write(f'After: {after}\\n')\n return after\n\n\ndef get_last():\n final = None\n with open(records, 'r') as rec:\n for line in rec:\n final = line.split()[-1]\n return final\n\n\nif __name__ == \"__main__\":\n last = get_last()\n i = 1\n if not last:\n page = get_jokes_page(None)\n prev = record_jokes(page, None)\n print(f'Recorded page {i} with last submission: {prev}')\n last = prev\n i += 1\n\n while last:\n try:\n page = get_jokes_page(last)\n prev = record_jokes(page, last)\n print(f'Recorded page {i} with last submission: {prev}')\n last = prev\n i += 1\n except:\n error = True\n raise\n", "sub_path": "subreddits/limited_results_scripts/reddit_requests.py", "file_name": "reddit_requests.py", "file_ext": "py", "file_size_in_byte": 4290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.auth.HTTPBasicAuth", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.auth", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}]} +{"seq_id": "73170860", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nfrom hashlib import md5\nfrom six import print_\n\ndef mine(secret):\n i = 0\n while True:\n current = secret + str(i).encode(\"ascii\")\n digest = md5(current).digest()\n if digest[0] == 0 and digest[1] == 0 and digest[2] <= 0x0f:\n return i\n i += 1\n\ndef main():\n print_(mine(b\"yzbqklnj\"))\n \nif __name__ == \"__main__\":\n main()\n", "sub_path": "day4-1.py", "file_name": "day4-1.py", "file_ext": "py", "file_size_in_byte": 421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "hashlib.md5", "line_number": 12, "usage_type": "call"}, {"api_name": "six.print_", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "187882582", "text": "# -*- coding: utf-8 -*-\n#\n# /)\n# / )\n# (\\ / )\n# ( \\ / )\n# ( \\/ / )\n# (@) )\n# / \\_ \\\n# // \\\\\\\n# (( \\\\\n# ~ ~ ~ \\\n# skylark\n#\n\n\"\"\"\n skylark\n ~~~~~~~\n\n A nice micro 
orm for python, mysql only.\n\n :copyright: (c) 2014 by Chao Wang (Hit9).\n :license: BSD.\n\"\"\"\n\nimport sys\nfrom datetime import date, datetime, time, timedelta\n\nlib_mysqldb = 0\nlib_pymysql = 0\n\ntry: # try to use MySQLdb, then pymysql\n import MySQLdb as mysql\n from _mysql import escape_dict, escape_sequence, NULL, string_literal\n lib_mysqldb = 1\nexcept ImportError:\n import pymysql as mysql\n from pymysql import NULL, escape_dict, escape_sequence\n from pymysql.converters import escape_str as string_literal\n lib_pymysql = 1\n\n\nif sys.hexversion < 0x03000000:\n PY_VERSION = 2\nelse:\n PY_VERSION = 3\n\n\nif PY_VERSION == 3:\n from functools import reduce\n\n\n__version__ = '0.7.1'\n\n\nOP_LT = 1\nOP_LE = 2\nOP_GT = 3\nOP_GE = 4\nOP_EQ = 5\nOP_NE = 6\nOP_ADD = 7\nOP_AND = 8\nOP_OR = 9\nOP_LIKE = 10\nOP_BETWEEN = 11\nOP_IN = 12\nOP_NOT_IN = 13\n\n\nQUERY_INSERT = 21\nQUERY_UPDATE = 22\nQUERY_SELECT = 23\nQUERY_DELETE = 24\n\n\nclass SkylarkException(Exception):\n pass\n\n\nclass UnSupportedType(SkylarkException):\n pass\n\n\nclass PrimaryKeyValueNotFound(SkylarkException):\n pass\n\n\nclass ForeignKeyNotFound(SkylarkException):\n pass\n\n\ndef patch_mysqldb_cursor(cursor):\n # let MySQLdb.cursor enable fetching after close\n rows = tuple(cursor.fetchall())\n\n def create_generator():\n for row in rows:\n yield row\n\n generator = create_generator()\n\n def fetchall():\n return generator\n\n def fetchone():\n try:\n return generator.next()\n except StopIteration:\n pass\n\n cursor.fetchall = fetchall\n cursor.fetchone = fetchone\n return cursor\n\n\nclass DatabaseType(object):\n\n def __init__(self):\n self.configs = {\n 'host': 'localhost',\n 'port': 3306,\n 'db': '',\n 'user': '',\n 'passwd': '',\n 'charset': 'utf8'\n }\n self.autocommit = True\n self.conn = None\n\n def conn_is_up(self):\n if lib_pymysql:\n return self.conn and self.conn.socket and self.conn._rfile\n if lib_mysqldb:\n return self.conn and self.conn.open\n\n def config(self, autocommit=True, **configs):\n self.configs.update(configs)\n self.autocommit = autocommit\n\n # close active connection on configs change\n if self.conn_is_up():\n self.conn.close()\n\n def connect(self):\n self.conn = mysql.connect(**self.configs)\n self.conn.autocommit(self.autocommit)\n\n def get_conn(self):\n if not self.conn_is_up():\n self.connect()\n\n # make sure current connection is working\n try:\n self.conn.ping()\n except mysql.OperationalError:\n self.connect()\n\n return self.conn\n\n def __del__(self):\n if self.conn_is_up():\n return self.conn.close()\n\n def execute(self, sql):\n cursor = self.get_conn().cursor()\n cursor.execute(sql)\n if lib_mysqldb:\n # copy all data from origin cursor\n patch_mysqldb_cursor(cursor)\n cursor.close()\n return cursor\n\n def change(self, db):\n self.configs['db'] = db\n\n if self.conn_is_up():\n self.conn.select_db(db)\n\n select_db = change # alias\n\n\nDatabase = database = DatabaseType()\n\n\nclass Node(object):\n\n def __repr__(self):\n return '<%s %r>' % (type(self).__name__, Compiler.tostr(self))\n\n def clone(self, *args, **kwargs):\n obj = type(self)(*args, **kwargs)\n\n for key, value in self.__dict__.items():\n setattr(obj, key, value)\n return obj\n\n\nclass Leaf(Node):\n\n def _e(op):\n def e(self, right):\n return Expr(self, right, op)\n return e\n\n __lt__ = _e(OP_LT)\n\n __le__ = _e(OP_LE)\n\n __gt__ = _e(OP_GT)\n\n __ge__ = _e(OP_GE)\n\n __eq__ = _e(OP_EQ)\n\n __ne__ = _e(OP_NE)\n\n __add__ = _e(OP_ADD)\n\n __and__ = _e(OP_AND)\n\n __or__ = _e(OP_OR)\n\n\nclass 
SQL(Leaf):\n\n def __init__(self, literal):\n self.literal = literal\n\n\nsql = SQL\n\n\nclass Expr(Leaf):\n\n def __init__(self, left, right, op):\n self.left = left\n self.right = right\n self.op = op\n\n\nclass FieldDescriptor(object):\n\n def __init__(self, field):\n self.field = field\n\n def __get__(self, instance, type=None):\n if instance:\n return instance.data[self.field.name]\n return self.field\n\n def __set__(self, instance, value):\n instance.data[self.field.name] = value\n\n\nclass Field(Leaf):\n\n def __init__(self, is_primarykey=False, is_foreignkey=False):\n self.is_primarykey = is_primarykey\n self.is_foreignkey = is_foreignkey\n\n def describe(self, name, model):\n self.name = name\n self.model = model\n self.fullname = '%s.%s' % (self.model.table_name, self.name)\n setattr(model, name, FieldDescriptor(self))\n\n def like(self, pattern):\n return Expr(self, pattern, OP_LIKE)\n\n def between(self, left, right):\n return Expr(self, (left, right), OP_BETWEEN)\n\n def _in(self, *values):\n return Expr(self, values, OP_IN)\n\n def not_in(self, *values):\n return Expr(self, values, OP_NOT_IN)\n\n def alias(self, _alias):\n field = self.clone()\n field.name = _alias\n field.fullname = '%s as %s' % (self.fullname, _alias)\n setattr(self.model, field.name, FieldDescriptor(field))\n return field\n\n\nclass PrimaryKey(Field):\n\n def __init__(self):\n super(PrimaryKey, self).__init__(is_primarykey=True)\n\n\nclass ForeignKey(Field):\n\n def __init__(self, point_to):\n super(ForeignKey, self).__init__(is_foreignkey=True)\n self.point_to = point_to\n\n\nclass Function(Leaf):\n\n def __init__(self, name, *args):\n self.name = name\n self.args = args\n self.fullname = '%s(%s)' % (\n self.name, ', '.join(map(Compiler.tostr, self.args)))\n\n def alias(self, _alias):\n fn = self.clone(self.name, *self.args)\n fn.name = _alias\n fn.fullname = '%s as %s' % (self.fullname, _alias)\n return fn\n\n\nclass Func(object):\n\n def __init__(self, data=None):\n if data is None:\n data = {}\n self.data = data\n\n def __getattr__(self, name):\n if name in self.data:\n return self.data[name]\n raise AttributeError\n\n def __getitem__(self, name):\n return self.data[name]\n\n\nclass Fn(object):\n\n def _e(self, name):\n def e(*args):\n return Function(name, *args)\n return e\n\n def __getattr__(self, name):\n return self._e(name)\n\n\nfn = Fn()\n\n\nclass Distinct(Node):\n # 'distinct user.name, user.email..' 
-> legal\n # 'user.id distinct user.name' -> illegal\n # 'user.id, count(distinct user.name)' -> legal\n\n def __init__(self, *args):\n self.args = args\n self.fullname = 'distinct(%s)' % ', '.join(\n map(Compiler.tostr, args))\n\n\ndistinct = Distinct\n\n\nclass Query(object):\n\n def __init__(self, type, runtime, target=None):\n self.type = type\n self.sql = Compiler.compile(runtime, self.type, target)\n runtime.reset_data()\n\n def __repr__(self):\n return '<%s %r>' % (type(self).__name__, self.sql)\n\n\nclass InsertQuery(Query):\n\n def __init__(self, runtime, target=None):\n super(InsertQuery, self).__init__(QUERY_INSERT, runtime, target)\n\n def execute(self):\n cursor = Database.execute(self.sql)\n return cursor.lastrowid if cursor.rowcount else None\n\n\nclass UpdateQuery(Query):\n\n def __init__(self, runtime, target=None):\n super(UpdateQuery, self).__init__(QUERY_UPDATE, runtime, target)\n\n def execute(self):\n cursor = Database.execute(self.sql)\n return cursor.rowcount\n\n\nclass SelectQuery(Query):\n\n def __init__(self, runtime, target=None):\n self.from_model = runtime.model\n self.selects = runtime.data['select']\n super(SelectQuery, self).__init__(QUERY_SELECT, runtime, target)\n\n def __iter__(self):\n results = self.execute()\n return results.all()\n\n def execute(self):\n cursor = Database.execute(self.sql)\n return SelectResult(cursor, self.from_model, self.selects)\n\n\nclass DeleteQuery(Query):\n\n def __init__(self, runtime, target=None):\n super(DeleteQuery, self).__init__(QUERY_DELETE, runtime, target)\n\n def execute(self):\n cursor = Database.execute(self.sql)\n return cursor.rowcount\n\n\nclass SelectResult(object):\n\n def __init__(self, cursor, model, nodes):\n self.cursor = cursor\n self.model = model\n self.count = self.cursor.rowcount\n\n # distinct should be the first select node if it exists\n if len(nodes) >= 1 and isinstance(nodes[0], Distinct):\n nodes = list(nodes[0].args) + nodes[1:]\n\n self.fields = {}\n self.funcs = {}\n\n for idx, node in enumerate(nodes):\n if isinstance(node, Field):\n self.fields[idx] = node\n elif isinstance(node, Function):\n self.funcs[idx] = node\n\n # returns: 0->inst, 1->func, 2->inst, func\n if self.fields and not self.funcs:\n self.returns = 0\n elif not self.fields and self.funcs:\n self.returns = 1\n elif self.fields and self.funcs:\n self.returns = 2\n\n def inst(self, model, row):\n inst = model()\n inst.set_in_db(True)\n\n for idx, field in self.fields.items():\n if field.model is model:\n inst.data[field.name] = row[idx]\n return inst\n\n def func(self, row):\n func = Func()\n\n for idx, function in self.funcs.items():\n func.data[function.name] = row[idx]\n return func\n\n def __one(self, row):\n\n func = self.func(row)\n\n if self.model.single:\n inst = self.inst(self.model, row)\n return {\n 0: inst,\n 1: func,\n 2: (inst, func)\n }[self.returns]\n else:\n insts = tuple(map(lambda m: self.inst(m, row), self.model.models))\n return {\n 0: insts,\n 1: func,\n 2: insts + (func, )\n }[self.returns]\n\n def one(self):\n row = self.cursor.fetchone()\n\n if row is None:\n return None\n return self.__one(row)\n\n def all(self):\n rows = self.cursor.fetchall()\n\n for row in rows:\n yield self.__one(row)\n\n def tuples(self):\n for row in self.cursor.fetchall():\n yield row\n\n def dicts(self):\n for row in self.cursor.fetchall():\n dct = {}\n for idx, field in self.fields.items():\n if field.name not in dct:\n dct[field.name] = row[idx]\n else:\n dct[field.fullname] = row[idx]\n for idx, func in 
self.funcs.items():\n dct[func.name] = row[idx]\n yield dct\n\n\nclass Compiler(object):\n\n mappings = {\n OP_LT: '<',\n OP_LE: '<=',\n OP_GT: '>',\n OP_GE: '>=',\n OP_EQ: '=',\n OP_NE: '<>',\n OP_ADD: '+',\n OP_AND: 'and',\n OP_OR: 'or',\n OP_LIKE: 'like',\n OP_BETWEEN: 'between',\n OP_IN: 'in',\n OP_NOT_IN: 'not in'\n }\n\n patterns = {\n QUERY_INSERT: 'insert into {target} {set}',\n QUERY_UPDATE: 'update {target} {set} {where}',\n QUERY_SELECT: 'select {select} from {from} {where} {groupby}'\n ' {having} {orderby} {limit}',\n QUERY_DELETE: 'delete {target} from {from} {where}'\n }\n\n encoding = 'utf8'\n\n def thing2str(data):\n return string_literal(str(data))\n\n def float2str(data):\n return '%.15g' % data\n\n def None2Null(data):\n return NULL\n\n def bool2str(data):\n return str(int(data))\n\n def unicode2str(data):\n return string_literal(data.encode(Compiler.encoding))\n\n def datetime2str(data):\n return string_literal(data.strftime('%Y-%m-%d %H:%M:%S'))\n\n def date2str(data):\n return string_literal(data.strftime('%Y-%m-%d'))\n\n def time2str(data):\n return string_literal(data.strftime('%H:%M:%S'))\n\n def timedelta2str(data):\n seconds = int(data.seconds) % 60\n minutes = int(data.seconds / 60) % 60\n hours = int(data.seconds / 3600) % 24\n return string_literal('%d %d:%d:%d' % (\n data.days, hours, minutes, seconds))\n\n def node2str(node):\n return node.fullname\n\n def expr2str(expr):\n return Compiler.parse_expr(expr)\n\n def query2str(query):\n return '(%s)' % query.sql\n\n def sql2str(sql):\n return str(sql.literal)\n\n conversions = {\n datetime: datetime2str,\n date: date2str,\n Field: node2str,\n PrimaryKey: node2str,\n ForeignKey: node2str,\n Function: node2str,\n Distinct: node2str,\n sql: sql2str,\n Expr: expr2str,\n Query: query2str,\n InsertQuery: query2str,\n UpdateQuery: query2str,\n SelectQuery: query2str,\n DeleteQuery: query2str,\n time: time2str,\n timedelta: timedelta2str,\n int: thing2str,\n float: float2str,\n str: thing2str,\n bool: bool2str,\n type(None): None2Null,\n tuple: escape_sequence,\n list: escape_sequence,\n dict: escape_dict\n }\n\n if PY_VERSION == 2:\n conversions.update({\n long: thing2str,\n unicode: unicode2str\n })\n\n @staticmethod\n def tostr(e):\n tp = type(e)\n if tp in Compiler.conversions:\n return Compiler.conversions[tp](e)\n raise UnSupportedType\n\n @staticmethod\n def parse_expr(expr):\n tostr = Compiler.tostr\n mappings = Compiler.mappings\n\n left = tostr(expr.left)\n\n if expr.op in (\n OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE,\n OP_ADD, OP_AND, OP_OR, OP_LIKE\n ):\n right = tostr(expr.right)\n elif expr.op is OP_BETWEEN:\n right = '%s and %s' % tuple(map(tostr, expr.right))\n elif expr.op in (OP_IN, OP_NOT_IN):\n right = '(%s)' % ', '.join(map(tostr, expr.right))\n\n string = '%s %s %s' % (left, mappings[expr.op], right)\n\n if expr.op in (OP_AND, OP_OR):\n string = '(%s)' % string\n\n return string\n\n def _compile(pattern):\n def _e(func):\n def e(lst):\n if not lst:\n return ''\n return pattern.format(*func(lst))\n return e\n return _e\n\n @_compile('order by {0}{1}')\n def _orderby(lst):\n node, desc = lst\n return Compiler.tostr(node), ' desc' if desc else ''\n\n @_compile('group by {0}')\n def _groupby(lst):\n return ', '.join(map(Compiler.tostr, lst)),\n\n @_compile('having {0}')\n def _having(lst):\n return ' and '.join(map(Compiler.parse_expr, lst)),\n\n @_compile('where {0}')\n def _where(lst):\n return ' and '.join(map(Compiler.parse_expr, lst)),\n\n @_compile('{0}')\n def _select(lst):\n return ', 
'.join(f.fullname for f in lst),\n\n @_compile('limit {0}{1}')\n def _limit(lst):\n offset, rows = lst\n return '%s, ' % offset if offset else '', rows\n\n @_compile('set {0}')\n def _set(lst):\n return ', '.join(map(Compiler.parse_expr, lst)),\n\n compilers = {\n 'orderby': _orderby,\n 'groupby': _groupby,\n 'having': _having,\n 'where': _where,\n 'select': _select,\n 'limit': _limit,\n 'set': _set\n }\n\n @staticmethod\n def compile(runtime, type, target=None):\n\n if target is None:\n target = runtime.model\n\n args = {\n 'target': target.table_name,\n 'from': runtime.model.table_name\n }\n\n for key, func in Compiler.compilers.items():\n args[key] = func(runtime.data[key])\n\n pattern = Compiler.patterns[type]\n\n return ' '.join(pattern.format(**args).split())\n\n\nclass Runtime(object):\n\n def __init__(self, model=None):\n self.model = model\n self.reset_data()\n\n def reset_data(self):\n keys = (\n 'where', 'set', 'orderby', 'select', 'limit', 'groupby', 'having')\n # dont use {}.fromkeys(keys, [])\n self.data = dict((key, []) for key in keys)\n\n def __repr__(self):\n return '' % self.data\n\n def set_orderby(self, lst):\n self.data['orderby'] = list(lst)\n\n def set_groupby(self, lst):\n self.data['groupby'] = list(lst)\n\n def set_having(self, lst):\n self.data['having'] = list(lst)\n\n def set_limit(self, lst):\n self.data['limit'] = list(lst)\n\n def set_select(self, lst):\n self.data['select'] = list(lst or self.model.get_fields())\n\n def set_where(self, lst, dct):\n lst = list(lst)\n\n if self.model.single:\n lst.extend(self.model.fields[k] == v for k, v in dct.items())\n\n self.data['where'] = lst\n\n def set_set(self, lst, dct):\n lst = list(lst)\n\n if self.model.single:\n lst.extend(self.model.fields[k] == v for k, v in dct.items())\n\n self.data['set'] = lst\n\n\nclass MetaModel(type):\n\n def __init__(cls, name, bases, attrs):\n table_name = None\n primarykey = None\n fields = {}\n\n for name, value in cls.__dict__.items():\n if isinstance(value, Field):\n fields[name] = value\n if value.is_primarykey:\n primarykey = value\n elif name == 'table_name':\n table_name = value\n\n if table_name is None:\n # default: 'User' => 'user', 'CuteCat' => 'cute_cat'\n table_name = reduce(\n lambda x, y: ('_' if y.isupper() else '').join((x, y)),\n list(cls.__name__)\n ).lower()\n\n if primarykey is None:\n fields['id'] = primarykey = PrimaryKey()\n\n cls.primarykey = primarykey\n cls.table_name = table_name\n cls.fields = fields\n\n for name, field in cls.fields.items():\n field.describe(name, cls)\n\n cls.runtime = Runtime(cls)\n\n def __contains__(cls, inst):\n if isinstance(inst, cls):\n query = cls.where(**inst.data).select()\n results = query.execute()\n if results.count:\n return True\n return False\n\n def __and__(cls, join):\n return JoinModel(cls, join)\n\n\nclass Model(MetaModel('NewBase', (object, ), {})): # py3 compat\n\n single = True\n\n def __init__(self, *lst, **dct):\n self.data = {}\n\n for expr in lst:\n field, value = expr.left, expr.right\n self.data[field.name] = value\n\n self.data.update(dct)\n self._cache = self.data.copy()\n self.set_in_db(False)\n\n def set_in_db(self, boolean):\n self._in_db = boolean\n\n @classmethod\n def get_fields(cls):\n return list(cls.fields.values())\n\n @classmethod\n def insert(cls, *lst, **dct):\n cls.runtime.set_set(lst, dct)\n return InsertQuery(cls.runtime)\n\n @classmethod\n def select(cls, *lst):\n cls.runtime.set_select(lst)\n return SelectQuery(cls.runtime)\n\n @classmethod\n def update(cls, *lst, **dct):\n 
cls.runtime.set_set(lst, dct)\n return UpdateQuery(cls.runtime)\n\n @classmethod\n def create(cls, *lst, **dct):\n query = cls.insert(*lst, **dct)\n id = query.execute()\n\n if id is not None:\n dct[cls.primarykey.name] = id\n instance = cls(*lst, **dct)\n instance.set_in_db(True)\n return instance\n return None\n\n @classmethod\n def delete(cls):\n return DeleteQuery(cls.runtime)\n\n @classmethod\n def where(cls, *lst, **dct):\n cls.runtime.set_where(lst, dct)\n return cls\n\n @classmethod\n def at(cls, id):\n return cls.where(cls.primarykey == id)\n\n @classmethod\n def orderby(cls, field, desc=False):\n cls.runtime.set_orderby((field, desc))\n return cls\n\n @classmethod\n def groupby(cls, *lst):\n cls.runtime.set_groupby(lst)\n return cls\n\n @classmethod\n def having(cls, *lst):\n cls.runtime.set_having(lst)\n return cls\n\n @classmethod\n def limit(cls, rows, offset=None):\n cls.runtime.set_limit((offset, rows))\n return cls\n\n @classmethod\n def findone(cls, *lst, **dct):\n query = cls.where(*lst, **dct).select()\n results = query.execute()\n return results.one()\n\n @classmethod\n def findall(cls, *lst, **dct):\n query = cls.where(*lst, **dct).select()\n results = query.execute()\n return results.all()\n\n @classmethod\n def getone(cls):\n return cls.select().execute().one()\n\n @classmethod\n def getall(cls):\n return cls.select().execute().all()\n\n @property\n def _id(self):\n return self.data.get(type(self).primarykey.name, None)\n\n def save(self):\n model = type(self)\n\n if not self._in_db: # insert\n id = model.insert(**self.data).execute()\n\n if id is not None:\n self.data[model.primarykey.name] = id\n self.set_in_db(True)\n self._cache = self.data.copy() # sync cache on saving\n return id\n else: # update\n dct = dict(set(self.data.items()) - set(self._cache.items()))\n\n if self._id is None:\n raise PrimaryKeyValueNotFound\n\n if dct:\n query = model.at(self._id).update(**dct)\n rows_affected = query.execute()\n else:\n rows_affected = 0\n self._cache = self.data.copy()\n return rows_affected\n\n def destroy(self):\n if self._in_db:\n if self._id is None:\n raise PrimaryKeyValueNotFound\n result = type(self).at(self._id).delete().execute()\n if result:\n self.set_in_db(False)\n return result\n return None\n\n def aggregator(name):\n @classmethod\n def _func(cls, arg=None):\n if arg is None:\n arg = cls.primarykey\n function = Function(name, arg)\n query = cls.select(function)\n result = query.execute()\n func = result.one()\n return func.data[function.name]\n return _func\n\n count = aggregator('count')\n\n sum = aggregator('sum')\n\n max = aggregator('max')\n\n min = aggregator('min')\n\n avg = aggregator('avg')\n\n\nclass Models(object):\n\n def __init__(self, *models):\n self.models = list(models)\n self.single = False\n self.runtime = Runtime(self)\n self.table_name = ', '.join(m.table_name for m in self.models)\n self.primarykey = [m.primarykey for m in self.models]\n\n def get_fields(self):\n return sum((list(m.get_fields()) for m in self.models), [])\n\n def select(self, *lst):\n self.runtime.set_select(lst)\n return SelectQuery(self.runtime)\n\n def update(self, *lst):\n self.runtime.set_set(lst, {})\n return UpdateQuery(self.runtime)\n\n def delete(self, target=None):\n return DeleteQuery(self.runtime, target=target)\n\n def where(self, *lst):\n self.runtime.set_where(lst, {})\n return self\n\n def orderby(self, field, desc=False):\n self.runtime.set_orderby((field, desc))\n return self\n\n def groupby(self, *lst):\n self.runtime.set_groupby(lst)\n return 
self\n\n def having(self, *lst):\n self.runtime.set_having(lst)\n return self\n\n def limit(self, rows, offset=None):\n self.runtime.set_limit((offset, rows))\n return self\n\n def findone(self, *lst):\n query = self.where(*lst).select()\n results = query.execute()\n return results.one()\n\n def findall(self, *lst):\n query = self.where(*lst).select()\n results = query.execute()\n return results.all()\n\n def getone(self):\n return self.select().execute().one()\n\n def getall(self):\n return self.select().execute().all()\n\n\nclass JoinModel(Models):\n\n def __init__(self, main, join):\n super(JoinModel, self).__init__(main, join)\n self.bridge = None\n\n for field in main.get_fields():\n if field.is_foreignkey and field.point_to is join.primarykey:\n self.bridge = field\n\n if self.bridge is None:\n raise ForeignKeyNotFound\n\n def _bridge(func):\n def e(self, *args, **kwargs):\n self.runtime.data['where'].append(\n self.bridge == self.bridge.point_to\n )\n return func(self, *args, **kwargs)\n return e\n\n @_bridge\n def select(self, *lst):\n return super(JoinModel, self).select(*lst)\n\n @_bridge\n def update(self, *lst):\n return super(JoinModel, self).update(*lst)\n\n @_bridge\n def delete(self, target=None):\n return super(JoinModel, self).delete(target)\n", "sub_path": "skylark.py", "file_name": "skylark.py", "file_ext": "py", "file_size_in_byte": 25427, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.hexversion", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 146, "usage_type": "call"}, {"api_name": "pymysql.OperationalError", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pymysql.converters.escape_str", "line_number": 539, "usage_type": "call"}, {"api_name": "pymysql.NULL", "line_number": 545, "usage_type": "name"}, {"api_name": "pymysql.converters.escape_str", "line_number": 551, "usage_type": "call"}, {"api_name": "pymysql.converters.escape_str", "line_number": 554, "usage_type": "call"}, {"api_name": "pymysql.converters.escape_str", "line_number": 557, "usage_type": "call"}, {"api_name": "pymysql.converters.escape_str", "line_number": 560, "usage_type": "call"}, {"api_name": "pymysql.converters.escape_str", "line_number": 566, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 582, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 583, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 596, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 597, "usage_type": "name"}, {"api_name": "pymysql.escape_sequence", "line_number": 603, "usage_type": "name"}, {"api_name": "pymysql.escape_sequence", "line_number": 604, "usage_type": "name"}, {"api_name": "pymysql.escape_dict", "line_number": 605, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 777, "usage_type": "call"}]} +{"seq_id": "511095428", "text": "# coding=utf-8\n\"\"\"\npygame-menu\nhttps://github.com/ppizarror/pygame-menu\n\nWIDGET\nBase class for widgets.\n\nLicense:\n-------------------------------------------------------------------------------\nThe MIT License (MIT)\nCopyright 2017-2020 Pablo Pizarro R. 
@ppizarror\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-------------------------------------------------------------------------------\n\"\"\"\n\nimport pygame\nimport pygame_menu.baseimage as _baseimage\nimport pygame_menu.font as _fonts\nimport pygame_menu.locals as _locals\nfrom pygame_menu.widgets.core.selection import Selection\nfrom pygame_menu.sound import Sound\nfrom pygame_menu.utils import make_surface, assert_alignment, assert_color, assert_position, assert_vector2\n\nfrom uuid import uuid4\nimport time\n\n\nclass Widget(object):\n \"\"\"\n Widget abstract class.\n\n :param title: Widget title\n :type title: str\n :param widget_id: Widget identifier\n :type widget_id: str\n :param onchange: Callback when changing the selector\n :type onchange: function, None\n :param onreturn: Callback when pressing return button\n :type onreturn: callable, None\n :param args: Optional arguments for callbacks\n :param kwargs: Optional keyword-arguments for callbacks\n \"\"\"\n\n def __init__(self,\n title='',\n widget_id='',\n onchange=None,\n onreturn=None,\n args=None,\n kwargs=None\n ):\n assert isinstance(title, str)\n assert isinstance(widget_id, str)\n if onchange:\n assert callable(onchange), 'onchange must be callable or None'\n if onreturn:\n assert callable(onreturn), 'onreturn must be callable or None'\n\n # Store id, if None or empty create new ID based on UUID\n if widget_id is None or len(widget_id) == 0:\n widget_id = uuid4()\n self._attributes = {} # Stores widget attributes\n self._alignment = _locals.ALIGN_CENTER\n self._background_color = None\n self._background_inflate = (0, 0)\n self._events = [] # type: list\n self._id = str(widget_id)\n self._margin = (0.0, 0.0) # type: tuple\n self._max_width = None # type: (int,float)\n self._rect = pygame.Rect(0.0, 0.0, 0.0, 0.0) # type: (pygame.Rect,None)\n self._selected_rect = None # type: (pygame.rect.Rect,None)\n self._selection_time = 0 # type: float\n self._title = title\n\n self._args = args or [] # type: list\n self._kwargs = kwargs or {} # type: dict\n self._on_change = onchange # type: callable\n self._on_return = onreturn # type: callable\n\n # Surface of the widget\n self._surface = None # type: (pygame.Surface,None)\n\n # Menu reference\n self._menu = None\n\n # If this is True then the widget forces the Menu to update because the\n # widget render has changed\n self._menu_surface_needs_update = False\n\n # Modified in set_font() method\n self._font = None # type: (pygame.font.Font,None)\n self._font_antialias = True # type: bool\n 
self._font_background_color = None # type: (tuple, None)\n self._font_color = (0, 0, 0) # type: tuple\n self._font_name = '' # type: str\n self._font_selected_color = (255, 255, 255) # type: tuple\n self._font_size = 0 # type: int\n\n # Text shadow\n self._shadow = False # type: bool\n self._shadow_color = (0, 0, 0) # type: tuple\n self._shadow_offset = 2.0 # type: float\n self._shadow_position = _locals.POSITION_NORTHWEST\n self._shadow_tuple = None # (x px offset, y px offset)\n self._create_shadow_tuple()\n\n # Rendering, this variable may be used by render() method\n # If the hash of the variables changes with respect to the last render hash\n # (hash computed using self._hash_variables() method)\n # then the widget should render and update the hash\n self._last_render_hash = 0 # type: int\n\n # Stores the last render surface size, updated by _check_render_size_changed()\n self._last_render_surface_size = (0, 0)\n\n self._selection_effect = None # type: Selection\n\n # Public attributes\n self.active = False # Widget requests focus\n self.is_selectable = True # Some widgets cannot be selected like labels\n self.joystick_enabled = True\n self.mouse_enabled = True\n self.selected = False\n self.selection_effect_enabled = True # Some widgets cannot have selection effect\n self.sound = Sound() # type: Sound\n\n def set_attribute(self, key, value):\n \"\"\"\n Set widget attribute.\n\n :param key: Key of the attribute\n :type key: str\n :param value: Value of the attribute\n :type value: Any\n :return: None\n \"\"\"\n assert isinstance(key, str)\n self._attributes[key] = value\n\n def get_attribute(self, key, default):\n \"\"\"\n Get attribute value.\n\n :param key: Key of the attribute\n :type key: str\n :param default: Value returned if the key does not exist\n :type default: Any\n :return: Attribute data\n :rtype: Any\n \"\"\"\n assert isinstance(key, str)\n if key not in self._attributes.keys():\n return default\n return self._attributes[key]\n\n @staticmethod\n def _hash_variables(*args):\n \"\"\"\n Compute hash from a series of variables.\n\n :param args: Variables to compute hash\n :type args: Object\n :return: Hash data\n :rtype: int\n \"\"\"\n return hash(args)\n\n def _render_hash_changed(self, *args):\n \"\"\"\n This method checks if the widget must render because the inner variables changed.\n This method should include all the variables.\n If the hash changed, the widget should render again and the new hash is stored.\n\n :param args: Variables to check the hash\n :type args: Object\n :return: True if the hash changed, so the widget must render\n :rtype: bool\n \"\"\"\n _hash = self._hash_variables(*args)\n if _hash != self._last_render_hash:\n self._last_render_hash = _hash\n return True\n return False\n\n def set_title(self, title): # lgtm [py/inheritance/incorrect-overridden-signature]\n \"\"\"\n Update the widget title.\n\n :param title: New title\n :type title: str\n :return: None\n \"\"\"\n self._title = str(title)\n self._apply_font()\n self._render()\n self._check_render_size_changed()\n\n def get_title(self):\n \"\"\"\n Return the widget title.\n\n :return: Widget title\n :rtype: str\n \"\"\"\n return self._title\n\n def set_background_color(self, color, inflate=(0, 0)):\n \"\"\"\n Set widget background color.\n\n :param color: Widget background color\n :type color: tuple, list, :py:class:`pygame_menu.baseimage.BaseImage`, None\n :param inflate: Inflate background in x,y\n :type inflate: tuple, list\n :return: None\n \"\"\"\n if color is not None:\n if isinstance(color, _baseimage.BaseImage):\n assert color.get_drawing_mode() == _baseimage.IMAGE_MODE_FILL, \\\n 'currently widget only 
support IMAGE_MODE_FILL drawing mode'\n else:\n assert_color(color)\n assert_vector2(inflate)\n assert inflate[0] >= 0 and inflate[1] >= 0, \\\n 'widget background inflate must be equal or greater than zero in both axis'\n self._background_color = color\n self._background_inflate = inflate\n\n def _fill_background_color(self, surface):\n \"\"\"\n Fill a surface with the widget background color.\n\n :param surface: Surface to fill\n :type surface: :py:class:`pygame.Surface`\n :return: None\n \"\"\"\n if self._background_color is None:\n return\n if isinstance(self._background_color, _baseimage.BaseImage):\n self._background_color.draw(\n surface=surface,\n area=self._rect.inflate(*self._background_inflate),\n position=(self._rect.x - self._background_inflate[0] / 2,\n self._rect.y - self._background_inflate[1] / 2)\n )\n else:\n surface.fill(self._background_color, self._rect.inflate(*self._background_inflate))\n\n def get_selection_effect(self):\n \"\"\"\n :return: Selection effect\n :rtype: :py:class:`pygame_menu.widgets.core.Selection`\n \"\"\"\n return self._selection_effect\n\n def set_selection_effect(self, selection):\n \"\"\"\n Set the selection effect handler.\n\n :param selection: Selection effect class\n :type selection: :py:class:`pygame_menu.widgets.core.Selection`\n :return: None\n \"\"\"\n assert isinstance(selection, Selection)\n self._selection_effect = selection\n\n def apply(self, *args):\n \"\"\"\n Run ``on_return`` callback when return event. A callback function\n receives the following arguments:\n\n .. code-block:: python\n\n callback_func( value, *args, *widget._args, **widget._kwargs )\n\n with:\n - ``value`` (if something is returned by ``get_value()``)\n - ``args`` given to this method\n - ``args`` of the widget\n - ``kwargs`` of the widget\n\n :param args: Extra arguments passed to the callback\n :return: None\n \"\"\"\n if self._on_return:\n args = list(args) + list(self._args)\n try:\n args.insert(0, self.get_value())\n except ValueError:\n pass\n return self._on_return(*args, **self._kwargs)\n\n def change(self, *args):\n \"\"\"\n Run ``on_change`` callback after change event is triggered. A callback function\n receives the following arguments:\n\n .. 
code-block:: python\n\n callback_func( value, *args, *widget._args, **widget._kwargs )\n\n with:\n - ``value`` (if something is returned by ``get_value()``)\n - ``args`` given to this method\n - ``args`` of the widget\n - ``kwargs`` of the widget\n\n :param args: Extra arguments passed to the callback\n :return: None\n \"\"\"\n if self._on_change:\n args = list(args) + list(self._args)\n try:\n args.insert(0, self.get_value())\n except ValueError:\n pass\n return self._on_change(*args, **self._kwargs)\n\n def draw(self, surface):\n \"\"\"\n Draw the widget shape.\n\n :param surface: Surface to draw\n :type surface: :py:class:`pygame.Surface`\n :return: None\n \"\"\"\n raise NotImplementedError('override is mandatory')\n\n def draw_selection(self, surface):\n \"\"\"\n Draw selection effect on widget.\n\n :param surface: Surface to draw\n :type surface: :py:class:`pygame.Surface`\n :return: None\n \"\"\"\n if not self.is_selectable or self._selection_effect is None or not self.selection_effect_enabled:\n return\n self._selection_effect.draw(surface, self)\n\n def set_max_width(self, width):\n \"\"\"\n Set widget max width (column support) if force_fit_text is enabled.\n\n :param width: Width in px, None if max width is disabled\n :type width: int, float, None\n :return: None\n \"\"\"\n if width is not None:\n assert isinstance(width, (int, float))\n self._max_width = width\n\n def get_margin(self):\n \"\"\"\n :return: Widget margin\n :rtype: tuple\n \"\"\"\n return self._margin\n\n def set_margin(self, x, y):\n \"\"\"\n Set Widget margin.\n\n :param x: Margin on x axis\n :type x: int, float\n :param y: Margin on y axis\n :type y: int, float\n :return: None\n \"\"\"\n assert isinstance(x, (int, float))\n assert isinstance(y, (int, float))\n self._margin = (x, y)\n\n def get_rect(self):\n \"\"\"\n Return the Rect object, this forces the widget rendering\n\n :return: Widget rect\n :rtype: :py:class:`pygame.Rect`\n \"\"\"\n self._render()\n return self._rect.copy()\n\n def get_value(self):\n \"\"\"\n Return the value. 
If exception ``ValueError`` is raised,\n no value will be passed to the callbacks.\n\n :return: Value\n :rtype: Object\n \"\"\"\n raise ValueError('{}({}) does not accept value'.format(self.__class__.__name__,\n self.get_id()))\n\n def get_id(self):\n \"\"\"\n Returns the widget ID.\n\n :return: ID\n :rtype: str\n \"\"\"\n return self._id\n\n def _render(self):\n \"\"\"\n Render the widget surface.\n\n This method shall update the attribute ``_surface`` with a pygame.Surface\n representing the outer borders of the widget.\n\n :return: None\n \"\"\"\n raise NotImplementedError('override is mandatory')\n\n def _font_render_string(self, text, color=(0, 0, 0), use_background_color=True):\n \"\"\"\n Render text.\n\n :param text: Text to render\n :type text: str\n :param color: Text color\n :type color: tuple\n :param use_background_color: Use default background color\n :type use_background_color: bool\n :return: Text surface\n :rtype: :py:class:`pygame.Surface`\n \"\"\"\n assert isinstance(text, str)\n assert isinstance(color, tuple)\n assert isinstance(use_background_color, bool)\n bgcolor = self._font_background_color\n\n # Background color must be opaque, otherwise the results are quite bad\n if isinstance(bgcolor, (tuple, list)) and len(bgcolor) == 4 and bgcolor[3] != 255:\n bgcolor = None\n\n # Disable\n if not use_background_color:\n bgcolor = None\n\n return self._font.render(text, self._font_antialias, color, bgcolor)\n\n def _check_render_size_changed(self):\n \"\"\"\n Check whether the size changed after rendering.\n This method should be used only on widgets that can change in size, or if the size\n is changed during execution time (like set_title).\n The update status (needs update if render size changed) is returned by\n Widget.surface_needs_update() method.\n\n :return: Boolean, if True the size changed\n :rtype: bool\n \"\"\"\n if self._rect.size != self._last_render_surface_size:\n self._last_render_surface_size = self._rect.size\n self._menu_surface_needs_update = True\n return True\n return False\n\n def _render_string(self, string, color):\n \"\"\"\n Render text and turn it into a surface.\n\n :param string: Text to render\n :type string: str\n :param color: Text color\n :type color: tuple\n :return: Text surface\n :rtype: :py:class:`pygame.Surface`\n \"\"\"\n text = self._font_render_string(string, color)\n\n # Create surface\n surface = make_surface(width=text.get_width(),\n height=text.get_height(),\n alpha=True)\n\n # Draw shadow first\n if self._shadow:\n text_bg = self._font_render_string(string, self._shadow_color)\n surface.blit(text_bg, self._shadow_tuple)\n\n surface.blit(text, (0, 0))\n new_width = surface.get_size()[0]\n new_height = surface.get_size()[1]\n\n if self._max_width is not None and new_width > self._max_width:\n surface = pygame.transform.smoothscale(surface, (self._max_width, new_height))\n\n return surface\n\n def surface_needs_update(self):\n \"\"\"\n Checks if the widget width/height has changed because of events. If so, returns True and\n sets the update status of the widget (menu widget position needs update) back to False. 
This method\n is used by .update() from Menu class.\n\n :return: True if the widget position has changed due to events after the rendering.\n :rtype: bool\n \"\"\"\n if self._menu_surface_needs_update:\n self._menu_surface_needs_update = False\n return True\n return False\n\n def set_font(self, font, font_size, color, selected_color, background_color, antialias=True):\n \"\"\"\n Set the text font.\n\n :param font: Name or list of names for font (see pygame.font.match_font for precise format)\n :type font: str, list\n :param font_size: Size of font in pixels\n :type font_size: int\n :param color: Text color\n :type color: tuple\n :param selected_color: Text color when widget is selected\n :type selected_color: tuple\n :param background_color: Font background color\n :type background_color: tuple\n :param antialias: Determines if antialias is applied to font (uses more processing power)\n :type antialias: bool\n :return: None\n \"\"\"\n assert isinstance(font, str)\n assert isinstance(font_size, int)\n assert isinstance(color, tuple)\n assert isinstance(selected_color, tuple)\n assert isinstance(background_color, (tuple, type(None)))\n assert isinstance(antialias, bool)\n\n self._font = _fonts.get_font(font, font_size)\n self._font_antialias = antialias\n self._font_background_color = background_color\n self._font_color = color\n self._font_name = font\n self._font_selected_color = selected_color\n self._font_size = font_size\n\n self._apply_font()\n\n def get_font_info(self):\n \"\"\"\n Return a dict with the information of the widget font.\n\n :return: Dict, keys: size (int), name (str), color (tuple), selected_color (tuple), antialias (bool)\n :rtype: dict\n \"\"\"\n return {\n 'size': self._font_size,\n 'name': self._font_name,\n 'color': self._font_color,\n 'selected_color': self._font_selected_color,\n 'antialias': self._font_antialias,\n }\n\n def set_menu(self, menu):\n \"\"\"\n Set menu reference.\n\n :param menu: Menu object\n :type menu: :py:class:`pygame_menu.Menu`\n :return: None\n \"\"\"\n self._menu = menu\n\n def get_menu(self):\n \"\"\"\n Return menu reference (if it exists).\n\n :return: Menu reference\n :rtype: :py:class:`pygame_menu.Menu`\n \"\"\"\n return self._menu\n\n def _apply_font(self):\n \"\"\"\n Function triggered after a font is applied to the widget.\n\n :return: None\n \"\"\"\n raise NotImplementedError('override is mandatory')\n\n def set_position(self, posx, posy):\n \"\"\"\n Set the position.\n\n :param posx: X position\n :type posx: int, float\n :param posy: Y position\n :type posy: int, float\n :return: None\n \"\"\"\n self._rect.x = posx\n self._rect.y = posy\n\n def set_alignment(self, align):\n \"\"\"\n Set the alignment of the widget.\n\n :param align: Widget align, see locals\n :type align: str\n :return: None\n \"\"\"\n assert_alignment(align)\n self._alignment = align\n\n def get_alignment(self):\n \"\"\"\n Returns widget alignment.\n\n :return: Widget align, see locals\n :rtype: str\n \"\"\"\n return self._alignment\n\n def set_selected(self, selected=True):\n \"\"\"\n Mark the widget as selected.\n\n :param selected: Set item as selected\n :type selected: bool\n :return: None\n \"\"\"\n self.selected = selected\n self.active = False\n if selected:\n self._focus()\n self._selection_time = time.time()\n else:\n self._blur()\n self._events = [] # Remove events\n self._render()\n\n def get_selected_time(self):\n \"\"\"\n Return time the widget has been selected in milliseconds.\n If the widget is not currently selected, return 0.\n\n :return: Time in ms\n 
:rtype: float\n \"\"\"\n if not self.selected:\n return 0\n return (time.time() - self._selection_time) * 1000\n\n def _focus(self):\n \"\"\"\n Function that is executed when the widget receives a focus (is selected).\n\n :return: None\n \"\"\"\n pass\n\n def _blur(self):\n \"\"\"\n Function that is executed when the widget loses the focus.\n\n :return: None\n \"\"\"\n pass\n\n def set_shadow(self, enabled=True, color=None, position=None, offset=None):\n \"\"\"\n Show text shadow.\n\n :param enabled: Shadow is enabled or not\n :type enabled: bool\n :param color: Shadow color\n :type color: list, None\n :param position: Shadow position\n :type position: str, None\n :param offset: Shadow offset\n :type offset: int, float, None\n :return: None\n \"\"\"\n self._shadow = enabled\n if color is not None:\n assert_color(color)\n self._shadow_color = color\n if position is not None:\n assert_position(position)\n self._shadow_position = position\n if offset is not None:\n assert isinstance(offset, (int, float))\n if offset <= 0:\n raise ValueError('shadow offset must be greater than zero')\n self._shadow_offset = offset\n\n # Create shadow tuple position\n self._create_shadow_tuple()\n\n def set_sound(self, sound):\n \"\"\"\n Set sound engine to the widget.\n\n :param sound: Sound object\n :type sound: :py:class:`pygame_menu.sound.Sound`\n :return: None\n \"\"\"\n self.sound = sound\n\n def _create_shadow_tuple(self):\n \"\"\"\n Create shadow position tuple.\n\n :return: None\n \"\"\"\n x = 0\n y = 0\n if self._shadow_position == _locals.POSITION_NORTHWEST:\n x = -1\n y = -1\n elif self._shadow_position == _locals.POSITION_NORTH:\n y = -1\n elif self._shadow_position == _locals.POSITION_NORTHEAST:\n x = 1\n y = -1\n elif self._shadow_position == _locals.POSITION_EAST:\n x = 1\n elif self._shadow_position == _locals.POSITION_SOUTHEAST:\n x = 1\n y = 1\n elif self._shadow_position == _locals.POSITION_SOUTH:\n y = 1\n elif self._shadow_position == _locals.POSITION_SOUTHWEST:\n x = -1\n y = 1\n elif self._shadow_position == _locals.POSITION_WEST:\n x = -1\n self._shadow_tuple = (x * self._shadow_offset, y * self._shadow_offset)\n\n def set_controls(self, joystick=True, mouse=True):\n \"\"\"\n Enable interfaces to control the widget.\n\n :param joystick: Use joystick\n :type joystick: bool\n :param mouse: Use mouse\n :type mouse: bool\n :return: None\n \"\"\"\n self.joystick_enabled = joystick\n self.mouse_enabled = mouse\n\n def set_value(self, value):\n \"\"\"\n Set the value.\n\n .. warning:: This method does not fire the callbacks as it is\n called programmatically. 
This behavior is deliberately\n chosen to avoid infinite loops.\n\n :param value: Value to be set on the widget\n :type value: Object\n :return: None\n \"\"\"\n raise ValueError('{}({}) does not accept value'.format(self.__class__.__name__,\n self.get_id()))\n\n def update(self, events):\n \"\"\"\n Update internal variable according to the given events list\n and fire the callbacks.\n\n :param events: List of pygame events\n :type events: list[:py:class:`pygame.event.Event`]\n :return: True if updated\n :rtype: bool\n \"\"\"\n raise NotImplementedError('override is mandatory')\n\n def _add_event(self, event):\n \"\"\"\n Add a custom event to the widget for the next update().\n\n :param event: Custom event\n :type event: :py:class:`pygame.event.Event`\n \"\"\"\n self._events.append(event)\n\n def _merge_events(self, events):\n \"\"\"\n Append widget events to events list.\n\n :param events: Event list\n :type events: list[:py:class:`pygame.event.Event`]\n :return: Augmented event list\n :rtype: list[:py:class:`pygame.event.Event`]\n \"\"\"\n if len(self._events) == 0:\n return events\n copy_events = []\n for e in events:\n copy_events.append(e)\n for e in self._events:\n copy_events.append(e)\n self._events = []\n return copy_events\n", "sub_path": "tetris/venv/Lib/site-packages/pygame_menu/widgets/core/widget.py", "file_name": "widget.py", "file_ext": "py", "file_size_in_byte": 26029, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "uuid.uuid4", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame_menu.locals.ALIGN_CENTER", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 80, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame_menu.locals.POSITION_NORTHWEST", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 120, "usage_type": "name"}, {"api_name": "pygame_menu.sound.Sound", "line_number": 142, "usage_type": "call"}, {"api_name": "pygame_menu.baseimage.BaseImage", "line_number": 235, "usage_type": "attribute"}, {"api_name": "pygame_menu.baseimage", "line_number": 235, "usage_type": "name"}, {"api_name": "pygame_menu.baseimage.IMAGE_MODE_FILL", "line_number": 236, "usage_type": "attribute"}, {"api_name": "pygame_menu.baseimage", "line_number": 236, "usage_type": "name"}, {"api_name": "pygame_menu.utils.assert_color", "line_number": 239, "usage_type": "call"}, {"api_name": "pygame_menu.utils.assert_vector2", "line_number": 240, "usage_type": "call"}, {"api_name": "pygame_menu.baseimage.BaseImage", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pygame_menu.baseimage", "line_number": 256, "usage_type": "name"}, {"api_name": "pygame_menu.widgets.core.selection.Selection", "line_number": 281, "usage_type": "argument"}, {"api_name": "pygame_menu.utils.make_surface", "line_number": 491, "usage_type": "call"}, {"api_name": "pygame.transform.smoothscale", "line_number": 505, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 505, "usage_type": "attribute"}, {"api_name": "pygame_menu.font.get_font", "line_number": 548, "usage_type": "call"}, {"api_name": "pygame_menu.font", "line_number": 548, "usage_type": "name"}, {"api_name": "pygame_menu.utils.assert_alignment", "line_number": 621, "usage_type": "call"}, {"api_name": "time.time", "line_number": 645, "usage_type": "call"}, {"api_name": "time.time", "line_number": 661, 
"usage_type": "call"}, {"api_name": "pygame_menu.utils.assert_color", "line_number": 695, "usage_type": "call"}, {"api_name": "pygame_menu.utils.assert_position", "line_number": 698, "usage_type": "call"}, {"api_name": "pygame_menu.locals.POSITION_NORTHWEST", "line_number": 727, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 727, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_NORTH", "line_number": 730, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 730, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_NORTHEAST", "line_number": 732, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 732, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_EAST", "line_number": 735, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 735, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_SOUTHEAST", "line_number": 737, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 737, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_SOUTH", "line_number": 740, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 740, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_SOUTHWEST", "line_number": 742, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 742, "usage_type": "name"}, {"api_name": "pygame_menu.locals.POSITION_WEST", "line_number": 745, "usage_type": "attribute"}, {"api_name": "pygame_menu.locals", "line_number": 745, "usage_type": "name"}]} +{"seq_id": "522031771", "text": "# -*- coding: utf-8 -*-\nfrom pytorchtools import EarlyStopping\nimport torch\nimport torch as t\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch_model.Deep_NMT_model import LMLoss\nfrom torch_model.Attention_NMT import AttentionNMT\n\nfrom data.iwslt_Data_Loader import iwslt_Data\nimport numpy as np\nfrom tqdm import tqdm\nfrom nltk.translate.bleu_score import corpus_bleu\n\nimport config as argumentparser\n\nconfig = argumentparser.ArgumentParser()\nconfig.filters = list(map(int,config.filters.split(\",\")))\n\ntorch.manual_seed(config.seed)\n\n\nif torch.cuda.is_available():\n torch.cuda.set_device(config.gpu)\n\n\ndef get_dev_loss(model, criterion, data_iter):\n model.eval()\n process_bar = tqdm(data_iter)\n loss = 0\n for source_data, target_data_input, target_data in process_bar:\n if config.cuda and torch.cuda.is_available():\n source_data = source_data.cuda()\n target_data_input = target_data_input.cuda()\n target_data = target_data.cuda()\n else:\n source_data = torch.autograd.Variable(source_data).long()\n target_data_input = torch.autograd.Variable(target_data_input).long()\n target_data = torch.autograd.Variable(target_data).squeeze()\n out = model(source_data, target_data_input)\n loss_now = criterion(out.view(-1, 30000), autograd.Variable(target_data.view(-1).long()))\n weights = target_data.view(-1) != 0\n loss_now = torch.sum((loss_now * weights.float())) / torch.sum(weights.float())\n loss += loss_now.data.item()\n return loss\n\ndef get_test_bleu(model, target_id2word, data_iter):\n model.eval()\n process_bar = tqdm(data_iter)\n refs = []\n preds = []\n for source_data, target_data_input, target_data in process_bar:\n target_input = torch.Tensor(np.zeros([source_data.shape[0], 1])+2)\n if config.cuda and torch.cuda.is_available():\n source_data = source_data.cuda()\n target_input = 
target_input.cuda().long()\n else:\n source_data = torch.autograd.Variable(source_data).long()\n target_input = torch.autograd.Variable(target_input).long()\n target_data = target_data.numpy()\n out = model(source_data, target_input,mode=\"test\")\n out = np.array(out).T\n tmp_preds = []\n for i in range(out.shape[0]):\n tmp_preds.append([])\n for i in range(out.shape[0]):\n for j in range(out.shape[1]):\n if out[i][j]!=3:\n tmp_preds[i].append(out[i][j])\n else:\n break\n preds += tmp_preds\n tmp_refs = []\n for i in range(target_data.shape[0]):\n tmp_refs.append([])\n for i in range(target_data.shape[0]):\n for j in range(target_data.shape[1]):\n if target_data[i][j]!=3 and target_data[i][j]!=0:\n tmp_refs[i].append(target_data[i][j])\n tmp_refs = [[x] for x in tmp_refs]\n refs+=tmp_refs\n bleu = corpus_bleu(refs,preds)*100\n with open(\"./data/result.txt\",\"w\") as f:\n for i in range(len(preds)):\n tmp_ref = [target_id2word[id] for id in refs[i][0]]\n tmp_pred = [target_id2word[id] for id in preds[i]]\n f.write(\"ref: \"+\" \".join(tmp_ref)+\"\\n\")\n f.write(\"pred: \"+\" \".join(tmp_pred)+\"\\n\")\n f.write(\"\\n\\n\")\n return bleu\n\n\nif __name__ == '__main__':\n # source_vocab_size=30000,target_vocab_size=30000,embedding_size=256,\n # source_length=100,target_length=100,lstm_size=256\n\n config = argumentparser.ArgumentParser()\n training_set = iwslt_Data()\n training_iter = torch.utils.data.DataLoader(dataset=training_set,\n batch_size=config.batch_size,\n shuffle=True,\n num_workers=0)\n\n valid_set = iwslt_Data(source_data_name=\"IWSLT14.TED.dev2010.de-en.de\",\n target_data_name=\"IWSLT14.TED.dev2010.de-en.en\")\n valid_iter = torch.utils.data.DataLoader(dataset=valid_set,\n batch_size=config.batch_size,\n shuffle=True,\n num_workers=0)\n test_set = iwslt_Data(source_data_name=\"IWSLT14.TED.tst2012.de-en.de\",\n target_data_name=\"IWSLT14.TED.tst2012.de-en.en\")\n test_iter = torch.utils.data.DataLoader(dataset=test_set,\n batch_size=config.batch_size,\n shuffle=True,\n num_workers=0)\n model = AttentionNMT(config)\n criterion = LMLoss()\n if config.cuda and torch.cuda.is_available():\n model.cuda()\n criterion.cuda()\n\n print(torch.cuda.is_available(), config.cuda)\n\n optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)\n target_id2word = dict([[x[1], x[0]] for x in training_set.target_word2id.items()])\n loss = -1\n\n for epoch in range(config.epoch):\n model.train()\n process_bar = tqdm(training_iter)\n # iterate once over the training batches; the tqdm iterator is single-use\n for source_data, target_data_input, target_data in process_bar:\n if config.cuda and torch.cuda.is_available():\n source_data = source_data.cuda()\n target_data_input = target_data_input.cuda()\n target_data = target_data.cuda()\n else:\n source_data = torch.autograd.Variable(source_data).long()\n target_data_input = torch.autograd.Variable(target_data_input).long()\n target_data = torch.autograd.Variable(target_data).squeeze()\n out = model(source_data, target_data_input)\n\n loss_now = criterion(target_data, out)\n if loss == -1:\n loss = loss_now.data.item()\n else:\n loss = 0.95 * loss + 0.05 * loss_now.data.item()\n process_bar.set_postfix(loss=loss_now.data.item())\n process_bar.update()\n optimizer.zero_grad()\n loss_now.backward()\n optimizer.step()\n test_bleu = get_test_bleu(model, target_id2word, test_iter)\n print(\"test bleu is:\", test_bleu)\n valid_loss = get_dev_loss(model, criterion, valid_iter)\n print(\"valid loss is:\", valid_loss)", "sub_path": 
"torch_attention_nmt.py", "file_name": "torch_attention_nmt.py", "file_ext": "py", "file_size_in_byte": 6653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "config.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "config.filters", "line_number": 19, "usage_type": "attribute"}, {"api_name": "config.filters.split", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 21, "usage_type": "call"}, {"api_name": "config.seed", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "config.gpu", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 30, "usage_type": "call"}, {"api_name": "config.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 44, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "config.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.corpus_bleu", "line_number": 83, "usage_type": "call"}, {"api_name": "config.ArgumentParser", "line_number": 100, "usage_type": "call"}, {"api_name": "data.iwslt_Data_Loader.iwslt_Data", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 102, "usage_type": "attribute"}, {"api_name": "config.batch_size", "line_number": 103, "usage_type": "attribute"}, {"api_name": "data.iwslt_Data_Loader.iwslt_Data", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.utils", 
"line_number": 109, "usage_type": "attribute"}, {"api_name": "config.batch_size", "line_number": 110, "usage_type": "attribute"}, {"api_name": "data.iwslt_Data_Loader.iwslt_Data", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 115, "usage_type": "attribute"}, {"api_name": "config.batch_size", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch_model.Attention_NMT.AttentionNMT", "line_number": 119, "usage_type": "call"}, {"api_name": "torch_model.Deep_NMT_model.LMLoss", "line_number": 120, "usage_type": "call"}, {"api_name": "config.cuda", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 125, "usage_type": "attribute"}, {"api_name": "config.cuda", "line_number": 125, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 127, "usage_type": "name"}, {"api_name": "config.learning_rate", "line_number": 127, "usage_type": "attribute"}, {"api_name": "config.epoch", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 133, "usage_type": "call"}, {"api_name": "config.cuda", "line_number": 138, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 138, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 145, "usage_type": "attribute"}]} +{"seq_id": "310143459", "text": "from django.conf.urls import url\nfrom mysite.blog.views import post_list, post_detail, post_new, post_edit\n\n\nurlpatterns = [\n url(r'^$', post_list),\n url(r'^post/(?P[0-9]+)/$', post_detail),\n url(r'^post/new/$', post_new, name='post_new'),\n url(r'^post/(?P[0-9]+)/edit/$', post_edit, name='post_edit'),\n]\n", "sub_path": "mysite/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "mysite.blog.views.post_list", "line_number": 6, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "mysite.blog.views.post_detail", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "mysite.blog.views.post_new", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "mysite.blog.views.post_edit", "line_number": 9, "usage_type": "argument"}]} +{"seq_id": "449859332", "text": "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport 
numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\nfrom pandas import ExcelWriter\nfrom numpy import interp\nimport math\n\ndef Load_results1(instance,i,n,type_generator):\n '''\n This function loads the results that depend on the periods into a dataframe and creates an excel file with it.\n \n :param instance: The instance of the project resolution created by PYOMO.\n \n :return: A dataframe called Time_series with the values of the variables that depend on the periods. \n '''\n \n path = 'Results/Results' + '_' + str(i) + '_' + str(n) + '.xls'\n writer = ExcelWriter(path, engine='xlsxwriter')\n \n # Load the variables that do not depend on the periods into python dictionaries\n Number_Scenarios = int(instance.Scenarios.extract_values()[None])\n Number_Periods = int(instance.Periods.extract_values()[None])\n Number_Renewable_Source = int(instance.Renewable_Source.extract_values()[None])\n Number_Generator = int(instance.Generator_Type.extract_values()[None])\n \n Renewable_Nominal_Capacity = instance.Renewable_Nominal_Capacity.extract_values()\n Inverter_Efficiency_Renewable = instance.Renewable_Inverter_Efficiency.extract_values()\n Renewable_Invesment_Cost = instance.Renewable_Invesment_Cost.extract_values()\n OyM_Renewable = instance.Maintenance_Operation_Cost_Renewable.extract_values()\n Renewable_Units = instance.Renewable_Units.get_values()\n Fix_Invesment = instance.Fix_Invesment_PV.extract_values()\n Integer_PV = instance.Integer_PV.get_values()\n Data_Renewable = pd.DataFrame()\n \n for r in range(1, Number_Renewable_Source + 1):\n \n Name = 'Source ' + str(r)\n Data_Renewable.loc['Units', Name] = Renewable_Units[r]\n Data_Renewable.loc['Nominal Capacity (W)', Name] = Renewable_Nominal_Capacity[r]\n Data_Renewable.loc['Inverter Efficiency', Name] = Inverter_Efficiency_Renewable[r]\n Data_Renewable.loc['Investment Cost (USD/W)', Name] = Renewable_Invesment_Cost[r]\n Data_Renewable.loc['OyM', Name] = OyM_Renewable[r]\n Data_Renewable.loc['Fix invesment', Name] = Fix_Invesment[r]\n Data_Renewable.loc['Investment Decision', Name] = Integer_PV[r]\n Data_Renewable.loc['Invesment (USD)', Name] = Fix_Invesment[r]*Integer_PV[r] + Renewable_Units[r]*Renewable_Nominal_Capacity[r]*Renewable_Invesment_Cost[r] \n Data_Renewable.loc['OyM Cost (USD)', Name] = Renewable_Units[r]*Renewable_Nominal_Capacity[r]*Renewable_Invesment_Cost[r]*OyM_Renewable[r] \n Data_Renewable.loc['Total Nominal Capacity (W)', Name] = Data_Renewable.loc['Nominal Capacity (W)', Name]*Data_Renewable.loc['Units', Name] \n \n Data_Renewable.to_excel(writer, sheet_name='Data Renewable') \n \n columns = []\n for i in range(1, Number_Scenarios+1):\n columns.append('Scenario_'+str(i))\n\n\n # Energy Time Series\n Scenarios = pd.DataFrame()\n \n Number = 7\n \n if instance.Lost_Load_Probability > 0: \n Lost_Load = instance.Lost_Load.get_values()\n Number += 1\n \n Renewable_Energy_1 = instance.Renewable_Energy_Production.extract_values()\n Renewable_Units = instance.Renewable_Units.get_values()\n Renewable_Energy = {}\n \n for s in range(1, Number_Scenarios + 1):\n for t in range(1, Number_Periods+1):\n \n foo = []\n for r in range(1,Number_Renewable_Source+1 ):\n foo.append((s,r,t))\n \n Renewable_Energy[s,t] = sum(Renewable_Energy_1[s,r,t]*Data_Renewable.loc['Inverter Efficiency', 'Source ' + str(r)]\n *Data_Renewable.loc['Units', 'Source ' + str(r)] for s,r,t in foo)\n \n Battery_Flow_Out = instance.Energy_Battery_Flow_Out.get_values()\n Battery_Flow_in = 
instance.Energy_Battery_Flow_In.get_values()\n Curtailment = instance.Energy_Curtailment.get_values()\n Energy_Demand = instance.Energy_Demand.extract_values()\n SOC = instance.State_Of_Charge_Battery.get_values()\n Generator_Energy = instance.Generator_Energy.get_values()\n Total_Generator_Energy = {}\n \n \n for s in range(1, Number_Scenarios + 1):\n for t in range(1, Number_Periods+1):\n foo = []\n for g in range(1,Number_Generator+1):\n foo.append((s,g,t))\n Total_Generator_Energy[s,t] = sum(Generator_Energy[i] for i in foo)\n \n Scenarios_Periods = [[] for i in range(Number_Scenarios)]\n \n for i in range(0,Number_Scenarios):\n for j in range(1, Number_Periods+1):\n Scenarios_Periods[i].append((i+1,j))\n foo=0 \n for i in columns:\n Information = [[] for i in range(Number)]\n for j in Scenarios_Periods[foo]:\n \n Information[0].append(Renewable_Energy[j]) \n Information[1].append(Battery_Flow_Out[j]) \n Information[2].append(Battery_Flow_in[j]) \n Information[3].append(Curtailment[j]) \n Information[4].append(Energy_Demand[j]) \n Information[5].append(SOC[j])\n Information[6].append(Total_Generator_Energy[j])\n if instance.Lost_Load_Probability > 0: \n Information[7].append(Lost_Load[j])\n \n Scenarios=Scenarios.append(Information)\n foo+=1\n \n index=[] \n for j in range(1, Number_Scenarios+1): \n index.append('Renewable Energy '+str(j) + ' (Wh)')\n index.append('Battery Flow Out '+str(j) + ' (Wh)') \n index.append('Battery Flow in '+str(j) + ' (Wh)')\n index.append('Curtailment '+str(j) + ' (Wh)')\n index.append('Energy Demand '+str(j) + ' (Wh)')\n index.append('SOC '+str(j) + ' (Wh)')\n index.append('Gen energy '+str(j) + ' (Wh)')\n if instance.Lost_Load_Probability > 0: \n index.append('Lost Load '+str(j) + ' (Wh)')\n Scenarios.index= index\n \n \n \n \n # Creation of an index starting in the 'model.StartDate' value with a frequency step equal to 'model.Delta_Time'\n if instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1.0) : # if the step is in hours and minutes\n foo = str(instance.Delta_Time()) # transform the number into a string\n hour = foo[0] # Extract the first character\n minutes = str(int(float(foo[1:3])*60)) # Extract the last two characters\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(hour + 'h'+ minutes + 'min')) # Creation of an index with a start date and a frequency\n elif instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1): # if the step is in hours\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(str(instance.Delta_Time()) + 'h')) # Creation of an index with a start date and a frequency\n else: # if the step is in minutes\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(str(int(instance.Delta_Time()*60)) + 'min'))# Creation of an index with a start date and a frequency\n \n Scenarios.columns = columns\n Scenarios = Scenarios.transpose()\n \n Scenarios.to_excel(writer, sheet_name='Time Series') # Creating an excel file with the values of the variables that are a function of the periods\n \n columns = [] # fix several columns\n for i in range(1, Number_Scenarios+1):\n columns.append('Scenario '+str(i))\n \n Scenario_information =[[] for i in range(Number_Scenarios)]\n Scenario_Weight = instance.Scenario_Weight.extract_values()\n\n \n for i in range(1, Number_Scenarios+1):\n Scenario_information[i-1].append(Scenario_Weight[i])\n \n\n \n Scenario_Information = 
pd.DataFrame(Scenario_information,index=columns)\n Scenario_Information.columns=['Scenario Weight']\n Scenario_Information = Scenario_Information.transpose()\n \n Scenario_Information.to_excel(writer, sheet_name='Scenario Information')\n \n Renewable_Energy = pd.DataFrame()\n \n for s in range(1, Number_Scenarios + 1):\n for r in range(1, Number_Renewable_Source + 1):\n column = 'Renewable ' + str(s) + ' ' + str(r) + ' (Wh)'\n column2 = 'Renewable unit ' + str(s) + ' ' + str(r) + ' (Wh)'\n Energy = []\n Unit_Energy = []\n for t in range(1, Number_Periods + 1):\n Source = 'Source ' + str(r)\n Energy.append(Renewable_Energy_1[s,r,t]*Data_Renewable.loc['Inverter Efficiency', Source]\n *Data_Renewable.loc['Units', Source])\n Unit_Energy.append(Renewable_Energy_1[s,r,t])\n \n Renewable_Energy[column] = Energy\n Renewable_Energy[column2] = Unit_Energy\n \n Renewable_Energy.index = Scenarios.index\n Renewable_Energy.to_excel(writer, sheet_name='Renewable Energy Time Series')\n \n \n\n Generator_Data = pd.DataFrame()\n \n if instance.formulation == 'LP':\n \n Generator_Efficiency = instance.Generator_Efficiency.extract_values()\n Low_Heating_Value = instance.Low_Heating_Value.extract_values()\n Fuel_Cost = instance.Fuel_Cost.extract_values()\n Generator_Invesment_Cost = instance.Generator_Invesment_Cost.extract_values()\n Generator_Nominal_Capacity = instance.Generator_Nominal_Capacity.get_values()\n Maintenance_Operation_Cost_Generator = instance.Maintenance_Operation_Cost_Generator.extract_values()\n \n for g in range(1, Number_Generator + 1):\n Name = 'Generator ' + str(g)\n Generator_Data.loc['Generator Efficiency',Name] = Generator_Efficiency[g]\n Generator_Data.loc['Low Heating Value (Wh/l)',Name] = Low_Heating_Value[g]\n Generator_Data.loc['Fuel Cost (USD/l)',Name] = Fuel_Cost[g]\n Generator_Data.loc['Generator Invesment Cost (USD/W)',Name] = Generator_Invesment_Cost[g]\n Generator_Data.loc['Generator Nominal Capacity (W)',Name] = Generator_Nominal_Capacity[g]\n Generator_Data.loc['OyM Generator', Name] = Maintenance_Operation_Cost_Generator[g]\n Generator_Data.loc['Invesment Generator (USD)', Name] = Generator_Invesment_Cost[g]*Generator_Nominal_Capacity[g]\n Generator_Data.loc['OyM Cost (USD)', Name] = Generator_Data.loc['Invesment Generator (USD)', Name]*Generator_Data.loc['OyM Generator', Name]\n Generator_Data.loc['Marginal Cost (USD/Wh)', Name] = (Generator_Data.loc['Fuel Cost (USD/l)',Name]/\n (Generator_Data.loc['Generator Efficiency',Name]*Generator_Data.loc['Low Heating Value (Wh/l)',Name]))\n Generator_Data.loc['Marginal Cost (USD/Wh)', Name] = round(Generator_Data.loc['Marginal Cost (USD/Wh)', Name],3)\n \n if instance.formulation == 'MILP':\n Generator_Min_Out_Put = instance.Generator_Min_Out_Put.extract_values()\n Generator_Efficiency = instance.Generator_Efficiency.extract_values()\n Low_Heating_Value = instance.Low_Heating_Value.extract_values()\n Fuel_Cost = instance.Fuel_Cost.extract_values()\n Generator_Invesment_Cost = instance.Generator_Invesment_Cost.extract_values()\n Cost_Increase = instance.Cost_Increase.extract_values() \n Generator_Nominal_Capacity = instance.Generator_Nominal_Capacity.extract_values()\n if type_generator == 'Fix':\n Integer_generator = instance.Integer_generator \n else:\n Integer_generator = instance.Integer_generator.get_values()\n Maintenance_Operation_Cost_Generator = instance.Maintenance_Operation_Cost_Generator.extract_values()\n \n for g in range(1, Number_Generator + 1):\n Name = 'Generator ' + str(g)\n Generator_Data.loc['Generator 
Nominal Capacity (W)',Name] = Generator_Nominal_Capacity[g]\n Generator_Data.loc['Generator Min Out Put',Name] = Generator_Min_Out_Put[g]\n Generator_Data.loc['Generator Efficiency',Name] = Generator_Efficiency[g]\n Generator_Data.loc['Low Heating Value (Wh/l)',Name] = Low_Heating_Value[g]\n Generator_Data.loc['Fuel Cost (USD/l)',Name] = Fuel_Cost[g]\n Generator_Data.loc['Generator Invesment Cost (USD/W)',Name] = Generator_Invesment_Cost[g]\n Generator_Data.loc['Cost Increase',Name] = Cost_Increase[g]\n M_1 = Fuel_Cost[g]/(Generator_Efficiency[g]*Low_Heating_Value[g])\n M_1 = round(M_1,3)\n Generator_Data.loc['Marginal cost Full load (USD/Wh)',Name] = M_1\n Generator_Data.loc['Start Cost Generator (USD)',Name] = M_1*Generator_Nominal_Capacity[g]*Cost_Increase[g]\n Generator_Data.loc['Start Cost Generator (USD)',Name] = round(Generator_Data.loc['Start Cost Generator (USD)',Name],3)\n M_2 = (M_1*Generator_Nominal_Capacity[g]-Generator_Data.loc['Start Cost Generator (USD)',Name])/ \\\n Generator_Nominal_Capacity[g]\n Generator_Data.loc['Marginal cost Partial load (USD/Wh)',Name] = round(M_2,3)\n Generator_Data.loc['Number of Generators', Name] = Integer_generator[g]\n Generator_Data.loc['Maintenance Operation Cost Generator', Name] = Maintenance_Operation_Cost_Generator[g]\n Generator_Data.loc['Invesment Generator (USD)', Name] = (Generator_Nominal_Capacity[g]\n *Integer_generator[g]*Generator_Invesment_Cost[g])\n Generator_Data.loc['OyM Cost (USD)', Name] = (Generator_Nominal_Capacity[g]*Integer_generator[g]\n *Generator_Invesment_Cost[g]\n *Maintenance_Operation_Cost_Generator[g])\n \n Generator_Data.to_excel(writer, sheet_name='Generator Data') \n\n Project_Data = pd.Series()\n Project_Data['Net Present Cost (USD)'] = instance.ObjectiveFuntion.expr()\n Project_Data['Discount Rate'] = instance.Discount_Rate.value\n Project_Data['Proyect Life Time (years)'] = instance.Years.value\n Project_Data['Value of lost load (USD/Wh)'] = instance.Value_Of_Lost_Load.value\n a = Project_Data['Discount Rate']*((1+Project_Data['Discount Rate'])**Project_Data['Proyect Life Time (years)'])\n b = ((1 + Project_Data['Discount Rate'])**Project_Data['Proyect Life Time (years)']) - 1\n Project_Data['Capital Recovery Factor'] = round(a/b,3)\n if instance.Curtailment_Unitary_Cost > 0:\n Project_Data['Curtailment Unitary Cost (USD/Wh)'] = instance.Curtailment_Unitary_Cost\n \n Project_Data.to_excel(writer, sheet_name='Project Data') \n \n \n Battery_Nominal_Capacity = instance.Battery_Nominal_Capacity.get_values()[None]\n PriceBattery = instance.Battery_Invesment_Cost.value\n Battery_Electronic_Invesmente_Cost = instance.Battery_Electronic_Invesmente_Cost.value\n OM_Bat = instance.Maintenance_Operation_Cost_Battery.value\n SOC_1 = instance.Battery_Initial_SOC.value\n Ch_bat_eff = instance.Charge_Battery_Efficiency.value\n Dis_bat_eff = instance.Discharge_Battery_Efficiency.value\n Deep_of_Discharge = instance.Deep_of_Discharge.value\n Battery_Cycles = instance.Battery_Cycles.value\n Fix_Invesment_Battery = instance.Fix_Invesment_Battery.extract_values()[None]\n Integer_Battery = instance.Integer_Battery.get_values()[None]\n \n \n Unitary_Battery_Cost = PriceBattery - Battery_Electronic_Invesmente_Cost\n Battery_Repostion_Cost = Unitary_Battery_Cost/(Battery_Cycles*2*(1-Deep_of_Discharge))\n Battery_Repostion_Cost = round(Battery_Repostion_Cost, 3)\n Battery_Data = pd.DataFrame()\n \n Battery_Data.loc['Nominal Capacity (Wh)','Battery'] = Battery_Nominal_Capacity\n Battery_Data.loc['Unitary Invesment Cost 
(USD/Wh)','Battery'] = PriceBattery\n Battery_Data.loc['Unitary invesment cost electronic equipment (USD/Wh)','Battery'] = Battery_Electronic_Invesmente_Cost\n Battery_Data.loc['OyM','Battery'] = OM_Bat\n Battery_Data.loc['Initial State of Charge','Battery'] = SOC_1\n Battery_Data.loc['Charge efficiency','Battery'] = Ch_bat_eff\n Battery_Data.loc['Discharge efficiency','Battery'] = Dis_bat_eff\n Battery_Data.loc['Deep of Discharge','Battery'] = Deep_of_Discharge\n Battery_Data.loc['Battery Cycles','Battery'] = Battery_Cycles\n Battery_Data.loc['Unitary Battery Reposition Cost (USD/Wh)','Battery'] = Battery_Repostion_Cost\n Battery_Data.loc['Fix invesment','Battery'] = Fix_Invesment_Battery\n Battery_Data.loc['Investment Decision','Battery'] = Integer_Battery \n \n \n Battery_Data.loc['Invesment Cost (USD)','Battery'] = Fix_Invesment_Battery*Integer_Battery + Battery_Nominal_Capacity*PriceBattery\n Battery_Data.loc['OyM Cost (USD)', 'Battery'] = Battery_Nominal_Capacity*PriceBattery*OM_Bat\n \n Battery_Data.to_excel(writer, sheet_name='Battery Data')\n \n Generator_Time_Series = pd.DataFrame()\n \n if instance.formulation == 'LP': \n for s in range(1, Number_Scenarios + 1): \n for g in range(1, Number_Generator + 1):\n column_1 = 'Energy Generator ' + str(s) + ' ' + str(g) + ' (Wh)' \n column_2 = 'Fuel Cost ' + str(s) + ' ' + str(g) + ' (USD)' \n Name = 'Generator ' + str(g)\n for t in range(1, Number_Periods + 1):\n Generator_Time_Series.loc[t,column_1] = Generator_Energy[s,g,t]\n Generator_Time_Series.loc[t,column_2] = (Generator_Time_Series.loc[t,column_1]\n *Generator_Data.loc['Marginal Cost (USD/Wh)', Name]) \n if instance.formulation == 'MILP':\n Generator_Integer = instance.Generator_Energy_Integer.get_values()\n for s in range(1, Number_Scenarios + 1):\n for g in range(1, Number_Generator + 1):\n column_1 = 'Energy Generator ' + str(s) + ' ' + str(g) + ' (Wh)' \n column_2 = 'Integer Generator ' + str(s) + ' ' + str(g)\n column_3 = 'Fuel Cost ' + str(s) + ' ' + str(g) + ' (USD)' \n Name = 'Generator ' + str(g)\n for t in range(1, Number_Periods + 1):\n Generator_Time_Series.loc[t,column_1] = Generator_Energy[s,g,t]\n Generator_Time_Series.loc[t,column_2] = Generator_Integer[s,g,t]\n Generator_Time_Series.loc[t,column_3] = (Generator_Integer[s,g,t]*Generator_Data.loc['Start Cost Generator (USD)',Name] \n + Generator_Energy[s,g,t]*Generator_Data.loc['Marginal cost Partial load (USD/Wh)',Name] )\n \n \n Generator_Time_Series.index = Scenarios.index \n Generator_Time_Series.to_excel(writer, sheet_name='Generator Time Series') \n\n Cost_Time_Series = pd.DataFrame()\n for s in range(1, Number_Scenarios + 1):\n if instance.Lost_Load_Probability > 0:\n name_1 = 'Lost Load ' + str(s) + ' (Wh)'\n name_1_1 = 'Lost Load ' + str(s) + ' (USD)'\n name_2 = 'Battery Flow Out ' + str(s) + ' (Wh)' \n name_2_1 = 'Battery Flow Out ' + str(s) + ' (USD)' \n name_3 = 'Battery Flow in ' + str(s) + ' (Wh)' \n name_3_1 = 'Battery Flow In ' + str(s) + ' (USD)' \n name_4_1 = 'Generator Cost ' + str(s) + ' (USD)' \n\n for t in Scenarios.index:\n if instance.Lost_Load_Probability > 0:\n Cost_Time_Series.loc[t,name_1_1] = Scenarios[name_1][t]*Project_Data['Value of lost load (USD/Wh)']\n Cost_Time_Series.loc[t,name_2_1] = (Scenarios[name_2][t]\n *Battery_Data.loc['Unitary Battery Reposition Cost (USD/Wh)','Battery'])\n Cost_Time_Series.loc[t,name_3_1] = (Scenarios[name_3][t]\n *Battery_Data.loc['Unitary Battery Reposition Cost (USD/Wh)','Battery'])\n Fuel_Cost = 0\n for g in range(1, Number_Generator + 1):\n name_5 
= 'Fuel Cost ' + str(s) + ' ' + str(g) + ' (USD)' \n Fuel_Cost += Generator_Time_Series.loc[t,name_5]\n Cost_Time_Series.loc[t,name_4_1] = Fuel_Cost\n \n if instance.Curtailment_Unitary_Cost > 0:\n name_6 = 'Curtailment ' + str(s) + ' (Wh)'\n name_6_1 = 'Curtailment Cost ' + str(s) + ' (USD)' \n Cost_Time_Series.loc[t,name_6_1] = (Scenarios[name_6][t]*Project_Data['Curtailment Unitary Cost (USD/Wh)'])\n \n Cost_Time_Series.to_excel(writer, sheet_name='Cost Time Series')\n \n Scenario_Cost = pd.DataFrame() \n for s in range(1, Number_Scenarios + 1):\n if instance.Lost_Load_Probability > 0:\n name_1_1 = 'Lost Load ' + str(s) + ' (USD)'\n name_1 ='Lost Load (USD)'\n name_2_1 = 'Battery Flow Out ' + str(s) + ' (USD)'\n name_2 = 'Battery Flow Out (USD)'\n name_3_1 = 'Battery Flow In ' + str(s) + ' (USD)' \n name_3 = 'Battery Flow In (USD)'\n name_4_1 = 'Generator Cost ' + str(s) + ' (USD)'\n name_4 = 'Generator Cost (USD)'\n if instance.Curtailment_Unitary_Cost > 0:\n name_6 = 'Curtailment ' + str(s) + ' (Wh)'\n name_6_1 = 'Curtailment Cost ' + str(s) + ' (USD)' \n \n \n name_5 = 'Scenario ' + str(s)\n if instance.Lost_Load_Probability > 0:\n Scenario_Cost.loc[name_1,name_5] = Cost_Time_Series[name_1_1].sum()\n Scenario_Cost.loc[name_2,name_5] = Cost_Time_Series[name_2_1].sum()\n Scenario_Cost.loc[name_3,name_5] = Cost_Time_Series[name_3_1].sum()\n Scenario_Cost.loc[name_4,name_5] = Cost_Time_Series[name_4_1].sum() \n if instance.Curtailment_Unitary_Cost > 0: \n Scenario_Cost.loc[name_6,name_5] = Cost_Time_Series[name_6_1].sum() \n \n gen_oym = 0\n for g in range(1, Number_Generator + 1):\n Name_2 = 'Generator ' + str(g)\n gen_oym += Generator_Data.loc['OyM Cost (USD)', Name_2]\n Scenario_Cost.loc['Gen OyM Cost (USD)',name_5] = gen_oym\n \n renewable_energy_oym = 0\n for r in range(1, Number_Renewable_Source + 1):\n Name = 'Source ' + str(r)\n renewable_energy_oym += Data_Renewable.loc['OyM Cost (USD)', Name]\n Scenario_Cost.loc['PV OyM Cost (USD)',name_5] = renewable_energy_oym\n\n Scenario_Cost.loc['Battery OyM Cost (USD)',name_5] = Battery_Data['Battery']['OyM Cost (USD)']\n \n Scenario_Cost.loc['Operation Cost (USD)',name_5] = Scenario_Cost[name_5].sum() \n \n Discount_rate = Project_Data['Discount Rate']\n Years = int(Project_Data['Proyect Life Time (years)'])\n \n \n Scenario_Cost.loc['OyM (USD)',name_5] = (Scenario_Cost.loc['Gen OyM Cost (USD)',name_5] \n +Scenario_Cost.loc['PV OyM Cost (USD)',name_5]\n +Scenario_Cost.loc['Battery OyM Cost (USD)',name_5])\n \n Scenario_Cost.loc['Present Gen Cost (USD)',name_5] = Scenario_Cost.loc[name_4,name_5]/Project_Data['Capital Recovery Factor']\n if instance.Lost_Load_Probability > 0:\n Scenario_Cost.loc['Present Lost Load Cost (USD)',name_5] = Scenario_Cost.loc[name_1,name_5]/Project_Data['Capital Recovery Factor']\n Scenario_Cost.loc['Present Bat Out Cost (USD)',name_5] = Scenario_Cost.loc[name_2,name_5]/Project_Data['Capital Recovery Factor']\n Scenario_Cost.loc['Present Bat In Cost (USD)',name_5] = Scenario_Cost.loc[name_3,name_5]/Project_Data['Capital Recovery Factor']\n Scenario_Cost.loc['Present Bat Reposition Cost (USD)',name_5] = (Scenario_Cost.loc[name_2,name_5] + Scenario_Cost.loc[name_3,name_5])/Project_Data['Capital Recovery Factor']\n Scenario_Cost.loc['Present OyM Cost (USD)',name_5] = Scenario_Cost.loc['OyM (USD)',name_5]/Project_Data['Capital Recovery Factor']\n Scenario_Cost.loc['Present Operation Cost (USD)',name_5] = Scenario_Cost[name_5]['Operation Cost (USD)']/Project_Data['Capital Recovery Factor']\n 
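# The 'Present ... (USD)' rows above divide each annual flow by the Capital Recovery Factor,\n # CRF = D*(1+D)**Y/((1+D)**Y - 1) with D the discount rate and Y the project lifetime,\n # and the weighted row below then scales the present cost by the scenario weight.\n 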
Scenario_Cost.loc['Present Operation Cost Weighted (USD)',name_5] = (Scenario_Cost[name_5]['Present Operation Cost (USD)']\n *Scenario_Information[name_5]['Scenario Weight'])\n \n Scenario_Cost.to_excel(writer, sheet_name='Scenario Costs')\n \n NPC = pd.DataFrame()\n NPC.loc['Battery Invesment (USD)', 'Data'] = Battery_Data['Battery']['Invesment Cost (USD)'] \n \n gen_Invesment = 0\n for g in range(1, Number_Generator + 1):\n Name_2 = 'Generator ' + str(g)\n gen_Invesment += Generator_Data.loc['Invesment Generator (USD)', Name_2]\n NPC.loc['Generator Invesment Cost (USD)', 'Data'] = gen_Invesment\n \n renewable_energy_invesment = 0\n for r in range(1, Number_Renewable_Source + 1):\n Name = 'Source ' + str(r)\n renewable_energy_invesment += Data_Renewable.loc['Invesment (USD)', Name]\n NPC.loc['Renewable Investment Cost (USD)', 'Data'] = renewable_energy_invesment \n \n operation_cost = 0\n for s in range(1, Number_Scenarios + 1):\n name_1 = 'Scenario ' + str(s)\n operation_cost += Scenario_Cost[name_1]['Present Operation Cost Weighted (USD)']\n\n NPC.loc['Present Operation Cost Weighted (USD)', 'Data'] = operation_cost\n\n \n NPC.loc['NPC (USD)', 'Data'] = NPC['Data'].sum()\n \n z = round(NPC.loc['NPC (USD)', 'Data'],5) == round(instance.ObjectiveFuntion.expr(), 5)\n print(z)\n \n NPC.loc['NPC LP (USD)', 'Data'] = Project_Data['Net Present Cost (USD)']\n NPC.loc['Invesment (USD)', 'Data'] = (NPC.loc['Battery Invesment (USD)', 'Data'] \n + NPC.loc['Generator Invesment Cost (USD)', 'Data'] \n + NPC.loc['Renewable Investment Cost (USD)', 'Data'])\n \n \n \n Demand = pd.DataFrame()\n NP_Demand = 0\n for s in range(1, Number_Scenarios + 1):\n a = 'Energy Demand ' + str(s) + ' (Wh)'\n b = 'Scenario ' + str(s)\n Demand.loc[a,'Total Demand (Wh)'] = sum(Scenarios[a][i] for i in Scenarios.index)\n Demand.loc[a,'Present Demand (Wh)'] = sum((Demand.loc[a,'Total Demand (Wh)']/(1+Discount_rate)**i) \n for i in range(1, Years+1)) \n Demand.loc[a,'Rate'] = Scenario_Information[b]['Scenario Weight'] \n Demand.loc[a,'Rated Demand (Wh)'] = Demand.loc[a,'Rate']*Demand.loc[a,'Present Demand (Wh)'] \n NP_Demand += Demand.loc[a,'Rated Demand (Wh)']\n NPC.loc['LCOE (USD/kWh)', 'Data'] = (Project_Data['Net Present Cost (USD)']/NP_Demand)\n NPC.loc['Status','Data'] = z\n \n NPC.to_excel(writer, sheet_name='Results')\n \n Data = []\n Data.append(NPC)\n Data.append(Scenario_Cost)\n Data.append(Project_Data)\n Data.append(Scenarios)\n Data.append(Generator_Data)\n Data.append(Scenario_Information)\n Data.append(Data_Renewable)\n Data.append(Battery_Data)\n \n writer.save()\n\n return Data\n\ndef Integer_Time_Series(instance,Scenarios, S, Data):\n \n if S == 0:\n S = instance.PlotScenario.value\n \n Time_Series = pd.DataFrame(index=range(0,8760))\n Time_Series.index = Scenarios.index\n if instance.Lost_Load_Probability > 0:\n Time_Series['Lost Load (Wh)'] = Scenarios['Lost Load ' + str(S) + ' (Wh)']\n Time_Series['Renewable Energy (Wh)'] = Scenarios['Renewable Energy '+str(S) + ' (Wh)']\n Time_Series['Discharge energy from the Battery (Wh)'] = Scenarios['Battery Flow Out ' + str(S) + ' (Wh)'] \n Time_Series['Charge energy to the Battery (Wh)'] = Scenarios['Battery Flow in '+str(S) + ' (Wh)']\n Time_Series['Curtailment (Wh)'] = Scenarios['Curtailment '+str(S) + ' (Wh)']\n Time_Series['Energy Demand (Wh)'] = Scenarios['Energy Demand '+str(S) + ' (Wh)']\n Time_Series['State Of Charge Battery (Wh)'] = Scenarios['SOC '+str(S) + ' (Wh)'] \n Time_Series['Generator Energy (Wh)'] = Scenarios['Gen energy '+str(S) + ' (Wh)']\n \n 
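# When the model has more than one renewable source, the per-source generation\n # columns are read back from the 'Renewable Energy Time Series' sheet written by Load_results1.\n 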
Renewable_Source = instance.Renewable_Source.value\n if Renewable_Source > 1:\n Renewable_Energy = pd.read_excel('Results/Results.xls',index_col=0,header=None,\n sheet_name='Renewable Energy Time Series')\n for r in range(1,Renewable_Source+1):\n name = 'Renewable ' + str(S) + ' ' + str(r) + ' (Wh)'\n name_1 = 'Renewable ' + str(r) + ' (Wh)'\n Time_Series[name_1] = Renewable_Energy[name]\n \n return Time_Series \n \n \n\ndef Load_results1_binary(instance):\n '''\n This function loads the results that depend on the periods into a \n dataframe and creates an Excel file with it.\n \n :param instance: The instance of the project resolution created by PYOMO.\n \n :return: A dataframe called Time_series with the values of the variables \n that depend on the periods. \n '''\n\n# Creation of an index starting in the 'model.StartDate' value with a frequency step equal to 'model.Delta_Time'\n \n Number_Scenarios = int(instance.Scenarios.extract_values()[None])\n Number_Periods = int(instance.Periods.extract_values()[None])\n \n #Scenarios = [[] for i in range(Number_Scenarios)]\n \n columns = []\n for i in range(1, Number_Scenarios+1):\n columns.append('Scenario_'+str(i))\n\n# columns=columns\n Scenarios = pd.DataFrame()\n \n \n Lost_Load = instance.Lost_Load.get_values()\n PV_Energy = instance.Total_Energy_PV.get_values()\n Battery_Flow_Out = instance.Energy_Battery_Flow_Out.get_values()\n Battery_Flow_in = instance.Energy_Battery_Flow_In.get_values()\n Curtailment = instance.Energy_Curtailment.get_values()\n Energy_Demand = instance.Energy_Demand.extract_values()\n SOC = instance.State_Of_Charge_Battery.get_values()\n Gen_Energy_Integer = instance.Generator_Energy_Integer.get_values()\n Gen_Energy_I = {}\n \n for i in range(1,Number_Scenarios+1):\n for j in range(1, Number_Periods+1):\n Gen_Energy_I[i,j]=(Gen_Energy_Integer[i,j]*instance.Generator_Nominal_Capacity.extract_values()[None]) \n \n Last_Generator_Energy = instance.Last_Energy_Generator.get_values() \n Total_Generator_Energy = instance.Generator_Total_Period_Energy.get_values() \n Gen_cost = instance.Period_Total_Cost_Generator.get_values() \n \n Scenarios_Periods = [[] for i in range(Number_Scenarios)]\n \n for i in range(0,Number_Scenarios):\n for j in range(1, Number_Periods+1):\n Scenarios_Periods[i].append((i+1,j))\n foo=0 \n for i in columns:\n Information = [[] for i in range(11)]\n for j in Scenarios_Periods[foo]:\n Information[0].append(Lost_Load[j])\n Information[1].append(PV_Energy[j]) \n Information[2].append(Battery_Flow_Out[j]) \n Information[3].append(Battery_Flow_in[j]) \n Information[4].append(Curtailment[j]) \n Information[5].append(Energy_Demand[j]) \n Information[6].append(SOC[j])\n Information[7].append(Gen_Energy_I[j])\n Information[8].append(Last_Generator_Energy[j])\n Information[9].append(Total_Generator_Energy[j])\n Information[10].append(Gen_cost[j])\n \n Scenarios=Scenarios.append(Information)\n foo+=1\n \n index=[] \n for j in range(1, Number_Scenarios+1): \n index.append('Lost_Load '+str(j))\n index.append('PV_Energy '+str(j))\n index.append('Battery_Flow_Out '+str(j)) \n index.append('Battery_Flow_in '+str(j))\n index.append('Curtailment '+str(j))\n index.append('Energy_Demand '+str(j))\n index.append('SOC '+str(j))\n index.append('Gen energy Integer '+str(j))\n index.append('Last Generator Energy '+str(j))\n index.append('Total Generator Energy '+str(j))\n index.append('Total Cost Generator '+str(j))\n Scenarios.index= index\n \n \n \n \n # Creation of an index starting in the 'model.StartDate' value with 
a frequency step equal to 'model.Delta_Time'\n if instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1.0) : # if the step is in hours and minutes\n foo = str(instance.Delta_Time()) # transform the number into a string\n hour = foo[0] # Extract the first character\n minutes = str(int(float(foo[1:3])*60)) # Extract the last two characters\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(hour + 'h'+ minutes + 'min')) # Creation of an index with a start date and a frequency\n elif instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1): # if the step is in hours\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(str(instance.Delta_Time()) + 'h')) # Creation of an index with a start date and a frequency\n else: # if the step is in minutes\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(str(int(instance.Delta_Time()*60)) + 'min'))# Creation of an index with a start date and a frequency\n \n Scenarios.columns = columns\n Scenarios = Scenarios.transpose()\n \n Scenarios.to_excel('Results/Time_Series.xls') # Creating an Excel file with the values of the variables that are a function of the periods\n \n columns = [] # fix: 'columns' is reused for several different purposes\n for i in range(1, Number_Scenarios+1):\n columns.append('Scenario_'+str(i))\n \n Scenario_information =[[] for i in range(Number_Scenarios)]\n Scenario_NPC = instance.Scenario_Net_Present_Cost.get_values()\n LoL_Cost = instance.Scenario_Lost_Load_Cost.get_values() \n Scenario_Weight = instance.Scenario_Weight.extract_values()\n Diesel_Cost = instance.Sceneario_Generator_Total_Cost.get_values()\n \n for i in range(1, Number_Scenarios+1):\n Scenario_information[i-1].append(Scenario_NPC[i])\n Scenario_information[i-1].append(LoL_Cost[i])\n Scenario_information[i-1].append(Scenario_Weight[i])\n Scenario_information[i-1].append(Diesel_Cost[i])\n \n \n Scenario_Information = pd.DataFrame(Scenario_information,index=columns)\n Scenario_Information.columns=['Scenario NPC', 'LoL Cost','Scenario Weight', 'Diesel Cost']\n Scenario_Information = Scenario_Information.transpose()\n \n Scenario_Information.to_excel('Results/Scenario_Information.xls')\n \n S = instance.PlotScenario.value\n Time_Series = pd.DataFrame(index=range(0,8760))\n Time_Series.index = Scenarios.index\n \n Time_Series['Lost Load'] = Scenarios['Lost_Load '+str(S)]\n Time_Series['Energy PV'] = Scenarios['PV_Energy '+str(S)]\n Time_Series['Discharge energy from the Battery'] = Scenarios['Battery_Flow_Out '+str(S)] \n Time_Series['Charge energy to the Battery'] = Scenarios['Battery_Flow_in '+str(S)]\n Time_Series['Curtailment'] = Scenarios['Curtailment '+str(S)]\n Time_Series['Energy_Demand'] = Scenarios['Energy_Demand '+str(S)]\n Time_Series['State_Of_Charge_Battery'] = Scenarios['SOC '+str(S)]\n Time_Series['Gen energy Integer'] = Scenarios['Gen energy Integer '+str(S)]\n Time_Series['Last Generator Energy'] = Scenarios['Last Generator Energy ' +str(S)] \n Time_Series['Energy Diesel'] = Scenarios['Total Generator Energy '+str(S)]\n \n \n return Time_Series\n \n \n \n \ndef Load_results2_binary(instance):\n '''\n This function extracts the unidimensional variables into a data frame \n and creates an Excel file with this data\n \n :param instance: The instance of the project resolution created by PYOMO. \n \n :return: Data frame called Size_variables with the variables values. 
\n '''\n # Load the variables that do not depend on the periods into Python dictionaries\n Amortizacion = instance.Cost_Financial.get_values()[None]\n cb = instance.PV_Units.get_values()\n cb = cb.values()\n Size_PV=[list(cb)[0]*instance.PV_Nominal_Capacity.value]\n Size_Bat = instance.Battery_Nominal_Capacity.get_values()[None]\n Gen_cap = instance.Generator_Nominal_Capacity.value\n Gen_Power = Gen_cap*instance.Integer_generator.get_values()[None]\n NPC = instance.ObjectiveFuntion.expr()\n Mge_1 = instance.Marginal_Cost_Generator_1.value\n Start_Cost = instance.Start_Cost_Generator.value\n Funded= instance.Porcentage_Funded.value\n DiscountRate = instance.Discount_Rate.value\n InterestRate = instance.Interest_Rate_Loan.value\n PricePV = instance.PV_invesment_Cost.value\n PriceBatery= instance.Battery_Invesment_Cost.value\n PriceGenSet= instance.Generator_Invesment_Cost.value\n OM = instance.Maintenance_Operation_Cost_PV.value\n Years=instance.Years.value\n VOLL= instance.Value_Of_Lost_Load.value\n Mge_2 = instance.Marginal_Cost_Generator.value\n data3 = [Amortizacion, Size_PV[0], Size_Bat, Gen_cap, Gen_Power,NPC,Mge_1, Mge_2 , \n Start_Cost, Funded,DiscountRate,InterestRate,PricePV,PriceBatery,\n PriceGenSet,OM,Years,VOLL] # Collect the scalar values in a list \n Size_variables = pd.DataFrame(data3,index=['Amortization', 'Size of the solar panels', \n 'Size of the Battery','Nominal Capacity Generator',\n 'Generator Install power','Net Present Cost',\n 'Marginal cost Full load',\n 'Marginal cost Partial load', 'Start Cost',\n 'Funded Percentage', 'Discount Rate', \n 'Interest Rate','PV Price', 'Battery Price',\n 'GenSet Price','O&M', 'Project years','VOLL'])\n Size_variables.to_excel('Results/Size.xls') # Creating an Excel file with the values of the variables that do not depend on the periods\n \n I_Inv = instance.Initial_Inversion.get_values()[None] \n O_M = instance.Operation_Maintenance_Cost.get_values()[None] \n Financial_Cost = instance.Total_Finalcial_Cost.get_values()[None] \n Batt_Reposition = instance.Battery_Reposition_Cost.get_values()[None] \n \n Data = [I_Inv, O_M, Financial_Cost,Batt_Reposition]\n Value_costs = pd.DataFrame(Data, index=['Initial Investment', 'O & M',\n 'Financial Cost', 'Battery reposition'])\n\n Value_costs.to_excel('Results/Partial Costs.xls') \n\n\n VOLL = instance.Scenario_Lost_Load_Cost.get_values() \n Scenario_Generator_Cost = instance.Sceneario_Generator_Total_Cost.get_values() \n NPC_Scenario = instance.Scenario_Net_Present_Cost.get_values() \n \n columns = ['VOLL', 'Scenario Generator Cost', 'NPC Scenario']\n scenarios= range(1,instance.Scenarios.extract_values()[None]+1)\n Scenario_Costs = pd.DataFrame(columns=columns, index=scenarios)\n \n \n for j in scenarios:\n Scenario_Costs['VOLL'][j]= VOLL[j] \n Scenario_Costs['Scenario Generator Cost'][j]= Scenario_Generator_Cost[j]\n Scenario_Costs['NPC Scenario'][j]= NPC_Scenario[j]\n Scenario_Costs.to_excel('Results/Scenario Cost.xls') \n \n return Size_variables\n\ndef Load_results1_Dispatch(instance):\n '''\n This function loads the results that depend on the periods into a \n dataframe and creates an Excel file with it.\n \n :param instance: The instance of the project resolution created by PYOMO.\n \n :return: A dataframe called Time_series with the values of the variables \n that depend on the periods. 
\n '''\n Names = ['Lost_Load', 'PV_Energy', 'Battery_Flow_Out','Battery_Flow_in',\n 'Curtailment', 'Energy_Demand', 'SOC', 'Gen Int', 'Gen energy',\n 'Total Cost Generator']\n Number_Periods = int(instance.Periods.extract_values()[None])\n Time_Series = pd.DataFrame(columns= Names, index=range(1,Number_Periods+1)) \n \n Lost_Load = instance.Lost_Load.get_values()\n PV_Energy = instance.Total_Energy_PV.extract_values()\n Battery_Flow_Out = instance.Energy_Battery_Flow_Out.get_values()\n Battery_Flow_in = instance.Energy_Battery_Flow_In.get_values()\n Curtailment = instance.Energy_Curtailment.get_values()\n Energy_Demand = instance.Energy_Demand.extract_values()\n SOC = instance.State_Of_Charge_Battery.get_values()\n Gen_Energy_Integer = instance.Generator_Energy_Integer.get_values()\n Total_Generator_Energy = instance.Generator_Total_Period_Energy.get_values() \n Gen_cost = instance.Period_Total_Cost_Generator.get_values() \n \n \n for i in range(1,Number_Periods+1):\n Time_Series['Lost_Load'][i] = Lost_Load[i]\n Time_Series['PV_Energy'][i] = PV_Energy[i]\n Time_Series['Battery_Flow_Out'][i] = Battery_Flow_Out[i]\n Time_Series['Battery_Flow_in'][i] = Battery_Flow_in[i]\n Time_Series['Curtailment'][i] = Curtailment[i]\n Time_Series['Energy_Demand'][i] = Energy_Demand[i]\n Time_Series['SOC'][i] = SOC[i]\n Time_Series['Gen Int'][i] = Gen_Energy_Integer[i]\n Time_Series['Gen energy'][i] = Total_Generator_Energy[i]\n Time_Series['Total Cost Generator'][i] = Gen_cost[i] \n\n \n # Creation of an index starting in the 'model.StartDate' value with a frequency step equal to 'model.Delta_Time'\n if instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1.0) : # if the step is in hours and minutes\n foo = str(instance.Delta_Time()) # transform the number into a string\n hour = foo[0] # Extract the first character\n minutes = str(int(float(foo[1:3])*60)) # Extract the last two characters\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(hour + 'h'+ minutes + 'min')) # Creation of an index with a start date and a frequency\n elif instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1): # if the step is in hours\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(str(instance.Delta_Time()) + 'h')) # Creation of an index with a start date and a frequency\n else: # if the step is in minutes\n columns = pd.DatetimeIndex(start=instance.StartDate(), \n periods=instance.Periods(), \n freq=(str(int(instance.Delta_Time()*60)) + 'min'))# Creation of an index with a start date and a frequency\n \n Time_Series.index = columns\n \n Time_Series.to_excel('Results/Time_Series.xls') # Creating an Excel file with the values of the variables that are a function of the periods\n \n\n Time_Series_2 = pd.DataFrame() \n Time_Series_2['Lost Load'] = Time_Series['Lost_Load']\n Time_Series_2['Renewable Energy'] = Time_Series['PV_Energy']\n Time_Series_2['Discharge energy from the Battery'] = Time_Series['Battery_Flow_Out']\n Time_Series_2['Charge energy to the Battery'] = Time_Series['Battery_Flow_in']\n Time_Series_2['Curtailment'] = Time_Series['Curtailment']\n Time_Series_2['Energy_Demand'] = Time_Series['Energy_Demand']\n Time_Series_2['State_Of_Charge_Battery'] = Time_Series['SOC']\n Time_Series_2['Energy Diesel'] = Time_Series['Gen energy']\n Time_Series_2['Total Cost Generator'] = Time_Series['Total Cost Generator'] \n\n Time_Series_2.index = columns\n \n \n \n return Time_Series_2\n\ndef 
Load_results2_Dispatch(instance):\n '''\n This function extracts the unidimensional variables into a data frame \n and creates an Excel file with this data\n \n :param instance: The instance of the project resolution created by PYOMO. \n \n :return: Data frame called Size_variables with the variables values. \n '''\n Data = []\n # Load the variables that do not depend on the periods into Python dictionaries\n \n Generator_Efficiency = instance.Generator_Efficiency.extract_values()\n Generator_Min_Out_Put = instance.Generator_Min_Out_Put.extract_values()\n Low_Heating_Value = instance.Low_Heating_Value.extract_values()\n Fuel_Cost = instance.Diesel_Cost.extract_values()\n Marginal_Cost_Generator_1 = instance.Marginal_Cost_Generator_1.extract_values()\n Cost_Increase = instance.Cost_Increase.extract_values()\n Generator_Nominal_Capacity = instance.Generator_Nominal_Capacity.extract_values()\n Start_Cost_Generator = instance.Start_Cost_Generator.extract_values()\n Marginal_Cost_Generator = instance.Marginal_Cost_Generator.extract_values()\n \n Generator_Data = pd.DataFrame()\n g = None\n Name = 'Generator ' + str(1)\n Generator_Data.loc['Generator Min Out Put',Name] = Generator_Min_Out_Put[g]\n Generator_Data.loc['Generator Efficiency',Name] = Generator_Efficiency[g]\n Generator_Data.loc['Low Heating Value',Name] = Low_Heating_Value[g]\n Generator_Data.loc['Fuel Cost',Name] = Fuel_Cost[g]\n Generator_Data.loc['Marginal cost Full load',Name] = Marginal_Cost_Generator_1[g]\n Generator_Data.loc['Marginal cost Partial load',Name] = Marginal_Cost_Generator[g]\n Generator_Data.loc['Cost Increase',Name] = Cost_Increase[g]\n Generator_Data.loc['Generator Nominal Capacity',Name] = Generator_Nominal_Capacity[g]\n Generator_Data.loc['Start Cost Generator',Name] = Start_Cost_Generator[g]\n Data.append(Generator_Data) \n Generator_Data.to_excel('Results/Generator_Data.xls') \n \n Size_Bat = instance.Battery_Nominal_Capacity.extract_values()[None]\n O_Cost = instance.ObjectiveFuntion.expr() \n VOLL= instance.Value_Of_Lost_Load.value\n Bat_ef_out = instance.Discharge_Battery_Efficiency.value\n Bat_ef_in = instance.Charge_Battery_Efficiency.value\n DoD = instance.Deep_of_Discharge.value\n Inv_Cost_Bat = instance.Battery_Invesment_Cost.value\n Inv_Cost_elec = instance.Battery_Electronic_Invesmente_Cost.value\n Bat_Cycles = instance.Battery_Cycles.value\n Bat_U_C = Inv_Cost_Bat - Inv_Cost_elec\n Battery_Reposition_Cost= Bat_U_C/(Bat_Cycles*2*(1-DoD))\n Number_Periods = int(instance.Periods.extract_values()[None])\n \n data3 = [Size_Bat, O_Cost, VOLL, Bat_ef_out, Bat_ef_in, DoD, \n Inv_Cost_Bat, Inv_Cost_elec, Bat_Cycles,\n Battery_Reposition_Cost, Number_Periods] # Collect the scalar values in a list \n Results = pd.DataFrame(data3,index = ['Size of the Battery',\n 'Operation Cost', 'VOLL',\n 'Battery efficiency discharge',\n 'Battery efficiency charge',\n 'Depth of discharge',\n 'Battery unitary investment cost',\n 'Battery electronic unitary cost',\n 'Battery max cycles',\n 'Battery Reposition Cost',\n 'Number of periods'])\n Results.to_excel('Results/Size.xls') # Creating an Excel file with the values of the variables that do not depend on the periods\n Data.append(Results) \n return Data\n\ndef Dispatch_Economic_Analysis(Results,Time_Series):\n Data = []\n Generator_Data = Results[0]\n Result = Results[1]\n Time_Series_Economic = pd.DataFrame()\n for t in Time_Series.index:\n name_1 = \"Fuel\"\n name_2 = \"Discharge energy from the Battery\"\n name_3 = \"Charge energy to the Battery\"\n name_4 = 'Battery 
Reposition Cost'\n name_5 = 'Battery operation Cost'\n name_6 = 'VOLL'\n Power_Bat = Time_Series[name_2][t] + Time_Series[name_3][t]\n Time_Series_Economic.loc[t,name_5] = Power_Bat*Result[0][name_4]\n LL = Time_Series['Lost Load'][t]\n Time_Series_Economic.loc[t,name_6] = LL*Result[0][name_6]\n \n if Time_Series['Energy Diesel'][t] > 0.1:\n a = Generator_Data['Generator 1']['Start Cost Generator'] \n b = Generator_Data['Generator 1']['Marginal cost Partial load']\n Time_Series_Economic.loc[t,name_1]=a + b*Time_Series['Energy Diesel'][t]\n \n else:\n Time_Series_Economic.loc[t,name_1]= 0 \n \n Operation_Cost = Time_Series_Economic.sum()\n Operation_Cost['Total Cost'] = Operation_Cost.sum() \n Data.append(Time_Series_Economic)\n Data.append(Operation_Cost)\n \n return Data\n \n \n \ndef Plot_Energy_Total(instance, Time_Series, plot, Plot_Date, PlotTime): \n '''\n This function creates a plot of the dispatch of energy of a defined number of days.\n \n :param instance: The instance of the project resolution created by PYOMO. \n :param Time_series: The results of the optimization model that depend on the periods.\n \n \n '''\n \n if plot == 'No Average':\n Periods_Day = 24/instance.Delta_Time() # periods in a day\n foo = pd.DatetimeIndex(start=Plot_Date,periods=1,freq='1h')# Assign the start date of the plot to a dummy variable\n for x in range(0, instance.Periods()): # Find the position from which the plot will start in the Time_Series dataframe\n if foo == Time_Series.index[x]: \n Start_Plot = x # assign the value of x to the position where the plot will start \n End_Plot = Start_Plot + PlotTime*Periods_Day # Create the end of the plot position inside the time_series\n Time_Series.index=range(1,(len(Time_Series)+1))\n Plot_Data = Time_Series[Start_Plot:int(End_Plot)] # Extract the data between the start and end position from the Time_Series\n columns = pd.DatetimeIndex(start=Plot_Date, \n periods=PlotTime*Periods_Day, \n freq=('1H')) \n Plot_Data.index=columns\n \n Plot_Data = Plot_Data.astype('float64')\n Plot_Data['Charge energy to the Battery (Wh)'] = -Plot_Data['Charge energy to the Battery (Wh)']\n Plot_Data = round(Plot_Data,2) \n Fill = pd.DataFrame()\n \n r = 'Renewable Energy (Wh)'\n \n g = 'Generator Energy (Wh)'\n c = 'Curtailment (Wh)'\n c2 ='Curtailment min (Wh)'\n b = 'Discharge energy from the Battery (Wh)'\n d = 'Energy Demand (Wh)'\n ch = 'Charge energy to the Battery (Wh)'\n SOC = 'State Of Charge Battery (Wh)'\n Renewable_Source = instance. 
Renewable_Source.value\n \n\n \n for t in Plot_Data.index:\n if (Plot_Data[r][t] > 0 and Plot_Data[g][t]>0):\n curtailment = Plot_Data[c][t]\n Fill.loc[t,r] = Plot_Data[r][t] \n\n Fill.loc[t,g] = Fill[r][t] + Plot_Data[g][t]-curtailment\n Fill.loc[t,c] = Fill[r][t] + Plot_Data[g][t]\n Fill.loc[t,c2] = Fill.loc[t,g]\n elif Plot_Data[r][t] > 0:\n Fill.loc[t,r] = Plot_Data[r][t]-Plot_Data[c][t]\n Fill.loc[t,g] = Fill[r][t] + Plot_Data[g][t]\n Fill.loc[t,c] = Fill[r][t] + Plot_Data[g][t]+Plot_Data[c][t]\n Fill.loc[t,c2] = Plot_Data[r][t]-Plot_Data[c][t]\n elif Plot_Data[g][t] > 0:\n Fill.loc[t,r] = 0\n Fill.loc[t,g]= (Fill[r][t] + Plot_Data[g][t] - Plot_Data[c][t] )\n Fill.loc[t,c] = Fill[r][t] + Plot_Data[g][t]\n Fill.loc[t,c2] = (Fill[r][t] + Plot_Data[g][t] - Plot_Data[c][t] )\n else:\n Fill.loc[t,r] = 0\n Fill.loc[t,g]= 0\n if Plot_Data[g][t] == 0:\n Fill.loc[t,c] = Plot_Data[g][t]\n Fill.loc[t,c2] = Plot_Data[g][t]\n else:\n if Plot_Data[g][t] > 0:\n Fill.loc[t,c] = Plot_Data[g][t]\n Fill.loc[t,c2] = Plot_Data[d][t]\n else:\n Fill.loc[t,c] = Plot_Data[b][t]\n Fill.loc[t,c2] = Plot_Data[d][t]\n \n if Renewable_Source > 1:\n for R in range(1,Renewable_Source+1):\n name = 'Renewable ' + str(R) + ' (Wh)'\n if R == 1: \n Fill[name] = Plot_Data[name]\n else:\n name_1 = 'Renewable ' + str(R-1) + ' (Wh)'\n Fill[name] = Fill[name_1] + Plot_Data[name]\n\n\n \n Fill[b] = (Fill[g] + Plot_Data[b])\n Fill[d] = Plot_Data[d]\n Fill[ch] = Plot_Data[ch]\n Fill[SOC] = Plot_Data[SOC]\n \n Fill.index = columns\n New = pd.DataFrame()\n\n \n for t in Fill.index[:-1]:\n if Fill[b][t] > Fill[g][t]:\n if Fill[r][t+1]>Fill[d][t+1]:\n print(t)\n b_d = (Fill[d][t+1] - Fill[d][t])/60\n b_g = (Fill[g][t+1] - Fill[g][t])/60\n \n a_d = Fill[d][t]\n a_g = Fill[g][t]\n \n x = (a_g - a_d)/(b_d - b_g)\n x = round(x,4)\n second, minute = math.modf(x)\n minute = int(minute)\n second = second*60\n second = int(second)\n \n if x < 60:\n t_1 = t\n t_1 = t_1.replace(minute=minute, second=second, microsecond=0)\n \n xp = [0, 60]\n \n New.loc[t_1,r] = interp(x,xp,[Fill[r][t], Fill[r][t+1]])\n New.loc[t_1,g] = interp(x,xp,[Fill[g][t], Fill[g][t+1]])\n New.loc[t_1,c] = interp(x,xp,[Fill[c][t], Fill[c][t+1]])\n New.loc[t_1,c2] = interp(x,xp,[Fill[c2][t], Fill[c2][t+1]])\n New.loc[t_1,b] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])\n New.loc[t_1,d] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])\n New.loc[t_1,SOC] = interp(x,xp,[Fill[SOC][t], Fill[SOC][t+1]])\n New.loc[t_1,ch] = interp(x,xp,[Fill[ch][t], Fill[ch][t+1]])\n if Renewable_Source > 1:\n for R in range(1,Renewable_Source+1):\n name = 'Renewable ' + str(R) + ' (Wh)'\n New.loc[t_1,name] = interp(x,xp,[Fill[name][t], Fill[name][t+1]])\n\n\n\n \n \n for t in Fill.index[:-1]:\n if (Fill[b][t] == Fill[d][t]) and (Fill[g][t+1] > Plot_Data[d][t+1]):\n if Fill[b][t] > Fill[g][t]:\n print(t)\n b_d = (Fill[d][t+1] - Fill[d][t])/60\n b_g = (Fill[g][t+1] - Fill[g][t])/60\n \n a_d = Fill[d][t]\n a_g = Fill[g][t]\n \n x = (a_g - a_d)/(b_d - b_g)\n x = round(x,4)\n second, minute = math.modf(x)\n minute = int(minute)\n second = second*60\n second = int(second)\n \n if x < 60:\n t_1 = t\n t_1 = t_1.replace(minute=minute, second=second, microsecond=0)\n \n xp = [0, 60]\n \n New.loc[t_1,r] = interp(x,xp,[Fill[r][t], Fill[r][t+1]])\n New.loc[t_1,g] = interp(x,xp,[Fill[g][t], Fill[g][t+1]])\n New.loc[t_1,c] = interp(x,xp,[Fill[c][t], Fill[c][t+1]])\n New.loc[t_1,c2] = interp(x,xp,[Fill[c2][t], Fill[c2][t+1]])\n New.loc[t_1,b] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])\n New.loc[t_1,d] = 
interp(x,xp,[Fill[d][t], Fill[d][t+1]])\n New.loc[t_1,SOC] = interp(x,xp,[Fill[SOC][t], Fill[SOC][t+1]])\n New.loc[t_1,ch] = interp(x,xp,[Fill[ch][t], Fill[ch][t+1]])\n if Renewable_Source > 1:\n for R in range(1,Renewable_Source+1):\n name = 'Renewable ' + str(R) + ' (Wh)'\n New.loc[t_1,name] = interp(x,xp,[Fill[name][t], Fill[name][t+1]])\n\n \n # Adjust the battery series when, one step earlier, energy production exceeds demand\n # and at time t the battery is being used \n for t in Fill.index[1:-1]:\n if Fill[g][t] > Plot_Data[d][t] and Fill[b][t+1] == Plot_Data[d][t+1] and Plot_Data[b][t+1] > 0:\n print(t)\n b_d = (Fill[d][t+1] - Fill[d][t])/60\n b_g = (Fill[g][t+1] - Fill[g][t])/60\n \n a_d = Fill[d][t]\n a_g = Fill[g][t]\n \n x = (a_g - a_d)/(b_d - b_g)\n x = round(x,4)\n second, minute = math.modf(x)\n minute = int(minute)\n second = second*60\n second = int(second)\n \n if x < 60:\n t_1 = t\n t_1 = t_1.replace(minute=minute, second=second, microsecond=0)\n \n xp = [0, 60]\n \n New.loc[t_1,r] = interp(x,xp,[Fill[r][t], Fill[r][t+1]])\n New.loc[t_1,g] = interp(x,xp,[Fill[g][t], Fill[g][t+1]])\n New.loc[t_1,c] = interp(x,xp,[Fill[c][t], Fill[c][t+1]])\n New.loc[t_1,c2] = interp(x,xp,[Fill[c2][t], Fill[c2][t+1]])\n New.loc[t_1,b] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])\n New.loc[t_1,d] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])\n New.loc[t_1,SOC] = interp(x,xp,[Fill[SOC][t], Fill[SOC][t+1]])\n New.loc[t_1,ch] = interp(x,xp,[Fill[ch][t], Fill[ch][t+1]])\n if Renewable_Source > 1:\n for R in range(1,Renewable_Source+1):\n name = 'Renewable ' + str(R) + ' (Wh)'\n New.loc[t_1,name] = interp(x,xp,[Fill[name][t], Fill[name][t+1]])\n\n Fill = Fill.append(New)\n Fill.sort_index(inplace=True)\n\n size = [20,10] \n plt.figure(figsize=size)\n# Fill[b] = ( Fill[g] + Plot_Data[b])\n # Renewable energy\n \n # For 1 energy Source \n if Renewable_Source == 1:\n c_PV = 'yellow' \n Alpha_r = 0.4 \n ax1 =Fill[r].plot(style='y-', linewidth=1)\n ax1.fill_between(Fill.index, 0,Fill[r].values, \n alpha=Alpha_r, color = c_PV) \n else:\n c_r = ['aqua', 'chocolate', 'lightcoral', 'lightgreen']\n for R in range (1, Renewable_Source+1):\n name = 'Renewable ' + str(R) + ' (Wh)'\n print(name)\n if R == 1:\n c_PV_1 = 'yellow' \n Alpha_r = 0.4 \n ax1 = Fill[name].plot(style='y-', linewidth=0)\n ax1.fill_between(Fill.index, 0, Fill[name].values, \n alpha=Alpha_r, color = c_PV_1)\n elif R == Renewable_Source:\n name_1 = 'Renewable ' + str(R-1) + ' (Wh)'\n c_r_1 = c_r[R-1] \n Alpha_r = 0.4 \n ax1 = Fill[r].plot(style='c-', linewidth=0)\n ax1.fill_between(Fill.index, Fill[name_1].values, Fill[r].values, \n alpha=Alpha_r, color =c_r_1)\n else:\n name_1 = 'Renewable ' + str(R-1) + ' (Wh)'\n c_r_1 = c_r[R-1] \n Alpha_r = 0.4 \n ax1 = Fill[r].plot(style='c-', linewidth=0)\n ax1.fill_between(Fill.index, Fill[name_1].values, Fill[name].values, \n alpha=Alpha_r, color =c_r_1)\n \n \n # Genset Plot\n c_d = 'm'\n Alpha_g = 0.3 \n hatch_g = '\\\\'\n ax2 = Fill[g].plot(style='c', linewidth=0)\n ax2.fill_between(Fill.index, Fill[r].values, Fill[g].values,\n alpha=Alpha_g, color=c_d, edgecolor=c_d , hatch =hatch_g)\n # Battery discharge\n alpha_bat = 0.3\n hatch_b ='x'\n C_Bat = 'green'\n ax3 = Fill[b].plot(style='b', linewidth=0)\n ax3.fill_between(Fill.index, Fill[g].values, Fill[b].values, \n alpha=alpha_bat, color =C_Bat,edgecolor=C_Bat, hatch =hatch_b)\n # Demand\n ax4 = Plot_Data[d].plot(style='k', linewidth=2, marker= 'o')\n # Battery Charge \n ax5= Fill[ch].plot(style='m', linewidth=0.5) # Plot the line of the energy flowing into 
the battery\n ax5.fill_between(Fill.index, 0, \n Fill[ch].values\n , alpha=alpha_bat, color=C_Bat,edgecolor= C_Bat, hatch ='x') \n # State of charge of battery\n ax6= Fill[SOC].plot(style='k--', \n secondary_y=True, linewidth=2, alpha=0.7 ) \n # Curtailment\n alpha_cu = 0.3\n hatch_cu = '+'\n C_Cur = 'blue'\n ax7 = Fill[c].plot(style='b-', linewidth=0)\n ax7.fill_between(Fill.index, Fill[c2].values , Fill[c].values, \n alpha=alpha_cu, color=C_Cur,edgecolor= C_Cur, \n hatch =hatch_cu,\n where=Fill[c].values>Fill[d]) \n # Lost load\n \n if instance.Lost_Load_Probability > 0:\n \n alpha_LL = 0.3\n hatch_LL = '-'\n C_LL = 'crimson'\n ax4.fill_between(Fill.index, Fill[b].values, Fill[d].values, \n alpha=alpha_LL, color=C_LL,edgecolor= C_LL, \n hatch =hatch_LL) \n \n \n # Define name and units of the axis\n ax1.set_ylabel('Power (kW)',size=30)\n ax1.set_xlabel('Time',size=30)\n ax6.set_ylabel('Battery State of charge (kWh)',size=30)\n ax1.set_xlim(Fill.index[0], Fill.index[len(Fill)-1])\n tick_size = 15 \n #mpl.rcParams['xtick.labelsize'] = tick_size \n ax1.tick_params(axis='x', which='major', labelsize = tick_size,pad=8 ) \n ax1.tick_params(axis='y', which='major', labelsize = tick_size )\n # ax1.tick_params(axis='x', which='major', labelsize = tick_size) \n ax6.tick_params(axis='y', which='major', labelsize = tick_size ) \n # Define the legends of the plot\n From_Renewable =[]\n for R in range(1, Renewable_Source + 1):\n if R == 1:\n From_Renewable.append(mpatches.Patch(color='yellow',alpha=Alpha_r, label='Renewable 1')) \n else:\n name = 'From Renewable ' +str(R) \n c_r_1 = c_r[R-1] \n foo = mpatches.Patch(color=c_r_1,alpha=Alpha_r, label=name)\n From_Renewable.append(foo)\n \n From_Generator = mpatches.Patch(color=c_d,alpha=Alpha_g,\n label='From Generator',hatch =hatch_g)\n Battery = mpatches.Patch(color=C_Bat ,alpha=alpha_bat, \n label='Battery Energy Flow',hatch =hatch_b)\n Curtailment = mpatches.Patch(color=C_Cur ,alpha=alpha_cu, \n label='Curtailment',hatch =hatch_cu)\n\n Energy_Demand = mlines.Line2D([], [], color='black',label='Energy Demand')\n State_Of_Charge_Battery = mlines.Line2D([], [], color='black',\n label='State Of Charge Battery',\n linestyle='--',alpha=0.7)\n \n \n Legends = []\n \n Legends.append(From_Generator)\n for R in range(Renewable_Source):\n Legends.append(From_Renewable[R])\n Legends.append(Battery)\n Legends.append(Curtailment)\n Legends.append(Energy_Demand)\n Legends.append(State_Of_Charge_Battery)\n \n if instance.Lost_Load_Probability > 0:\n Lost_Load = mpatches.Patch(color=C_LL,alpha=alpha_LL,\n label='Lost Load',hatch =hatch_LL)\n Legends.append(Lost_Load)\n \n plt.legend(handles=Legends,\n bbox_to_anchor=(1.025, -0.15),fontsize = 20,\n frameon=False, ncol=4)\n plt.savefig('Results/Energy_Dispatch.png', bbox_inches='tight') \n plt.show() \n \n else: \n start = Time_Series.index[0]\n end = Time_Series.index[instance.Periods()-1]\n Time_Series = Time_Series.astype('float64')\n Plot_Data_2 = Time_Series[start:end].groupby([Time_Series[start:end].index.hour]).mean()\n Plot_Data_2 = Plot_Data_2/1000\n Plot_Data_2['Charge energy to the Battery'] = -Plot_Data_2['Charge energy to the Battery']\n Plot_Data = Plot_Data_2\n Vec = Plot_Data['Renewable Energy'] + Plot_Data['Energy Diesel']\n Vec2 = (Plot_Data['Renewable Energy'] + Plot_Data['Energy Diesel'] + \n Plot_Data['Discharge energy from the Battery'])\n \n \n ax1= Vec.plot(style='b-', linewidth=0.5) # Plot the line of the diesel energy plus the PV energy\n ax1.fill_between(Plot_Data.index, Plot_Data['Energy 
Diesel'].values, Vec.values, \n alpha=0.3, color = 'b')\n ax2= Plot_Data['Energy Diesel'].plot(style='r', linewidth=0.5)\n ax2.fill_between(Plot_Data.index, 0, Plot_Data['Energy Diesel'].values, \n alpha=0.2, color='r') # Fill the area of the energy produced by the diesel generator\n ax3 = Plot_Data['Energy_Demand'].plot(style='k', linewidth=2)\n ax3.fill_between(Plot_Data.index, Vec.values , \n Plot_Data['Energy_Demand'].values,\n alpha=0.3, color='g', \n where= Plot_Data['Energy_Demand']>= Vec,interpolate=True)\n ax5= Plot_Data['Charge energy to the Battery'].plot(style='m', linewidth=0.5) # Plot the line of the energy flowing into the battery\n ax5.fill_between(Plot_Data.index, 0, \n Plot_Data['Charge energy to the Battery'].values\n , alpha=0.3, color='m') # Fill the area of the energy flowing into the battery\n ax6= Plot_Data['State_Of_Charge_Battery'].plot(style='k--', secondary_y=True, linewidth=2, alpha=0.7 ) # Plot the line of the State of charge of the battery\n \n # Define name and units of the axis\n ax1.set_ylabel('Power (kW)')\n ax1.set_xlabel('hours')\n ax6.set_ylabel('Battery State of charge (kWh)')\n \n # Define the legends of the plot\n From_PV = mpatches.Patch(color='blue',alpha=0.3, label='From PV')\n From_Generator = mpatches.Patch(color='red',alpha=0.3, label='From Generator')\n From_Battery = mpatches.Patch(color='green',alpha=0.5, label='From Battery')\n To_Battery = mpatches.Patch(color='magenta',alpha=0.5, label='To Battery')\n Lost_Load = mpatches.Patch(color='yellow', alpha= 0.3, label= 'Lost Load')\n Energy_Demand = mlines.Line2D([], [], color='black',label='Energy Demand')\n State_Of_Charge_Battery = mlines.Line2D([], [], color='black',\n label='State Of Charge Battery',\n linestyle='--',alpha=0.7)\n plt.legend(handles=[From_Generator, From_PV, From_Battery, \n To_Battery, Lost_Load, Energy_Demand, \n State_Of_Charge_Battery], bbox_to_anchor=(1.83, 1))\n plt.savefig('Results/Energy_Dispatch.png', bbox_inches='tight') \n plt.show() \n \ndef Energy_Mix(instance,Scenarios,Scenario_Probability):\n \n Number_Scenarios = int(instance.Scenarios.extract_values()[None])\n Energy_Totals = Scenarios.sum()\n \n PV_Energy = 0 \n Generator_Energy = 0\n Curtailment = 0\n Battery_Out = 0\n Demand = 0\n Lost_Load = 0\n Energy_Mix = pd.DataFrame()\n \n for s in range(1, Number_Scenarios+1): \n \n index_1 = 'Renewable Energy ' + str(s) + ' (Wh)' \n index_2 = 'Gen energy ' + str(s) + ' (Wh)'\n index_3 = 'Scenario ' + str(s)\n index_4 = 'Curtailment ' + str(s) + ' (Wh)'\n index_5 = 'Battery Flow Out ' + str(s) + ' (Wh)'\n index_6 = 'Energy Demand ' + str(s) + ' (Wh)'\n index_7 = 'Lost Load '+str(s) + ' (Wh)'\n \n PV = Energy_Totals[index_1]\n Ge = Energy_Totals[index_2]\n We = Scenario_Probability[index_3]\n Cu = Energy_Totals[index_4]\n B_O = Energy_Totals[index_5] \n De = Energy_Totals[index_6] \n \n PV_Energy += PV*We\n Generator_Energy += Ge*We \n Curtailment += Cu*We\n Battery_Out += B_O*We\n Demand += De*We\n \n \n Energy_Mix.loc['PV Penetration',index_3] = PV/(PV+Ge)\n Energy_Mix.loc['Curtailment Percentage',index_3] = Cu/(PV+Ge)\n Energy_Mix.loc['Battery Usage',index_3] = B_O/De\n \n if instance.Lost_Load_Probability > 0:\n LL = Energy_Totals[index_7]*We \n Lost_Load += LL # LL already carries the scenario weight\n Energy_Mix.loc['Lost Load', index_3] = LL/De\n \n Renewable_Real_Penetration = PV_Energy/(PV_Energy+Generator_Energy) \n Curtailment_Percentage = Curtailment/(PV_Energy+Generator_Energy)\n Battery_Usage = Battery_Out/Demand\n \n 
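# Report the weighted energy-mix indicators: renewable penetration as a share of\n    # total generation, curtailed energy as a share of total generation, and battery\n    # discharge relative to total demand.\n    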
print(str(round(Renewable_Real_Penetration*100, 1)) + ' % Renewable Penetration')\n print(str(round(Curtailment_Percentage*100,1)) + ' % of energy curtailed')\n print(str(round(Battery_Usage*100,1)) + ' % Battery usage')\n \n if instance.Lost_Load_Probability > 0:\n foo = []\n for s in range(1, Number_Scenarios+1):\n name = 'Scenario ' + str(s)\n foo.append(name)\n \n Lost_Load_Real = sum(Energy_Mix.loc['Lost Load', name] for name in foo)\n print(str(round(Lost_Load_Real*100,1)) + ' % Lost load in the system')\n \n return Energy_Mix \n \n \ndef Print_Results(instance, Generator_Data, Data_Renewable, Battery_Data ,Results, \n formulation):\n Number_Renewable_Source = int(instance.Renewable_Source.extract_values()[None])\n Number_Generator = int(instance.Generator_Type.extract_values()[None])\n \n for i in range(1, Number_Renewable_Source + 1):\n index_1 = 'Source ' + str(i)\n index_2 = 'Total Nominal Capacity (W)'\n \n Renewable_Rate = float(Data_Renewable[index_1][index_2])\n Renewable_Rate = round(Renewable_Rate, 1)\n print('Renewable ' + str(i) + ' nominal capacity is ' \n + str(Renewable_Rate) +' W') \n if formulation == 'LP': \n for i in range(1, Number_Generator + 1):\n index_1 = 'Generator ' + str(i)\n index_2 = 'Generator Nominal Capacity (W)'\n \n Generator_Rate = float(Generator_Data[index_1][index_2])\n Generator_Rate = round(Generator_Rate, 1)\n \n print('Generator ' + str(i) + ' nominal capacity is ' \n + str(Generator_Rate) +' W') \n if formulation == 'MILP': \n Number_Generator = int(instance.Generator_Type.extract_values()[None])\n for i in range(1, Number_Generator + 1):\n index_1 = 'Generator ' + str(i)\n index_2 = 'Generator Nominal Capacity (W)'\n index_3 = 'Number of Generators'\n Generator_Rate = float(Generator_Data[index_1][index_2])\n Generator_Rate = round(Generator_Rate, 1)\n Generator_Rate = Generator_Rate*Generator_Data[index_1][index_3]\n print('Generator ' + str(i) + ' nominal capacity is ' \n + str(Generator_Rate) +' W') \n \n index_2 = 'Nominal Capacity (Wh)' \n Battery_Rate = Battery_Data['Battery'][index_2]\n Battery_Rate = round(Battery_Rate, 1)\n \n print('Battery nominal capacity is ' \n + str(Battery_Rate) +' Wh') \n \n index_2 = 'NPC (USD)' \n NPC = Results['Data'][index_2]/1000\n NPC = round(NPC, 0)\n \n print('NPC is ' + str(NPC) +' Thousand USD') \n \n \n index_2 = 'LCOE (USD/kWh)' \n LCOE = Results['Data'][index_2]\n LCOE = round(LCOE, 3)\n \n print('The LCOE is ' + str(LCOE) + ' USD/kWh') \n\n\n \ndef Print_Results_Dispatch(instance, Economic_Results):\n Operation_Costs = Economic_Results[1]\n Fuel_Cost = round(Operation_Costs['Fuel'],2) \n \n print('Diesel cost is ' + str(Fuel_Cost) + ' USD')\n \n LL_Cost = round(Operation_Costs['VOLL'],2) \n \n print('Lost load cost is ' + str(LL_Cost) + ' USD')\n \n Battery_Cost = round(Operation_Costs['Battery operation Cost'],2) \n \n print('Battery operation cost is ' + str(Battery_Cost) + ' USD')\n \n Total_Cost = round(Operation_Costs['Total Cost'],2) \n \n print('Total operation cost is ' + str(Total_Cost) + ' USD')\n \n \ndef Energy_Mix_Dispatch(instance,Time_Series):\n \n Energy_Totals = Time_Series.sum()\n \n PV_Energy = Energy_Totals['Renewable Energy']\n Generator_Energy = Energy_Totals['Energy Diesel']\n Curtailment = Energy_Totals['Curtailment']\n Demand = Energy_Totals['Energy_Demand']\n Battery_Out = Energy_Totals['Discharge energy from the Battery']\n\n Renewable_Real_Penetration = PV_Energy/(PV_Energy+Generator_Energy)\n Renewable_Real_Penetration = round(Renewable_Real_Penetration,4)\n 
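# The curtailment and battery-usage shares below follow the same definitions as in Energy_Mix,\n    # computed here for a single deterministic dispatch run.\n    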
Curtailment_Percentage = Curtailment/(PV_Energy+Generator_Energy)\n Curtailment_Percentage = round(Curtailment_Percentage,4)\n Battery_Usage = Battery_Out/Demand\n Battery_Usage = round(Battery_Usage,4)\n \n print(str(Renewable_Real_Penetration*100) + ' % Renewable Penetration')\n print(str(Curtailment_Percentage*100) + ' % of energy curtail')\n print(str(Battery_Usage*100) + ' % Battery usage')\n ", "sub_path": "Results.py", "file_name": "Results.py", "file_ext": "py", "file_size_in_byte": 75742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.ExcelWriter", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 179, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 201, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 267, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 297, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 348, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 379, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 441, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 476, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 511, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 525, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 559, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 627, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 631, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 635, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 661, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 668, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 721, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 737, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 749, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 774, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 806, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 810, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 814, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 823, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 862, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 893, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 911, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 953, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 960, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 969, "usage_type": "call"}, {"api_name": 
"pandas.DataFrame", "line_number": 1033, "usage_type": "call"}, {"api_name": "math.modf", "line_number": 1048, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1059, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1060, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1061, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1062, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1063, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1064, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1065, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1066, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1070, "usage_type": "call"}, {"api_name": "math.modf", "line_number": 1088, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1099, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1100, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1101, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1102, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1103, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1104, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1105, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1106, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1110, "usage_type": "call"}, {"api_name": "math.modf", "line_number": 1126, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1137, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1138, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1139, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1140, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1141, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1142, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1143, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1144, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 1148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1154, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1252, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1252, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1256, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1256, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1259, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1259, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1261, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1261, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1263, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1263, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 1266, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 1266, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 1267, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 1267, "usage_type": "name"}, 
{"api_name": "matplotlib.patches.Patch", "line_number": 1283, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1291, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1335, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1335, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1336, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1336, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1337, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1337, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1338, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1338, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 1339, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 1339, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 1340, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 1340, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 1341, "usage_type": "call"}, {"api_name": "matplotlib.lines", "line_number": 1341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1347, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1348, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1361, "usage_type": "call"}]} +{"seq_id": "6415726", "text": "# (C) Datadog, Inc. 
2021-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport os\nfrom pathlib import Path\n\nfrom datadog_checks.dev.tooling.constants import get_root, set_root\nfrom datadog_checks.dev.tooling.datastructures import JSONDict\nfrom datadog_checks.dev.tooling.manifest_validator import get_all_validators\n\n\ndef test_manifest_ok():\n manifest = JSONDict(\n {\n \"categories\": [\"os & system\", \"log collection\"],\n \"creates_events\": False,\n \"description\": \"Collect and graph Microsoft Active Directory metrics\",\n \"display_name\": \"Active Directory\",\n \"guid\": \"ba667ff3-cf6a-458c-aa4b-1172f33de562\",\n \"is_public\": True,\n \"maintainer\": \"help@datadoghq.com\",\n \"manifest_version\": \"1.0.0\",\n \"metric_prefix\": \"active_directory.\",\n \"metric_to_check\": \"active_directory.dra.inbound.objects.persec\",\n \"name\": \"active_directory\",\n \"public_title\": \"Datadog-Active Directory Integration\",\n \"short_description\": \"Collect and graph Microsoft Active Directory metrics\",\n \"support\": \"core\",\n \"supported_os\": [\"windows\"],\n \"type\": \"check\",\n \"integration_id\": \"active-directory\",\n \"assets\": {\n \"configuration\": {\"spec\": \"assets/configuration/spec.yaml\"},\n \"monitors\": {},\n \"dashboards\": {\"Active Directory\": \"assets/dashboards/active_directory.json\"},\n \"service_checks\": \"assets/service_checks.json\",\n \"logs\": {\"source\": \"ruby\"},\n \"metrics_metadata\": \"metadata.csv\",\n },\n }\n )\n root = Path(os.path.realpath(__file__)).parent.parent.parent.parent.parent.absolute()\n current_root = get_root()\n set_root(str(root))\n try:\n validators = get_all_validators(False, \"1.0.0\")\n for validator in validators:\n validator.validate('active_directory', manifest, False)\n assert not validator.result.failed, validator.result\n assert not validator.result.fixed\n finally:\n set_root(current_root)\n", "sub_path": "datadog_checks_dev/tests/tooling/manifest_validator/test_validator.py", "file_name": "test_validator.py", "file_ext": "py", "file_size_in_byte": 2193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datadog_checks.dev.tooling.datastructures.JSONDict", "line_number": 13, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "datadog_checks.dev.tooling.constants.get_root", "line_number": 43, "usage_type": "call"}, {"api_name": "datadog_checks.dev.tooling.constants.set_root", "line_number": 44, "usage_type": "call"}, {"api_name": "datadog_checks.dev.tooling.manifest_validator.get_all_validators", "line_number": 46, "usage_type": "call"}, {"api_name": "datadog_checks.dev.tooling.constants.set_root", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "85523096", "text": "\nimport pandas as pd\nimport numpy as np\nimport nltk\nfrom tensorflow.keras.utils import to_categorical\nfrom nltk import RegexpTokenizer\nfrom nltk.corpus import stopwords, wordnet\ntry:\n nltk.data.find('tokenizers/punkt')\nexcept LookupError:\n nltk.download('punkt')\n\ntry:\n nltk.data.find('corpus/stopwords')\nexcept LookupError:\n nltk.download('stopwords')\n\n\nclass TextDataset:\n \"\"\" TextDataset object is used to manage text dataset\n It builds from json file\n\n Args:\n json_files (tuple): paths of the files to parse\n x_col 
(str): name of the column containing data\n y_col (str): name of the column containing labels\n\n Attributes:\n _data (pandas.DataFrame): DataFrame containing the full set\n \"\"\"\n\n def __init__(self, json_files:str, x_col:str, y_col:str):\n self._data = self._read_json(json_files)\n self._labels = None\n\n self._add_one_hot()\n\n print(f'Loaded {self.__len__()} rows')\n\n def _read_json(self, tuple_files:tuple) -> pd.DataFrame:\n \"\"\" Read multiple JSON files and concatenate them into a single DataFrame\n\n Parameters:\n tuple_files (tuple): paths of the files\n \"\"\"\n df = pd.DataFrame()\n\n for file in tuple_files:\n df = df.append(pd.read_json(file), ignore_index=True)\n\n return df\n\n def _add_one_hot(self):\n \"\"\" Add labels converted to one hot vector to the dataset\n \"\"\"\n self._labels, indices = np.unique(self._data['intent'], return_inverse=True)\n one_hot_values = to_categorical(indices)\n\n self._data = pd.concat((self._data, pd.DataFrame(one_hot_values)), axis=1)\n\n def _find_synonyms(self, word:str) -> list:\n \"\"\" Find the French synonyms of a given word\n\n Parameters:\n word (str): a word\n\n Returns:\n list: A list of synonyms of the given word\n \"\"\"\n synonyms = []\n for synset in wordnet.synsets(word):\n for syn in synset.lemma_names('fra'):\n if syn not in synonyms:\n synonyms.append(syn)\n\n return synonyms\n\n def _synonym_replacement(self, sentence:str) -> list:\n \"\"\" Build new sentences by replacing some words with their synonyms\n\n Parameters:\n sentence (str): a sentence\n\n Returns:\n list: Outputs a list of sentences with modified words\n \"\"\"\n toknizer = RegexpTokenizer(r'''\\w'|\\w+|[^\\w\\s]''')\n words = toknizer.tokenize(sentence)\n stoplist = stopwords.words('french')\n stoplist.append('ferret')\n n_sentence = []\n for w in words:\n if w not in stoplist:\n syn = self._find_synonyms(w)\n if len(syn) > 0:\n for s in syn[:min(10, len(syn))]:\n n_sentence.append(sentence.replace(w, s))\n\n return n_sentence\n\n def augment_data(self) -> pd.DataFrame:\n \"\"\" Augment the dataset\n \"\"\"\n new_sentences = []\n labels = []\n one_hot_lab = []\n for index, row in self._data.iterrows():\n if row['intent'] != 'irrelevant':\n sentences = self._synonym_replacement(row['sentence'])\n for s in sentences:\n new_sentences.append(s)\n labels.append(row['intent'])\n vector = np.zeros(8)\n idx = list(self._labels).index(row['intent'])\n vector[idx] = 1\n one_hot_lab.append(vector)\n\n new_data = pd.DataFrame({'sentence': new_sentences, 'intent': labels})\n ones = pd.DataFrame(one_hot_lab)\n return pd.concat((new_data, ones), axis=1)\n\n def augment_and_balance(self):\n \"\"\" Augment and balance the dataset: it takes the smallest number of occurrences\n of any class and balances the counts in the other classes\n \"\"\"\n self._data = self._data.sample(frac=1)\n augmented_data = self.augment_data().sample(frac=1)\n\n # counts\n count_init = self._data['intent'].value_counts()\n count_augm = augmented_data['intent'].value_counts()\n count_augm['irrelevant'] = 0\n\n sum_counts = count_init + count_augm\n min_value = min(sum_counts)\n\n n_df = pd.DataFrame()\n\n for cl in self._labels:\n if count_init[cl] >= min_value:\n select = self._data.loc[self._data['intent'] == cl][:min_value]\n n_df = n_df.append(select, ignore_index=True)\n else:\n missing_data = min_value - count_init[cl]\n n_df = n_df.append(self._data.loc[self._data['intent'] == cl], ignore_index=True)\n select = augmented_data.loc[augmented_data['intent'] == cl][:missing_data]\n n_df = n_df.append(select, 
ignore_index=True)\n\n balanced_data = n_df.sample(frac=1)\n balanced_data['intent'].value_counts()\n self._data = balanced_data\n print(f'Dataset contains now {self.__len__()} rows')\n\n def split_data(self, frac=0.2) -> tuple:\n \"\"\" Split the dataset into training set and testing set\n\n Parameters:\n frac (double): the fraction of dataset to be used as test set\n\n Returns:\n tuple: outputs a tuple containing the train and the test dataset\n \"\"\"\n df = self._data.sample(frac=1)\n size_train = int((1 - frac) * self.__len__())\n return df[:size_train], df[size_train:]\n\n @property\n def data(self):\n return self._data\n\n def __len__(self):\n return len(self._data)\n", "sub_path": "src/TextDataset.py", "file_name": "TextDataset.py", "file_ext": "py", "file_size_in_byte": 5582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "nltk.data.find", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.data", "line_number": 9, "usage_type": "attribute"}, {"api_name": "nltk.download", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.data.find", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.data", "line_number": 14, "usage_type": "attribute"}, {"api_name": "nltk.download", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet.synsets", "line_number": 71, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 71, "usage_type": "name"}, {"api_name": "nltk.RegexpTokenizer", "line_number": 87, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 89, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "355276877", "text": "import argparse\nimport datetime\nimport os\nimport pickle \nimport uuid\n\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np \nfrom PIL import Image\nfrom moviepy.editor import VideoFileClip\n\nfrom torch.autograd import Variable \nfrom torchvision import transforms\n\nfrom .model import EncoderCNN, DecoderRNN\n\n\ndef to_var(x, volatile=False):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, volatile=volatile)\n\n\ndef transform_image(image, transform=None):\n image = image.resize([224, 224], Image.LANCZOS)\n \n if transform is not None:\n image = transform(image).unsqueeze(0)\n \n return image\n \n\ndef load_model(vocab_path, embed_size, hidden_size, num_layers, encoder_path, decoder_path):\n \n transform = 
transforms.Compose([\n transforms.ToTensor(), \n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n\n # Load vocabulary wrapper\n with open(vocab_path, 'rb') as f:\n vocab = pickle.load(f)\n\n # Build Models\n encoder = EncoderCNN(embed_size)\n encoder.eval() # evaluation mode (BN uses moving mean/variance)\n decoder = DecoderRNN(embed_size, hidden_size, \n len(vocab), num_layers)\n \n # Load the trained model parameters\n encoder.load_state_dict(torch.load(encoder_path))\n decoder.load_state_dict(torch.load(decoder_path))\n\n return encoder, decoder, vocab, transform\n\n\ndef caption_video(encoder, decoder, vocab, transform, video, fps=0.1, save=False, image_dir=None):\n # Image preprocessing\n report = []\n for i, frame in enumerate(video.iter_frames(fps=fps)):\n time_stamp = datetime.timedelta(seconds=i / fps)\n\n image = Image.fromarray(frame)\n image = transform_image(image, transform)\n image_tensor = to_var(image, volatile=True)\n \n # Move models to the GPU if available\n if torch.cuda.is_available():\n encoder.cuda()\n decoder.cuda()\n \n # Generate caption from image\n feature = encoder(image_tensor)\n sampled_ids = decoder.sample(feature)\n sampled_ids = sampled_ids.cpu().data.numpy()\n \n # Decode word_ids to words\n sampled_caption = []\n for word_id in sampled_ids:\n word = vocab.idx2word[word_id]\n if word != '<start>' and word != '<end>':\n sampled_caption.append(word)\n if word == '<end>':\n break\n sentence = ' '.join(sampled_caption)\n \n report.append((str(time_stamp), sentence))\n\n print(time_stamp, sentence)\n\n # Save image and generated caption\n if save == 'true' and image_dir:\n plt.axis('off')\n plt.imshow(frame)\n plt.title(sentence)\n plt.savefig(os.path.join(image_dir, str(uuid.uuid4()) + str(i)), bbox_inches='tight')\n\n return report\n", "sub_path": "lib/caption.py", "file_name": "caption.py", "file_ext": "py", "file_size_in_byte": 2904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 43, "usage_type": "call"}, {"api_name": "model.EncoderCNN", "line_number": 46, "usage_type": "call"}, {"api_name": "model.DecoderRNN", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 69, "usage_type": 
"call"}, {"api_name": "torch.cuda", "line_number": 69, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "253861775", "text": "from random import choice\nimport random\nimport aiohttp\nimport discord\nfrom discord.ext import commands\nfrom .utils.chat_formatting import *\nfrom .utils.dataIO import dataIO\nfrom .utils.dataIO import fileIO\nfrom cogs.utils import checks\n\n\nclass TrustyBot:\n def __init__(self, bot):\n self.bot = bot\n self.text = dataIO.load_json(\"data/trustybot/messages.json\")\n self.links = dataIO.load_json(\"data/trustybot/links.json\")\n self.images = dataIO.load_json(\"data/trustybot/images.json\")\n self.files = dataIO.load_json(\"data/trustybot/files.json\")\n self.donotdo = dataIO.load_json(\"data/dnd/donotdo.json\")\n\n def first_word(self, msg):\n return msg.split(\" \")[0]\n\n def get_prefix(self, server, msg):\n prefixes = self.bot.settings.get_prefixes(server)\n for p in prefixes:\n if msg.startswith(p):\n return p\n return None\n\n def part_of_existing_command(self, alias, server):\n '''Command or alias'''\n for command in self.bot.commands:\n if alias.lower() == command.lower():\n return True\n return False\n\n async def on_message(self, message):\n if len(message.content) < 2 or message.channel.is_private:\n return\n\n msg = message.content\n server = message.server\n channel = message.channel\n prefix = self.get_prefix(server, msg)\n\n if not prefix:\n return\n ignorelist = [\"dickbutt\", \"cookie\", \"tinfoil\", \"donate\", \"dreams\", \"memes\"]\n\n alias = self.first_word(msg[len(prefix):]).lower()\n if alias in ignorelist:\n return\n\n if alias in self.images:\n image = self.images[alias]\n await self.bot.send_typing(channel)\n await self.bot.send_file(channel, image)\n \n if alias in self.links:\n link = self.links[alias]\n await self.bot.send_typing(channel)\n await self.bot.send_message(channel, link)\n \n if alias in self.text:\n msg = self.text[alias]\n await self.bot.send_typing(channel)\n await self.bot.send_message(channel, msg)\n\n @commands.command(pass_context=True)\n async def addimage(self, ctx, command):\n \"\"\"Add an image to direct upload.\"\"\"\n author = ctx.message.author\n server = ctx.message.server\n channel = ctx.message.channel\n prefix = self.get_prefix(server, ctx.message.content)\n msg = ctx.message\n if command is not \"\":\n if command in self.images or self.part_of_existing_command(command, server):\n await self.bot.say(\"{} is already in the list, try another!\".format(command))\n return\n else:\n await self.bot.say(\"{} added as the command!\".format(command))\n await self.bot.say(\"Upload an image for me to use!\")\n while msg is not None:\n msg = await self.bot.wait_for_message(author=author, 
timeout=60)\n if msg is None:\n await self.bot.say(\"No image uploaded then.\")\n break\n\n if msg.attachments != []:\n filename = msg.attachments[0][\"filename\"]\n directory = \"data/trustybot/img/\" + filename\n if command is None:\n command = filename.split(\".\")[0]\n if directory in self.images.values():\n seed = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n directory = \"data/trustybot/img/\" + seed + filename\n if directory not in self.images.values():\n self.images[command] = directory\n dataIO.save_json(\"data/trustybot/images.json\", self.images)\n async with aiohttp.ClientSession() as session:\n async with session.get(msg.attachments[0][\"url\"]) as resp:\n test = await resp.read()\n with open(self.images[command], \"wb\") as f:\n f.write(test)\n await self.bot.send_message(channel, \"{} has been added to my files!\"\n .format(command))\n break\n if msg.content.lower().strip() == \"exit\":\n await self.bot.say(\"Your changes have been saved.\")\n break\n \n @commands.command()\n async def listimages(self):\n \"\"\"List images added to bot\"\"\"\n msg = \"\"\n for image in self.images.keys():\n msg += image + \", \"\n await self.bot.say(\"```\" + msg[:len(msg)-2] + \"```\")\n \n @commands.command()\n async def listtext(self):\n \"\"\"List phrases added to bot\"\"\"\n msg = \"\"\n for text in self.text.keys():\n msg += text + \", \"\n await self.bot.say(\"```\" + msg[:len(msg)-2] + \"```\")\n \n @commands.command()\n async def listlinks(self):\n \"\"\"List links added to bot\"\"\"\n msg = \"\"\n for link in self.links.keys():\n msg += link + \", \"\n await self.bot.say(\"```\" + msg[:len(msg)-2] + \"```\")\n \n\n @commands.command(pass_context=True, aliases=[\"db\"])\n async def dickbutt(self, ctx):\n \"\"\"DickButt\"\"\"\n ext = [\"png\", \"gif\"]\n if ctx.message.server.id != \"261565811309674499\":\n await self.bot.upload(self.images[\"dickbutt\"]\n .format(choice(ext)))\n \n @commands.command(pass_context=True)\n async def neat(self, ctx, number=None):\n \"\"\"Neat\"\"\"\n files = \"data/trustybot/img/neat{}.gif\"\n if number is None:\n await self.bot.upload(files.format(str(choice(range(1, 6)))))\n elif number.isdigit() and (int(number) > 0 and int(number) < 8):\n await self.bot.upload(files.format(number))\n\n @commands.command(pass_context=True)\n async def cookie(self, ctx, user=None):\n \"\"\"cookie\"\"\"\n msg = \"Here's a cookie {}! 
:smile:\"\n if user is None:\n await self.bot.upload(self.images[\"cookie\"])\n else:\n await self.bot.upload(self.images[\"cookie\"],\n content=msg.format(user))\n\n @commands.command(pass_context=True, aliases=[\"tf\"])\n async def tinfoil(self, ctx):\n \"\"\"Liquid Metal Embrittlement\"\"\"\n await self.bot.upload(self.images[\"tinfoil\"]\n .format(choice([\"1\", \"2\"])))\n\n @commands.command(pass_context=True,)\n async def donate(self, ctx):\n \"\"\"Donate some bitcoin!\"\"\"\n gabcoin = \"1471VCzShn9kBSrZrSX1Y3KwjrHeEyQtup\"\n DONATION = \"1DMfQgbyEW1u6M2XbUt5VFP6JARNs8uptQ\"\n msg = \"Feel free to send bitcoin donations to `{}` :smile:\"\n gabimg = \"data/trustybot/img/gabbtc.jpg\"\n img = \"data/trustybot/img/btc.png\"\n if ctx.message.server.id == \"261565811309674499\":\n await self.bot.upload(gabimg)\n await self.bot.say(msg.format(gabcoin))\n else:\n await self.bot.upload(img)\n await self.bot.say(msg.format(DONATION))\n\n # Text Commands #\n @commands.command(hidden=False)\n @commands.cooldown(1, 60, commands.BucketType.server)\n async def grep(self):\n \"\"\"Get the fuck out of here with grep!\"\"\"\n await self.bot.say(\"Get the fuck out of here with grep!\")\n \n @commands.command(pass_context=True)\n async def dnd(self, ctx, number=None):\n if number is None:\n await self.bot.say(choice(self.donotdo))\n elif number.isdigit():\n await self.bot.say(self.donotdo[int(number)-1])\n else:\n await self.bot.say(choice(self.donotdo))\n\n @commands.command(hidden=False)\n async def passphrase(self):\n \"\"\"Wikileaks Vault7 Part 1 passphrase\"\"\"\n await self.bot.say(\"`SplinterItIntoAThousandPiecesAndScatterItIntoTheWinds`\")\n\n @commands.command(name=\"pineal\", aliases=[\"pineal gland\"])\n async def pinealGland(self, message=None):\n \"\"\"Links to pineal gland\"\"\"\n if message == \"calcification\" or message == \"calcified\":\n await self.bot.say(self.links[\"pineal\"][1])\n if message == \"healthy\":\n await self.bot.say(self.links[\"pineal\"][2])\n if message is None:\n await self.bot.say(self.links[\"pineal\"][0])\n\n @commands.command(hiddent=False, pass_context=True)\n async def illuminati(self, ctx):\n \"\"\"o.o\"\"\"\n emilum = [\"\\U0001F4A1\", \"\\U000026A0\", \"\\U0000203C\", \"\\U000026D4\"]\n ilum = \":bulb: :warning: :bangbang: :no_entry:\"\n msg = await self.bot.say(ilum)\n for i in emilum:\n await self.bot.add_reaction(msg, emoji=i)\n\n @commands.command(hidden=False)\n async def halp(self, user=None):\n \"\"\"How to ask for help!\"\"\"\n msg = \"{} please type `;help` to be PM'd all my commands! 
:smile:\"\n if user is None:\n await self.bot.say(msg.format(\"\"))\n else:\n await self.bot.say(msg.format(user))\n\n @commands.command(hidden=False)\n async def dreams(self):\n \"\"\"don't let your dreams be dreams\"\"\"\n await self.bot.say(self.text[\"dreams\"].format(\"dreams\"))\n\n @commands.command(hidden=False)\n async def memes(self):\n \"\"\"don't let your memes be dreams\"\"\"\n await self.bot.say(self.text[\"dreams\"].format(\"memes\"))\n\n @commands.command(pass_context=True)\n async def flipm(self, ctx, *, message):\n \"\"\"Flips a message\"\"\"\n msg = \"\"\n name = \"\"\n for user in message:\n char = \"abcdefghijklmnopqrstuvwxyz - ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n tran = \"ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz - ∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z\"\n table = str.maketrans(char, tran)\n name += user.translate(table) + \" \"\n await self.bot.say(msg + \"(╯°□°)╯︵ \" + name[::-1])\n\n\ndef setup(bot):\n n = TrustyBot(bot)\n bot.add_cog(n)\n", "sub_path": "trustybot/trustybot.py", "file_name": "trustybot.py", "file_ext": "py", "file_size_in_byte": 9979, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "utils.dataIO.dataIO.load_json", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.dataIO.dataIO.load_json", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.dataIO.dataIO.load_json", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO", "line_number": 17, "usage_type": "name"}, {"api_name": "utils.dataIO.dataIO.load_json", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.dataIO.dataIO.load_json", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO", "line_number": 19, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 97, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO.save_json", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.dataIO.dataIO", "line_number": 101, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 102, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 70, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 70, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 114, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 114, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 122, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 122, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 130, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 130, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 145, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 139, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 139, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 152, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 147, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 147, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 156, "usage_type": 
"call"}, {"api_name": "discord.ext.commands", "line_number": 156, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 170, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 166, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 166, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 172, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 172, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 188, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 188, "usage_type": "name"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 189, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 189, "usage_type": "name"}, {"api_name": "discord.ext.commands.BucketType", "line_number": 189, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 197, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 201, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 194, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 194, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 203, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 203, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 208, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 208, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 218, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 218, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 227, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 227, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 236, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 236, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 241, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 241, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 246, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 246, "usage_type": "name"}]} +{"seq_id": "32835450", "text": "# -*- coding: utf-8 -*-\n#\n# const.py - A set of structures and constants used to implement the Ethernet/IP protocol\n#\n# Copyright (c) 2019 Ian Ottoway \n# Copyright (c) 2014 Agostino Ruscito \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport struct\nfrom collections import defaultdict\nfrom types import GeneratorType\nfrom typing import Union, List, Sequence, Tuple, Optional, Any\n\nfrom autologging import logged\n\nfrom . import DataError, Tag, RequestError\nfrom .base import Base\nfrom .bytes_ import (pack_dint, pack_uint, pack_udint, pack_usint, unpack_usint, unpack_uint, unpack_dint, unpack_udint,\n pack_sint, UNPACK_DATA_FUNCTION, PACK_DATA_FUNCTION, DATA_FUNCTION_SIZE)\nfrom .const import (SUCCESS, EXTENDED_SYMBOL, ENCAPSULATION_COMMAND, DATA_TYPE, BITS_PER_INT_TYPE,\n REPLY_INFO, TAG_SERVICES_REQUEST, PADDING_BYTE, ELEMENT_ID, DATA_ITEM, ADDRESS_ITEM,\n CLASS_ID, CLASS_CODE, INSTANCE_ID, INSUFFICIENT_PACKETS, REPLY_START, BASE_TAG_BIT,\n MULTISERVICE_READ_OVERHEAD, MULTISERVICE_WRITE_OVERHEAD, MIN_VER_INSTANCE_IDS, REQUEST_PATH_SIZE,\n VENDORS, PRODUCT_TYPES, KEYSWITCH, TAG_SERVICES_REPLY, get_service_status, get_extended_status,\n TEMPLATE_MEMBER_INFO_LEN, EXTERNAL_ACCESS, DATA_TYPE_SIZE)\n\n# re_bit = re.compile(r'(?P<base>^.*)\\.(?P<bit>([0-2][0-9])|(3[01])|[0-9])$')\n\n\ndef with_forward_open(func):\n \"\"\"Decorator to ensure a forward open request has been completed with the PLC\"\"\"\n\n def forward_open_decorator(self, *args, **kwargs):\n if not self.forward_open():\n msg = f'Target is not connected. {func.__name__} will not be executed.'\n self.__log.warning(msg)\n raise DataError(msg)\n return func(self, *args, **kwargs)\n\n return forward_open_decorator\n\n\n@logged\nclass LogixDriver(Base):\n \"\"\"\n An Ethernet/IP Client library for reading and writing tags in ControlLogix and CompactLogix PLCs.\n\n The following services have been implemented:\n - Read Tag Service (0x4c)\n - Read Tag Fragment Service (0x52)\n - Write Tag Service (0x4d)\n - Write Tag Fragment Service (0x53)\n - Multiple Service Packet (0x0a)\n - Read Modify Write Tag (0xce)\n\n\"\"\"\n\n def __init__(self, ip_address: str, *args, slot: int = 0, large_packets: bool = True,\n init_info: bool = True, init_tags: bool = True, init_program_tags: bool = False, **kwargs):\n \"\"\"\n :param ip_address: IP address of PLC\n :param slot: Slot of PLC in chassis (leave at 0 for CompactLogix)\n :param large_packets: if True, Extended Forward Open will be used (v20+ and EN2T+)\n :param init_info: if True, initializes controller info (name, revision, etc) on connect\n :param init_tags: if True, uploads all controller-scoped tag definitions on connect\n :param init_program_tags: if True, uploads all program-scoped tag definitions on connect\n \"\"\"\n super().__init__(*args, **kwargs)\n self._cache = None\n\n self._data_types = {}\n self._program_names = set()\n self._tags = {}\n\n self.attribs['ip address'] = ip_address\n self.attribs['cpu slot'] = slot\n self.attribs['extended forward open'] = large_packets\n self.connection_size = 4000 if large_packets else 500\n self.use_instance_ids = True\n\n if init_tags or init_info:\n self.open()\n if init_info:\n self.get_plc_info()\n self.use_instance_ids = self.info.get('version_major', 0) >= MIN_VER_INSTANCE_IDS\n self.get_plc_name()\n\n if init_tags:\n self.get_tag_list(program='*' if init_program_tags else None)\n\n @property\n def tags(self) -> dict:\n \"\"\"\n Read-only property to access all the tag definitions uploaded from the 
controller.\n \"\"\"\n return self._tags\n\n @property\n def data_types(self):\n return self._data_types\n\n @with_forward_open\n def get_plc_name(self) -> str:\n try:\n request = self.new_request('send_unit_data')\n request.add(\n bytes([TAG_SERVICES_REQUEST['Get Attributes']]),\n REQUEST_PATH_SIZE,\n CLASS_ID['8-bit'],\n CLASS_CODE['Program Name'],\n INSTANCE_ID[\"16-bit\"],\n b'\\x00',\n b'\\x01\\x00', # Instance 1\n b'\\x01\\x00', # Number of Attributes\n b'\\x01\\x00' # Attribute 1 - program name\n )\n\n response = request.send()\n\n if response:\n self._info['name'] = _parse_plc_name(response)\n return self._info['name']\n else:\n raise DataError(f'send_unit_data did not return valid data - {response.error}')\n\n except Exception as err:\n raise DataError(err)\n\n @with_forward_open\n def get_plc_info(self) -> dict:\n \"\"\"\n Reads basic information from the controller, returns it and stores it in the ``info`` property.\n\n info = {\n 'vendor': 'Rockwell Automation/Allen-Bradley',\n 'product_type': 'Programmable Logic Controller',\n 'product_code': 55,\n 'version_major': 20,\n 'version_minor': 12,\n 'revision': '20.12',\n 'serial': '00ff00ff',\n 'device_type': '1756-L62/B LOGIX5562',\n 'keyswitch': 'REMOTE RUN',\n 'name': 'PLCA'\n }\n \"\"\"\n try:\n request = self.new_request('send_unit_data')\n request.add(\n b'\\x01', # Service\n REQUEST_PATH_SIZE,\n CLASS_ID['8-bit'],\n CLASS_CODE['Identity Object'],\n INSTANCE_ID[\"16-bit\"],\n b'\\x00',\n b'\\x01\\x00', # Instance 1\n )\n response = request.send()\n\n if response:\n info = _parse_plc_info(response.data)\n self._info = {**self._info, **info}\n return info\n else:\n raise DataError(f'send_unit_data did not return valid data - {response.error}')\n\n except Exception as err:\n raise DataError(err)\n\n @with_forward_open\n def get_tag_list(self, program: str = None, cache: bool = True) -> List[dict]:\n \"\"\"\n Returns the list of tags from the controller. For only controller-scoped tags, set `program` to None (default).\n Set `program` to a program name to only get the program scoped tags from the specified program.\n To get all controller and all program scoped tags from all programs, set `program` to '*'\n\n Note, for program scoped tags the tag['tag_name'] will be 'Program:{program}.{tag_name}'. This is so the tag\n list can be fed directly into the read function.\n\n If the `cache` parameter is True (default), the list of tags will be stored so they can be referenced later.\n This also allows the read/write methods to use the cached instance id's and allow packing more tags into a single\n request. 
While this method returns a list of tags, when cached the tag list is stored as a dict of {tag['tag_name'] : tag, ...}\n \"\"\"\n\n self._cache = {\n 'tag_name:id': {},\n 'id:struct': {},\n 'handle:id': {},\n 'id:udt': {}\n }\n\n if program == '*':\n tags = self._get_tag_list()\n for prog in self._program_names:\n tags += self._get_tag_list(prog)\n else:\n tags = self._get_tag_list(program)\n\n if cache:\n self._tags = {tag['tag_name']: tag for tag in tags}\n\n self._cache = None\n\n return tags\n\n def _get_tag_list(self, program=None):\n all_tags = self._get_instance_attribute_list_service(program)\n user_tags = self._isolating_user_tag(all_tags, program)\n for tag in user_tags:\n if tag['tag_type'] == 'struct':\n tag['data_type'] = self._get_data_type(tag['template_instance_id'])\n\n return user_tags\n\n def _get_instance_attribute_list_service(self, program=None):\n \"\"\" Step 1: Finding user-created controller scope tags in a Logix5000 controller\n\n This service returns instance IDs for each created instance of the symbol class, along with a list\n of the attribute data associated with the requested attribute\n \"\"\"\n try:\n last_instance = 0\n tag_list = []\n while last_instance != -1:\n # Creating the Message Request Packet\n path = []\n if program:\n if not program.startswith('Program:'):\n program = f'Program:{program}'\n path = [EXTENDED_SYMBOL, pack_usint(len(program)), program.encode('utf-8')]\n if len(program) % 2:\n path.append(b'\\x00')\n\n path += [\n # Request Path ( 20 6B 25 00 Instance )\n CLASS_ID[\"8-bit\"], # Class id = 20 from spec 0x20\n CLASS_CODE[\"Symbol Object\"], # Logical segment: Symbolic Object 0x6B\n INSTANCE_ID[\"16-bit\"], # Instance Segment: 16 Bit instance 0x25\n b'\\x00',\n pack_uint(last_instance), # The instance\n ]\n path = b''.join(path)\n path_size = pack_usint(len(path) // 2)\n request = self.new_request('send_unit_data')\n request.add(\n bytes([TAG_SERVICES_REQUEST['Get Instance Attributes List']]),\n path_size,\n path,\n # Request Data\n b'\\x07\\x00', # Number of attributes to retrieve\n b'\\x01\\x00', # Attr. 1: Symbol name\n b'\\x02\\x00', # Attr. 2 : Symbol Type\n b'\\x03\\x00', # Attr. 3 : Symbol Address\n b'\\x05\\x00', # Attr. 5 : Symbol Object Address\n b'\\x06\\x00', # Attr. 6 : ? - Not documented (Software Control?)\n b'\\x0a\\x00', # Attr. 10 : external access\n b'\\x08\\x00' # Attr. 
8 : array dimensions [1,2,3]\n )\n response = request.send()\n if not response:\n raise DataError(f\"send_unit_data did not return valid data - {response.error}\")\n\n last_instance = self._parse_instance_attribute_list(response, tag_list)\n return tag_list\n\n except Exception as e:\n raise DataError(e)\n\n def _parse_instance_attribute_list(self, response, tag_list):\n \"\"\" extract the tags list from the message received\"\"\"\n\n tags_returned = response.data\n tags_returned_length = len(tags_returned)\n idx = count = instance = 0\n try:\n while idx < tags_returned_length:\n instance = unpack_dint(tags_returned[idx:idx + 4])\n idx += 4\n tag_length = unpack_uint(tags_returned[idx:idx + 2])\n idx += 2\n tag_name = tags_returned[idx:idx + tag_length]\n idx += tag_length\n symbol_type = unpack_uint(tags_returned[idx:idx + 2])\n idx += 2\n count += 1\n symbol_address = unpack_udint(tags_returned[idx:idx + 4])\n idx += 4\n symbol_object_address = unpack_udint(tags_returned[idx:idx + 4])\n idx += 4\n software_control = unpack_udint(tags_returned[idx:idx + 4])\n idx += 4\n access = tags_returned[idx] & 0b_0011\n idx += 1\n dim1 = unpack_udint(tags_returned[idx:idx + 4])\n idx += 4\n dim2 = unpack_udint(tags_returned[idx:idx + 4])\n idx += 4\n dim3 = unpack_udint(tags_returned[idx:idx + 4])\n idx += 4\n\n tag_list.append({'instance_id': instance,\n 'tag_name': tag_name,\n 'symbol_type': symbol_type,\n 'symbol_address': symbol_address,\n 'symbol_object_address': symbol_object_address,\n 'software_control': software_control,\n 'external_access': EXTERNAL_ACCESS.get(access, 'Unknown'),\n 'dimensions': [dim1, dim2, dim3]})\n\n except Exception as e:\n raise DataError(e)\n\n if response.service_status == SUCCESS:\n last_instance = -1\n elif response.service_status == INSUFFICIENT_PACKETS:\n last_instance = instance + 1\n else:\n self.__log.warning('unknown status during _parse_instance_attribute_list')\n last_instance = -1\n\n return last_instance\n\n def _isolating_user_tag(self, all_tags, program=None):\n try:\n user_tags = []\n for tag in all_tags:\n name = tag['tag_name'].decode()\n if 'Program:' in name:\n self._program_names.add(name.replace('Program:', ''))\n continue\n if ':' in name or '__' in name:\n continue\n if tag['symbol_type'] & 0b0001_0000_0000_0000:\n continue\n\n if program is not None:\n name = f'Program:{program}.{name}'\n\n self._cache['tag_name:id'][name] = tag['instance_id']\n\n new_tag = {\n 'tag_name': name,\n 'dim': (tag['symbol_type'] & 0b0110000000000000) >> 13, # bit 13 & 14, number of array dims\n 'instance_id': tag['instance_id'],\n 'symbol_address': tag['symbol_address'],\n 'symbol_object_address': tag['symbol_object_address'],\n 'software_control': tag['software_control'],\n 'alias': False if tag['software_control'] & BASE_TAG_BIT else True,\n 'external_access': tag['external_access'],\n 'dimensions': tag['dimensions']\n }\n\n if tag['symbol_type'] & 0b_1000_0000_0000_0000: # bit 15, 1 = struct, 0 = atomic\n template_instance_id = tag['symbol_type'] & 0b_0000_1111_1111_1111\n new_tag['tag_type'] = 'struct'\n new_tag['template_instance_id'] = template_instance_id\n else:\n new_tag['tag_type'] = 'atomic'\n datatype = tag['symbol_type'] & 0b_0000_0000_1111_1111\n new_tag['data_type'] = DATA_TYPE[datatype]\n if datatype == DATA_TYPE['BOOL']:\n new_tag['bit_position'] = (tag['symbol_type'] & 0b_0000_0111_0000_0000) >> 8\n\n user_tags.append(new_tag)\n\n return user_tags\n except Exception as e:\n raise DataError(e)\n\n def _get_structure_makeup(self, instance_id):\n 
\"\"\"\n get the structure makeup for a specific structure\n \"\"\"\n if instance_id not in self._cache['id:struct']:\n if not self._target_is_connected:\n if not self.forward_open():\n self.__log.warning(\"Target did not connected. get_tag_list will not be executed.\")\n raise DataError(\"Target did not connected. get_tag_list will not be executed.\")\n request = self.new_request('send_unit_data')\n request.add(\n bytes([TAG_SERVICES_REQUEST['Get Attributes']]),\n b'\\x03', # Request Path ( 20 6B 25 00 Instance )\n CLASS_ID[\"8-bit\"], # Class id = 20 from spec 0x20\n CLASS_CODE[\"Template Object\"], # Logical segment: Template Object 0x6C\n INSTANCE_ID[\"16-bit\"], # Instance Segment: 16 Bit instance 0x25\n b'\\x00',\n pack_uint(instance_id),\n b'\\x04\\x00', # Number of attributes\n b'\\x04\\x00', # Template Object Definition Size UDINT\n b'\\x05\\x00', # Template Structure Size UDINT\n b'\\x02\\x00', # Template Member Count UINT\n b'\\x01\\x00', # Structure Handle We can use this to read and write UINT\n )\n\n response = request.send()\n if not response:\n raise DataError(f\"send_unit_data returned not valid data\", response.error)\n _struct = self._parse_structure_makeup_attributes(response)\n self._cache['id:struct'][instance_id] = _struct\n self._cache['handle:id'][_struct['structure_handle']] = instance_id\n\n return self._cache['id:struct'][instance_id]\n\n @staticmethod\n def _parse_structure_makeup_attributes(response):\n \"\"\" extract the tags list from the message received\"\"\"\n structure = {}\n\n if response.service_status != SUCCESS:\n structure['Error'] = response.service_status\n return\n\n attribute = response.data\n idx = 4\n try:\n if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:\n idx += 2\n structure['object_definition_size'] = unpack_dint(attribute[idx:idx + 4])\n else:\n structure['Error'] = 'object_definition Error'\n return structure\n\n idx += 6\n if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:\n idx += 2\n structure['structure_size'] = unpack_dint(attribute[idx:idx + 4])\n else:\n structure['Error'] = 'structure Error'\n return structure\n\n idx += 6\n if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:\n idx += 2\n structure['member_count'] = unpack_uint(attribute[idx:idx + 2])\n else:\n structure['Error'] = 'member_count Error'\n return structure\n\n idx += 4\n if unpack_uint(attribute[idx:idx + 2]) == SUCCESS:\n idx += 2\n structure['structure_handle'] = unpack_uint(attribute[idx:idx + 2])\n else:\n structure['Error'] = 'structure_handle Error'\n return structure\n\n return structure\n\n except Exception as e:\n raise DataError(e)\n\n def _read_template(self, instance_id, object_definition_size):\n \"\"\" get a list of the tags in the plc\n\n \"\"\"\n\n offset = 0\n template_raw = b''\n try:\n while True:\n request = self.new_request('send_unit_data')\n request.add(\n bytes([TAG_SERVICES_REQUEST['Read Tag']]),\n b'\\x03', # Request Path ( 20 6B 25 00 Instance )\n CLASS_ID[\"8-bit\"], # Class id = 20 from spec\n CLASS_CODE[\"Template Object\"], # Logical segment: Template Object 0x6C\n INSTANCE_ID[\"16-bit\"], # Instance Segment: 16 Bit instance 0x25\n b'\\x00',\n pack_uint(instance_id),\n pack_dint(offset), # Offset\n pack_uint(((object_definition_size * 4) - 21) - offset)\n )\n response = request.send()\n\n if response.service_status not in (SUCCESS, INSUFFICIENT_PACKETS):\n raise DataError('Error reading template', response)\n\n template_raw += response.data\n\n if response.service_status == SUCCESS:\n break\n\n offset += len(response.data)\n\n 
except Exception:\n raise\n else:\n return template_raw\n\n def _parse_template_data(self, data, member_count):\n info_len = member_count * TEMPLATE_MEMBER_INFO_LEN\n info_data = data[:info_len]\n member_data = [self._parse_template_data_member_info(info)\n for info in (info_data[i:i + TEMPLATE_MEMBER_INFO_LEN]\n for i in range(0, info_len, TEMPLATE_MEMBER_INFO_LEN))]\n member_names = []\n template_name = None\n try:\n for name in (x.decode(errors='replace') for x in data[info_len:].split(b'\\x00') if len(x)):\n if template_name is None and ';' in name:\n template_name, _ = name.split(';', maxsplit=1)\n else:\n member_names.append(name)\n except (ValueError, UnicodeDecodeError):\n raise DataError('Unable to decode template or member names')\n\n predefine = template_name is None\n if predefine:\n template_name = member_names.pop(0)\n\n if template_name == 'ASCIISTRING82': # internal name for STRING builtin type\n template_name = 'STRING'\n\n template = {\n 'name': template_name, # predefined types put name as first member (DWORD)\n 'internal_tags': {},\n 'attributes': []\n }\n\n for member, info in zip(member_names, member_data):\n if not member.startswith('ZZZZZZZZZZ') and not member.startswith('__'):\n template['attributes'].append(member)\n template['internal_tags'][member] = info\n\n if template['attributes'] == ['LEN', 'DATA'] and \\\n template['internal_tags']['DATA']['data_type'] == 'SINT' and \\\n template['internal_tags']['DATA'].get('array'):\n template['string'] = template['internal_tags']['DATA']['array']\n\n return template\n\n def _parse_template_data_member_info(self, info):\n type_info = unpack_uint(info[:2])\n typ = unpack_uint(info[2:4])\n member = {'offset': unpack_udint(info[4:])}\n tag_type = 'atomic'\n if typ in DATA_TYPE:\n data_type = DATA_TYPE[typ]\n else:\n instance_id = typ & 0b0000_1111_1111_1111\n if instance_id in DATA_TYPE:\n data_type = DATA_TYPE[instance_id]\n else:\n tag_type = 'struct'\n data_type = self._get_data_type(instance_id)\n\n member['tag_type'] = tag_type\n member['data_type'] = data_type\n\n if data_type == 'BOOL':\n member['bit'] = type_info\n elif data_type is not None:\n member['array'] = type_info\n\n return member\n\n def _get_data_type(self, instance_id):\n if instance_id not in self._cache['id:udt']:\n try:\n template = self._get_structure_makeup(instance_id) # instance id from type\n if not template.get('Error'):\n _data = self._read_template(instance_id, template['object_definition_size'])\n data_type = self._parse_template_data(_data, template['member_count'])\n data_type['template'] = template\n self._cache['id:udt'][instance_id] = data_type\n self._data_types[data_type['name']] = data_type\n except Exception:\n self.__log.exception('Failed to read data type definition')\n\n return self._cache['id:udt'][instance_id]\n\n @with_forward_open\n def read(self, *tags: str) -> Union[Tag, List[Tag]]:\n \"\"\" Read one or more tags from the PLC\n\n :param tags: one or many tags to read\n :return: one or many ``Tag`` objects\n \"\"\"\n\n parsed_requests = self._parse_requested_tags(tags)\n requests = self._read__build_requests(parsed_requests)\n read_results = self._send_requests(requests)\n\n results = []\n\n for tag in tags:\n try:\n request_data = parsed_requests[tag]\n result = read_results[(request_data['plc_tag'], request_data['elements'])]\n if request_data.get('bit') is None:\n results.append(result)\n else:\n if result:\n typ, bit = request_data['bit']\n if typ == 'bit':\n val = bool(result.value & (1 << bit))\n else:\n val = result.value[bit % 32]\n results.append(Tag(tag, val, 'BOOL'))\n else:\n 
results.append(Tag(tag, None, None, result.error))\n except Exception as err:\n results.append(Tag(tag, None, None, f'Invalid tag request - {err}'))\n\n if len(tags) > 1:\n return results\n else:\n return results[0]\n\n def _read__build_requests(self, parsed_tags):\n requests = []\n response_size = 0\n current_request = self.new_request('multi_request')\n requests.append(current_request)\n tags_in_requests = set()\n for tag, tag_data in parsed_tags.items():\n if tag_data.get('error') is None and (tag_data['plc_tag'], tag_data['elements']) not in tags_in_requests:\n tags_in_requests.add((tag_data['plc_tag'], tag_data['elements']))\n return_size = _tag_return_size(tag_data['tag_info']) * tag_data['elements']\n if return_size > self.connection_size:\n _request = self.new_request('read_tag_fragmented')\n _request.add(tag_data['plc_tag'], tag_data['elements'], tag_data['tag_info'])\n requests.append(_request)\n else:\n try:\n if response_size + return_size < self.connection_size:\n if current_request.add_read(tag_data['plc_tag'], tag_data['elements'],\n tag_data['tag_info']):\n response_size += return_size\n else:\n response_size = return_size\n current_request = self.new_request('multi_request')\n current_request.add_read(tag_data['plc_tag'], tag_data['elements'],\n tag_data['tag_info'])\n requests.append(current_request)\n else:\n response_size = return_size\n current_request = self.new_request('multi_request')\n current_request.add_read(tag_data['plc_tag'], tag_data['elements'], tag_data['tag_info'])\n requests.append(current_request)\n except RequestError:\n self.__log.exception(f'Failed to build request for {tag} - skipping')\n continue\n\n return requests\n\n @with_forward_open\n def write(self, *tags_values: Sequence[Tuple[str, Union[int, float, str, bool]]]) -> Union[Tag, List[Tag]]:\n tags = (tag for (tag, value) in tags_values)\n parsed_requests = self._parse_requested_tags(tags)\n\n normal_tags = set()\n bit_tags = set()\n\n for tag, value in tags_values:\n parsed_requests[tag]['value'] = value\n\n if parsed_requests[tag].get('bit') is None:\n normal_tags.add(tag)\n else:\n bit_tags.add(tag)\n\n requests, bit_writes = self._write__build_requests(parsed_requests)\n write_results = self._send_requests(requests)\n results = []\n for tag, value in tags_values:\n try:\n request_data = parsed_requests[tag]\n bit = parsed_requests[tag].get('bit')\n result = write_results[(request_data['plc_tag'], request_data['elements'])]\n\n if request_data['elements'] > 1:\n result = result._replace(type=f'{result.type}[{request_data[\"elements\"]}]')\n if bit is not None:\n result = result._replace(tag=tag, type='BOOL', value=value)\n else:\n result = result._replace(tag=request_data['plc_tag'], value=value)\n results.append(result)\n except Exception as err:\n results.append(Tag(tag, None, None, f'Invalid tag request - {err}'))\n\n if len(tags_values) > 1:\n return results\n else:\n return results[0]\n\n def _write__build_requests(self, parsed_tags):\n requests = []\n current_request = self.new_request('multi_request')\n requests.append(current_request)\n bit_writes = {}\n\n tags_in_requests = set()\n for tag, tag_data in parsed_tags.items():\n if tag_data.get('error') is None and (tag_data['plc_tag'], tag_data['elements']) not in tags_in_requests:\n tags_in_requests.add((tag_data['plc_tag'], tag_data['elements']))\n\n string = _make_string_bytes(tag_data)\n if string is not None:\n tag_data['value'] = string\n\n if _bit_request(tag_data, bit_writes):\n continue\n\n tag_data['write_value'] = 
writable_value(tag_data['value'], tag_data['elements'],\n tag_data['tag_info']['data_type'])\n\n if len(tag_data['write_value']) > self.connection_size:\n _request = self.new_request('write_tag_fragmented')\n _request.add(tag_data['plc_tag'], tag_data['value'], tag_data['elements'], tag_data['tag_info'])\n requests.append(_request)\n continue\n\n try:\n if not current_request.add_write(tag_data['plc_tag'], tag_data['write_value'], tag_data['elements'],\n tag_data['tag_info']):\n current_request = self.new_request('multi_request')\n requests.append(current_request)\n current_request.add_write(tag_data['plc_tag'], tag_data['write_value'], tag_data['elements'],\n tag_data['tag_info'])\n\n except RequestError:\n self.__log.exception(f'Failed to build request for {tag} - skipping')\n continue\n\n if bit_writes:\n for tag in bit_writes:\n try:\n value = bit_writes[tag]['or_mask'], bit_writes[tag]['and_mask']\n if not current_request.add_write(tag, value, tag_info=bit_writes[tag]['tag_info'], bits_write=True):\n current_request = self.new_request('multi_request')\n requests.append(current_request)\n current_request.add_write(tag, value, tag_info=bit_writes[tag]['tag_info'], bits_write=True)\n except RequestError:\n self.__log.exception(f'Failed to build request for {tag} - skipping')\n continue\n return requests, bit_writes\n\n def _get_tag_info(self, base, attrs) -> Optional[dict]:\n\n def _recurse_attrs(attrs, data):\n cur, *remain = attrs\n curr_tag = _strip_array(cur)\n if not len(remain):\n return data.get(curr_tag)\n else:\n if curr_tag in data:\n return _recurse_attrs(remain, data[curr_tag]['data_type']['internal_tags'])\n else:\n return None\n try:\n data = self._tags.get(_strip_array(base))\n if not len(attrs):\n return data\n else:\n return _recurse_attrs(attrs, data['data_type']['internal_tags'])\n\n except Exception as err:\n self.__log.exception(f'Failed to lookup tag data for {base}, {attrs}')\n raise\n\n def _parse_requested_tags(self, tags):\n requests = {}\n for tag in tags:\n parsed = {}\n try:\n parsed_request = self._parse_tag_request(tag)\n if parsed_request is not None:\n plc_tag, bit, elements, tag_info = parsed_request\n parsed['plc_tag'] = plc_tag\n parsed['bit'] = bit\n parsed['elements'] = elements\n parsed['tag_info'] = tag_info\n else:\n parsed['error'] = 'Failed to parse tag request'\n except RequestError as err:\n parsed['error'] = str(err)\n\n finally:\n requests[tag] = parsed\n return requests\n\n def _parse_tag_request(self, tag: str) -> Optional[Tuple[str, Optional[int], int, dict]]:\n try:\n if tag.endswith('}') and '{' in tag:\n tag, _tmp = tag.split('{')\n elements = int(_tmp[:-1])\n else:\n elements = 1\n\n bit = None\n\n base, *attrs = tag.split('.')\n if base.startswith('Program:'):\n base = f'{base}.{attrs.pop(0)}'\n if len(attrs) and attrs[-1].isdigit():\n _bit = attrs.pop(-1)\n bit = ('bit', int(_bit))\n if not len(attrs):\n tag = base\n else:\n tag = f\"{base}.{''.join(attrs)}\"\n\n tag_info = self._get_tag_info(base, attrs)\n\n if tag_info['data_type'] == 'DWORD' and elements == 1:\n _tag, idx = _get_array_index(tag)\n tag = f'{_tag}[{idx // 32}]'\n bit = ('bool_array', idx)\n elements = 1\n\n return tag, bit, elements, tag_info\n\n except Exception as err:\n # something went wrong parsing the tag path\n raise RequestError('Failed to parse tag read request', tag)\n\n @staticmethod\n def _send_requests(requests):\n\n def _mkkey(t=None, r=None):\n if t is not None:\n return t['tag'], t['elements']\n else:\n return r.tag, r.elements\n\n results = {}\n\n 
for request in requests:\n try:\n response = request.send()\n except Exception as err:\n if request.type_ != 'multi':\n results[_mkkey(r=request)] = Tag(request.tag, None, None, str(err))\n else:\n for tag in request.tags:\n results[_mkkey(t=tag)] = Tag(tag['tag'], None, None, str(err))\n else:\n if request.type_ != 'multi':\n if response:\n results[_mkkey(r=request)] = Tag(request.tag,\n response.value if request.type_ == 'read' else request.value,\n response.data_type if request.type_ == 'read' else request.data_type)\n else:\n results[_mkkey(r=request)] = Tag(request.tag, None, None, response.error)\n else:\n for tag in response.tags:\n if tag['service_status'] == SUCCESS:\n results[_mkkey(t=tag)] = Tag(tag['tag'], tag['value'], tag['data_type'])\n else:\n results[_mkkey(t=tag)] = Tag(tag['tag'], None, None,\n tag.get('error', 'Unknown Service Error'))\n return results\n\n # --------------------------------------------------------------\n # OLD CODE - to be removed\n #\n # --------------------------------------------------------------\n\n def create_tag_rp(self, tag, multi_requests=False):  # multi_requests kept for the multi-service callers\n \"\"\" Creates a request path\n\n It returns the request path wrapped around the tag passed.\n If there is any error it returns None\n \"\"\"\n tags = tag.split('.')\n if tags:\n base, *attrs = tags\n\n if self.use_instance_ids and base in self.tags:\n rp = [CLASS_ID['8-bit'],\n CLASS_CODE['Symbol Object'],\n INSTANCE_ID['16-bit'], b'\\x00',\n pack_uint(self.tags[base]['instance_id'])]\n else:\n base_tag, index = self._find_tag_index(base)\n base_len = len(base_tag)\n rp = [EXTENDED_SYMBOL,\n pack_usint(base_len),\n base_tag]\n if base_len % 2:\n rp.append(PADDING_BYTE)\n if index is None:\n return None\n else:\n rp += index\n\n for attr in attrs:\n attr, index = self._find_tag_index(attr)\n tag_length = len(attr)\n # Create the request path\n attr_path = [EXTENDED_SYMBOL,\n pack_usint(tag_length),\n attr]\n # Add pad byte because total length of Request path must be word-aligned\n if tag_length % 2:\n attr_path.append(PADDING_BYTE)\n # Add any index\n if index is None:\n return None\n else:\n attr_path += index\n rp += attr_path\n\n # At this point the Request Path is completed,\n request_path = b''.join(rp)\n request_path = bytes([len(request_path) // 2]) + request_path\n\n return request_path\n\n return None\n\n def _find_tag_index(self, tag):\n if '[' in tag: # Check if is an array tag\n t = tag[:len(tag) - 1] # Remove the last square bracket\n inside_value = t[t.find('[') + 1:] # Isolate the value inside bracket\n index = inside_value.split(',') # Now split the inside value in case part of multidimensional array\n tag = t[:t.find('[')] # Get only the tag part\n else:\n index = []\n return tag.encode(), self._encode_tag_index(index)\n\n @staticmethod\n def _encode_tag_index(index):\n path = []\n for idx in index:\n val = int(idx)\n if val <= 0xff:\n path += [ELEMENT_ID[\"8-bit\"], pack_usint(val)]\n elif val <= 0xffff:\n path += [ELEMENT_ID[\"16-bit\"], PADDING_BYTE, pack_uint(val)]\n elif val <= 0xffffffff:\n path += [ELEMENT_ID[\"32-bit\"], PADDING_BYTE, pack_dint(val)]\n else:\n return None # Cannot create a valid request packet\n return path\n\n def _check_reply(self, reply):\n \"\"\" check the reply message for errors\n\n return the error status if unsuccessful, else None\n \"\"\"\n try:\n if reply is None:\n return 'no reply received'\n # Get the type of command\n typ = unpack_uint(reply[:2])\n\n # Encapsulation status check\n if unpack_dint(reply[8:12]) != SUCCESS:\n return 
get_service_status(unpack_dint(reply[8:12]))\n\n # Command Specific Status check\n if typ == unpack_uint(ENCAPSULATION_COMMAND[\"send_rr_data\"]):\n status = unpack_usint(reply[42:43])\n if status != SUCCESS:\n return f\"send_rr_data reply:{get_service_status(status)} - \" \\\n f\"Extend status:{get_extended_status(reply, 42)}\"\n else:\n return None\n elif typ == unpack_uint(ENCAPSULATION_COMMAND[\"send_unit_data\"]):\n service = reply[46]\n status = _unit_data_status(reply)\n if status == INSUFFICIENT_PACKETS and service in (TAG_SERVICES_REPLY['Read Tag'],\n TAG_SERVICES_REPLY['Multiple Service Packet'],\n TAG_SERVICES_REPLY['Read Tag Fragmented'],\n TAG_SERVICES_REPLY['Write Tag Fragmented'],\n TAG_SERVICES_REPLY['Get Instance Attributes List'],\n TAG_SERVICES_REPLY['Get Attributes']):\n return None\n if status == SUCCESS:\n return None\n\n return f\"send_unit_data reply:{get_service_status(status)} - \" \\\n f\"Extend status:{get_extended_status(reply, 48)}\"\n\n except Exception as e:\n raise DataError(e)\n\n def read_tag(self, *tags):\n \"\"\" read tag from a connected plc\n\n Possible combination can be passed to this method:\n - ('Counts') a single tag name\n - (['ControlWord']) a list with one tag or many\n - (['parts', 'ControlWord', 'Counts'])\n\n At the moment there is no strong validation of the arguments passed. The user should verify\n the correctness of the format passed.\n\n :return: None is returned in case of error otherwise the tag list is returned\n \"\"\"\n\n if not self.forward_open():\n self.__log.warning(\"Target is not connected. read_tag will not be executed.\")\n raise DataError(\"Target is not connected. read_tag will not be executed.\")\n\n if len(tags) == 1:\n if isinstance(tags[0], (list, tuple, GeneratorType)):\n return self._read_tag_multi(tags[0])\n else:\n return self._read_tag_single(tags[0])\n else:\n return self._read_tag_multi(tags)\n\n def _read_tag_multi(self, tags):\n tag_bits = defaultdict(list)\n rp_list, tags_read = [[]], [[]]\n request_len = 0\n for tag in tags:\n tag, bit = self._prep_bools(tag, 'BOOL', bits_only=True)\n read = bit is None or tag not in tag_bits\n if bit is not None:\n tag_bits[tag].append(bit)\n if read:\n rp = self.create_tag_rp(tag)\n if rp is None:\n raise DataError(f\"Cannot create tag {tag} request packet. read_tag will not be executed.\")\n else:\n tag_req_len = len(rp) + MULTISERVICE_READ_OVERHEAD\n if tag_req_len + request_len >= self.connection_size:\n rp_list.append([])\n tags_read.append([])\n request_len = 0\n rp_list[-1].append(bytes([TAG_SERVICES_REQUEST['Read Tag']]) + rp + b'\\x01\\x00')\n tags_read[-1].append(tag)\n request_len += tag_req_len\n\n replies = []\n for req_list, tags_ in zip(rp_list, tags_read):\n message_request = self.build_multiple_service(req_list, self._get_sequence())\n msg = self.build_common_packet_format(DATA_ITEM['Connected'], b''.join(message_request),\n ADDRESS_ITEM['Connection Based'], addr_data=self._target_cid, )\n success, reply = self.send_unit_data(msg)\n if not success:\n raise DataError(f\"send_unit_data did not return valid data - {reply}\")\n\n replies += self._parse_multiple_request_read(reply, tags_, tag_bits)\n return replies\n\n def _read_tag_single(self, tag):\n tag, bit = self._prep_bools(tag, 'BOOL', bits_only=True)\n rp = self.create_tag_rp(tag)\n if rp is None:\n self.__log.warning(f\"Cannot create tag {tag} request packet. 
read_tag will not be executed.\")\n return None\n else:\n # Creating the Message Request Packet\n message_request = [\n pack_uint(self._get_sequence()),\n bytes([TAG_SERVICES_REQUEST['Read Tag']]), # the Request Service\n # bytes([len(rp) // 2]), # the Request Path Size length in word\n rp, # the request path\n b'\\x01\\x00',\n ]\n request = self.build_common_packet_format(DATA_ITEM['Connected'], b''.join(message_request),\n ADDRESS_ITEM['Connection Based'], addr_data=self._target_cid, )\n success, reply = self.send_unit_data(request)\n\n if success:\n data_type = unpack_uint(reply[50:52])\n typ = DATA_TYPE[data_type]\n try:\n value = UNPACK_DATA_FUNCTION[typ](reply[52:])\n if bit is not None:\n value = bool(value & (1 << bit)) if bit < BITS_PER_INT_TYPE[typ] else None\n return Tag(tag, value, typ)\n except Exception as e:\n raise DataError(e)\n else:\n return Tag(tag, None, None, reply)\n\n @staticmethod\n def _parse_multiple_request_read(reply, tags, tag_bits=None):\n \"\"\" parse the message received from a multi request read:\n\n For each tag parsed, the information extracted includes the tag name, the value read and the data type.\n That information is appended to the tag list as tuples\n\n :return: the tag list\n \"\"\"\n offset = 50\n position = 50\n tag_bits = tag_bits or {}\n try:\n number_of_service_replies = unpack_uint(reply[offset:offset + 2])\n tag_list = []\n for index in range(number_of_service_replies):\n position += 2\n start = offset + unpack_uint(reply[position:position + 2])\n general_status = unpack_usint(reply[start + 2:start + 3])\n tag = tags[index]\n if general_status == SUCCESS:\n typ = DATA_TYPE[unpack_uint(reply[start + 4:start + 6])]\n value_begin = start + 6\n value_end = value_begin + DATA_FUNCTION_SIZE[typ]\n value = UNPACK_DATA_FUNCTION[typ](reply[value_begin:value_end])\n if tag in tag_bits:\n for bit in tag_bits[tag]:\n val = bool(value & (1 << bit)) if bit < BITS_PER_INT_TYPE[typ] else None\n tag_list.append(Tag(f'{tag}.{bit}', val, 'BOOL'))\n else:\n tag_list.append(Tag(tag, value, typ))\n else:\n tag_list.append(Tag(tag, None, None, get_service_status(general_status)))\n\n return tag_list\n except Exception as e:\n raise DataError(e)\n\n def read_array(self, tag, counts, raw=False):\n \"\"\" read array of atomic data type from a connected plc\n\n At the moment there is no strong validation of the arguments passed. The user should verify\n the correctness of the format passed.\n\n :param tag: the name of the tag to read\n :param counts: the number of elements to read\n :param raw: if True, the value is output as raw bytes (hex)\n :return: None is returned in case of error otherwise the tag list is returned\n \"\"\"\n\n if not self._target_is_connected:\n if not self.forward_open():\n self.__log.warning(\"Target is not connected. read_array will not be executed.\")\n raise DataError(\"Target is not connected. read_array will not be executed.\")\n\n offset = 0\n last_idx = 0\n tags = b'' if raw else []\n\n while offset != -1:\n rp = self.create_tag_rp(tag)\n if rp is None:\n self.__log.warning(f\"Cannot create tag {tag} request packet. 
read_array will not be executed.\")\n return None\n else:\n # Creating the Message Request Packet\n message_request = [\n pack_uint(self._get_sequence()),\n bytes([TAG_SERVICES_REQUEST[\"Read Tag Fragmented\"]]), # the Request Service\n # bytes([len(rp) // 2]), # the Request Path Size length in word\n rp, # the request path\n pack_uint(counts),\n pack_dint(offset)\n ]\n msg = self.build_common_packet_format(DATA_ITEM['Connected'],\n b''.join(message_request),\n ADDRESS_ITEM['Connection Based'],\n addr_data=self._target_cid, )\n success, reply = self.send_unit_data(msg)\n if not success:\n raise DataError(f\"send_unit_data did not return valid data - {reply}\")\n\n last_idx, offset = self._parse_fragment(reply, last_idx, offset, tags, raw)\n\n return tags\n\n def _parse_fragment(self, reply, last_idx, offset, tags, raw=False):\n \"\"\" parse the fragment returned by a fragment service.\"\"\"\n\n try:\n status = _unit_data_status(reply)\n data_type = unpack_uint(reply[REPLY_START:REPLY_START + 2])\n fragment_returned = reply[REPLY_START + 2:]\n except Exception as e:\n raise DataError(e)\n\n fragment_returned_length = len(fragment_returned)\n idx = 0\n while idx < fragment_returned_length:\n try:\n typ = DATA_TYPE[data_type]\n if raw:\n value = fragment_returned[idx:idx + DATA_FUNCTION_SIZE[typ]]\n else:\n value = UNPACK_DATA_FUNCTION[typ](fragment_returned[idx:idx + DATA_FUNCTION_SIZE[typ]])\n idx += DATA_FUNCTION_SIZE[typ]\n except Exception as e:\n raise DataError(e)\n if raw:\n tags += value\n else:\n tags.append((last_idx, value))\n last_idx += 1\n\n if status == SUCCESS:\n offset = -1\n elif status == 0x06:\n offset += fragment_returned_length\n else:\n self.__log.warning('{0}: {1}'.format(get_service_status(status), get_extended_status(reply, 48)))\n offset = -1\n\n return last_idx, offset\n\n @staticmethod\n def _prep_bools(tag, typ, bits_only=True):\n \"\"\"\n if tag is a bool and a bit of an integer, returns the base tag and the bit value,\n else returns the tag name and None\n\n \"\"\"\n if typ != 'BOOL':\n return tag, None\n if not bits_only and tag.endswith(']'):\n try:\n base, idx = tag[:-1].rsplit(sep='[', maxsplit=1)\n idx = int(idx)\n base = f'{base}[{idx // 32}]'\n return base, idx\n except Exception:\n return tag, None\n else:\n try:\n base, bit = tag.rsplit('.', maxsplit=1)\n bit = int(bit)\n return base, bit\n except Exception:\n return tag, None\n\n @staticmethod\n def _dword_to_boolarray(tag, bit):\n base, tmp = tag.rsplit(sep='[', maxsplit=1)\n i = int(tmp[:-1])\n return f'{base}[{(i * 32) + bit}]'\n\n def _write_tag_multi_write(self, tags):\n rp_list = [[]]\n tags_added = [[]]\n request_len = 0\n for name, value, typ in tags:\n name, bit = self._prep_bools(name, typ, bits_only=False) # check if bool & if bit of int or bool array\n # Create the request path to wrap the tag name\n rp = self.create_tag_rp(name, multi_requests=True)\n if rp is None:\n self.__log.warning(f\"Cannot create tag {tags} req. packet. 
write_tag will not be executed\")\n return None\n else:\n try:\n if bit is not None: # then it is a boolean array\n rp = self.create_tag_rp(name, multi_requests=True)\n request = bytes([TAG_SERVICES_REQUEST[\"Read Modify Write Tag\"]]) + rp\n request += b''.join(self._make_write_bit_data(bit, value, bool_ary='[' in name))\n if typ == 'BOOL' and name.endswith(']'):\n name = self._dword_to_boolarray(name, bit)\n else:\n name = f'{name}.{bit}'\n else:\n request = (bytes([TAG_SERVICES_REQUEST[\"Write Tag\"]]) +\n rp +\n pack_uint(DATA_TYPE[typ]) +\n b'\\x01\\x00' +\n PACK_DATA_FUNCTION[typ](value))\n\n tag_req_len = len(request) + MULTISERVICE_WRITE_OVERHEAD\n if tag_req_len + request_len >= self.connection_size:\n rp_list.append([])\n tags_added.append([])\n request_len = 0\n rp_list[-1].append(request)\n request_len += tag_req_len\n except (LookupError, struct.error) as e:\n self.__warning(f\"Tag:{name} type:{typ} removed from write list. Error:{e}.\")\n\n # The tag in idx position need to be removed from the rp list because has some kind of error\n else:\n tags_added[-1].append((name, value, typ))\n\n # Create the message request\n replies = []\n for req_list, tags_ in zip(rp_list, tags_added):\n message_request = self.build_multiple_service(req_list, self._get_sequence())\n msg = self.build_common_packet_format(DATA_ITEM['Connected'],\n b''.join(message_request),\n ADDRESS_ITEM['Connection Based'],\n addr_data=self._target_cid, )\n success, reply = self.send_unit_data(msg)\n if success:\n replies += self._parse_multiple_request_write(tags_, reply)\n else:\n raise DataError(f\"send_unit_data returned not valid data - {reply}\")\n return replies\n\n def _write_tag_single_write(self, tag, value, typ):\n name, bit = self._prep_bools(tag, typ,\n bits_only=False) # check if we're writing a bit of a integer rather than a BOOL\n\n rp = self.create_tag_rp(name)\n if rp is None:\n self.__log.warning(f\"Cannot create tag {tag} request packet. 
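# _write_tag_multi_write above packs per-tag requests greedily and opens a new
# Multiple Service Packet once the next request would push the message past the
# connection size. The same greedy split in isolation (sizes in bytes; the
# limit and per-request overhead are illustrative, not the library's values):
def chunk_requests(requests, limit=500, overhead=4):
    batches, used = [[]], 0
    for req in requests:
        need = len(req) + overhead
        if used + need >= limit and batches[-1]:
            batches.append([])        # start a fresh packet
            used = 0
        batches[-1].append(req)
        used += need
    return batches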
write_tag will not be executed.\")\n return None\n else:\n # Creating the Message Request Packet\n message_request = [\n pack_uint(self._get_sequence()),\n bytes([TAG_SERVICES_REQUEST[\"Read Modify Write Tag\"]\n if bit is not None else TAG_SERVICES_REQUEST[\"Write Tag\"]]),\n # bytes([len(rp) // 2]), # the Request Path Size length in word\n rp, # the request path\n ]\n if bit is not None:\n try:\n message_request += self._make_write_bit_data(bit, value, bool_ary='[' in name)\n except Exception as err:\n raise DataError(f'Unable to write bit, invalid bit number {repr(err)}')\n else:\n message_request += [\n pack_uint(DATA_TYPE[typ]), # data type\n pack_uint(1), # Add the number of tag to write\n PACK_DATA_FUNCTION[typ](value)\n ]\n request = self.build_common_packet_format(DATA_ITEM['Connected'], b''.join(message_request),\n ADDRESS_ITEM['Connection Based'], addr_data=self._target_cid)\n success, reply = self.send_unit_data(request)\n return Tag(tag, value, typ, None if success else reply)\n\n @staticmethod\n def _make_write_bit_data(bit, value, bool_ary=False):\n or_mask, and_mask = 0x00000000, 0xFFFFFFFF\n\n if bool_ary:\n mask_size = 4\n bit = bit % 32\n else:\n mask_size = 1 if bit < 8 else 2 if bit < 16 else 4\n\n if value:\n or_mask |= (1 << bit)\n else:\n and_mask &= ~(1 << bit)\n\n return [pack_uint(mask_size), pack_udint(or_mask)[:mask_size], pack_udint(and_mask)[:mask_size]]\n\n @staticmethod\n def _parse_multiple_request_write(tags, reply):\n \"\"\" parse the message received from a multi request writ:\n\n For each tag parsed, the information extracted includes the tag name and the status of the writing.\n Those information are appended to the tag list as tuple\n\n :return: the tag list\n \"\"\"\n offset = 50\n position = 50\n\n try:\n number_of_service_replies = unpack_uint(reply[offset:offset + 2])\n tag_list = []\n for index in range(number_of_service_replies):\n position += 2\n start = offset + unpack_uint(reply[position:position + 2])\n general_status = unpack_usint(reply[start + 2:start + 3])\n error = None if general_status == SUCCESS else get_service_status(general_status)\n tag_list.append(Tag(*tags[index], error))\n return tag_list\n except Exception as e:\n raise DataError(e)\n\n def write_tag(self, tag, value=None, typ=None):\n \"\"\" write tag/tags from a connected plc\n\n Possible combination can be passed to this method:\n - ('tag name', Value, data type) as single parameters or inside a tuple\n - ([('tag name', Value, data type), ('tag name2', Value, data type)]) as array of tuples\n\n At the moment there is not a strong validation for the argument passed. The user should verify\n the correctness of the format passed.\n\n The type accepted are:\n - BOOL\n - SINT\n - INT\n - DINT\n - REAL\n - LINT\n - BYTE\n - WORD\n - DWORD\n - LWORD\n\n :param tag: tag name, or an array of tuple containing (tag name, value, data type)\n :param value: the value to write or none if tag is an array of tuple or a tuple\n :param typ: the type of the tag to write or none if tag is an array of tuple or a tuple\n :return: None is returned in case of error otherwise the tag list is returned\n \"\"\"\n\n if not self._target_is_connected:\n if not self.forward_open():\n self.__log.warning(\"Target did not connected. write_tag will not be executed.\")\n raise DataError(\"Target did not connected. 
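# _make_write_bit_data above drives the CIP Read-Modify-Write service: the OR
# mask forces a bit on, the AND mask forces it off, and every other bit is
# left untouched. The mask arithmetic on its own:
def rmw_masks(bit, value):
    or_mask, and_mask = 0x00000000, 0xFFFFFFFF
    if value:
        or_mask |= (1 << bit)             # set the bit
    else:
        and_mask &= ~(1 << bit)           # clear the bit
    return or_mask, and_mask & 0xFFFFFFFF

# rmw_masks(3, True)  -> (0x08, 0xFFFFFFFF)
# rmw_masks(3, False) -> (0x00, 0xFFFFFFF7)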
write_tag will not be executed.\")\n\n if isinstance(tag, (list, tuple, GeneratorType)):\n return self._write_tag_multi_write(tag)\n else:\n if isinstance(tag, tuple):\n name, value, typ = tag\n else:\n name = tag\n return self._write_tag_single_write(name, value, typ)\n\n def write_array(self, tag, values, data_type, raw=False):\n \"\"\" write array of atomic data type from a connected plc\n At the moment there is not a strong validation for the argument passed. The user should verify\n the correctness of the format passed.\n :param tag: the name of the tag to read\n :param data_type: the type of tag to write\n :param values: the array of values to write, if raw: the frame with bytes\n :param raw: indicates that the values are given as raw values (hex)\n \"\"\"\n\n if not isinstance(values, list):\n self.__log.warning(\"A list of tags must be passed to write_array.\")\n raise DataError(\"A list of tags must be passed to write_array.\")\n\n if not self._target_is_connected:\n if not self.forward_open():\n self.__log.warning(\"Target did not connected. write_array will not be executed.\")\n raise DataError(\"Target did not connected. write_array will not be executed.\")\n\n array_of_values = b''\n byte_size = 0\n byte_offset = 0\n\n for i, value in enumerate(values):\n array_of_values += value if raw else PACK_DATA_FUNCTION[data_type](value)\n byte_size += DATA_FUNCTION_SIZE[data_type]\n\n if byte_size >= 450 or i == len(values) - 1:\n # create the message and send the fragment\n rp = self.create_tag_rp(tag)\n if rp is None:\n self.__log.warning(f\"Cannot create tag {tag} request packet write_array will not be executed.\")\n return None\n else:\n # Creating the Message Request Packet\n message_request = [\n pack_uint(self._get_sequence()),\n bytes([TAG_SERVICES_REQUEST[\"Write Tag Fragmented\"]]), # the Request Service\n bytes([len(rp) // 2]), # the Request Path Size length in word\n rp, # the request path\n pack_uint(DATA_TYPE[data_type]), # Data type to write\n pack_uint(len(values)), # Number of elements to write\n pack_dint(byte_offset),\n array_of_values # Fragment of elements to write\n ]\n byte_offset += byte_size\n\n msg = self.build_common_packet_format(\n DATA_ITEM['Connected'],\n b''.join(message_request),\n ADDRESS_ITEM['Connection Based'],\n addr_data=self._target_cid,\n )\n\n success, reply = self.send_unit_data(msg)\n if not success:\n raise DataError(f\"send_unit_data returned not valid data - {reply}\")\n\n array_of_values = b''\n byte_size = 0\n return True\n\n def write_string(self, tag, value, size=82):\n \"\"\"\n Rockwell define different string size:\n STRING STRING_12 STRING_16 STRING_20 STRING_40 STRING_8\n by default we assume size 82 (STRING)\n \"\"\"\n data_tag = \".\".join((tag, \"DATA\"))\n len_tag = \".\".join((tag, \"LEN\"))\n\n # create an empty array\n data_to_send = [0] * size\n for idx, val in enumerate(value):\n try:\n unsigned = ord(val)\n data_to_send[idx] = unsigned - 256 if unsigned > 127 else unsigned\n except IndexError:\n break\n\n str_len = len(value)\n if str_len > size:\n str_len = size\n\n result_len = self.write_tag(len_tag, str_len, 'DINT')\n result_data = self.write_array(data_tag, data_to_send, 'SINT')\n return result_data and result_len\n\n def read_string(self, tag, str_len=None):\n data_tag = f'{tag}.DATA'\n if str_len is None:\n len_tag = f'{tag}.LEN'\n tmp = self.read_tag(len_tag)\n length, _ = tmp or (None, None)\n else:\n length = str_len\n\n if length:\n values = self.read_array(data_tag, length)\n if values:\n _, values = 
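# write_string/read_string above model a Logix STRING as a .LEN DINT plus a
# .DATA array of signed SINTs, so byte values above 127 are stored as
# negatives. The encode/decode pair in isolation:
def to_sints(text, size=82):
    data = [0] * size
    for i, ch in enumerate(text[:size]):
        u = ord(ch)
        data[i] = u - 256 if u > 127 else u   # fold into signed byte range
    return data

def from_sints(data, length):
    return ''.join(chr(v + 256) if v < 0 else chr(v) for v in data[:length])

# from_sints(to_sints('caf\xe9'), 4) == 'caf\xe9'  (0xE9 -> -23 -> 0xE9)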
zip(*values)\n chars = ''.join(chr(v + 256) if v < 0 else chr(v) for v in values)\n string, *_ = chars.split('\\x00', maxsplit=1)\n return string\n return None\n\n\ndef _unit_data_status(reply):\n return unpack_usint(reply[48:49])\n\n\ndef _parse_plc_name(response):\n if response.service_status != SUCCESS:\n raise DataError(f'get_plc_name returned status {get_service_status(response.error)}')\n try:\n name_len = unpack_uint(response.data[6:8])\n name = response.data[8: 8 + name_len].decode()\n return name\n except Exception as err:\n raise DataError(err)\n\n\ndef _parse_plc_info(data):\n vendor = unpack_uint(data[0:2])\n product_type = unpack_uint(data[2:4])\n product_code = unpack_uint(data[4:6])\n major_fw = int(data[6])\n minor_fw = int(data[7])\n keyswitch = KEYSWITCH.get(int(data[8]), {}).get(int(data[9]), 'UNKNOWN')\n serial_number = f'{unpack_udint(data[10:14]):0{8}x}'\n device_type_len = int(data[14])\n device_type = data[15:15 + device_type_len].decode()\n\n return {\n 'vendor': VENDORS.get(vendor, 'UNKNOWN'),\n 'product_type': PRODUCT_TYPES.get(product_type, 'UNKNOWN'),\n 'product_code': product_code,\n 'version_major': major_fw,\n 'version_minor': minor_fw,\n 'revision': f'{major_fw}.{minor_fw}',\n 'serial': serial_number,\n 'device_type': device_type,\n 'keyswitch': keyswitch\n }\n\n\ndef writable_value(value, elements, data_type):\n if isinstance(value, bytes):\n return value\n\n try:\n pack_func = PACK_DATA_FUNCTION[data_type]\n if elements > 1:\n return b''.join(pack_func(value[i]) for i in range(elements))\n else:\n return pack_func(value)\n except Exception as err:\n raise RequestError('Unable to create a writable value', err)\n\n\ndef _strip_array(tag):\n if '[' in tag:\n return tag[:tag.find('[')]\n return tag\n\n\ndef _get_array_index(tag):\n if tag.endswith(']') and '[' in tag:\n tag, _tmp = tag.split('[')\n idx = int(_tmp[:-1])\n else:\n idx = 0\n\n return tag, idx\n\n\ndef _tag_return_size(tag_info):\n if tag_info['tag_type'] == 'atomic':\n size = DATA_TYPE_SIZE[tag_info['data_type']]\n else:\n size = tag_info['data_type']['template']['structure_size']\n\n return size + 12 # account for service overhead\n\n\ndef _string_to_sint_array(string, string_len):\n sint_array = [b'\\x00' for _ in range(string_len)]\n if len(string) > string_len:\n string = string[:string_len]\n\n for i, s in enumerate(string):\n unsigned = ord(s)\n sint_array[i] = pack_sint(unsigned - 256 if unsigned > 127 else unsigned)\n\n return b''.join(sint_array)\n\n\ndef _make_string_bytes(tag_data):\n if tag_data['tag_info']['tag_type'] == 'struct':\n string_length = tag_data['tag_info']['data_type'].get('string')\n else:\n return None\n\n if tag_data['elements'] > 1:\n string_bytes = b''\n for val in tag_data['value']:\n str_data = _string_to_sint_array(val, string_length)\n str_bytes = pack_dint(len(val)) + str_data\n string_bytes += str_bytes + b'\\x00' * (len(str_bytes) % 4) # pad data to 4-byte boundaries\n else:\n str_data = _string_to_sint_array(tag_data['value'], string_length)\n string_bytes = pack_dint(len(tag_data['value'])) + str_data\n\n return string_bytes + b'\\x00' * (len(string_bytes) % 4) # pad data to 4-byte boundaries\n\n\ndef _bit_request(tag_data, bit_requests):\n if tag_data.get('bit') is None:\n return None\n\n if tag_data['plc_tag'] not in bit_requests:\n bit_requests[tag_data['plc_tag']] = {'and_mask': 0xFFFFFFFF,\n 'or_mask': 0x00000000,\n 'bits': [],\n 'tag_info': tag_data['tag_info']}\n\n bits_ = bit_requests[tag_data['plc_tag']]\n typ_, bit = tag_data['bit']\n 
bits_['bits'].append(bit)\n\n if typ_ == 'bool_array':\n bit = bit % 32\n\n if tag_data['value']:\n bits_['or_mask'] |= (1 << bit)\n else:\n bits_['and_mask'] &= ~(1 << bit)\n\n return True\n", "sub_path": "pycomm3/clx.py", "file_name": "clx.py", "file_ext": "py", "file_size_in_byte": 68714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "base.Base", "line_number": 62, "usage_type": "name"}, {"api_name": "const.MIN_VER_INSTANCE_IDS", "line_number": 103, "usage_type": "name"}, {"api_name": "const.REQUEST_PATH_SIZE", "line_number": 126, "usage_type": "argument"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 125, "usage_type": "name"}, {"api_name": "const.CLASS_ID", "line_number": 127, "usage_type": "name"}, {"api_name": "const.CLASS_CODE", "line_number": 128, "usage_type": "name"}, {"api_name": "const.INSTANCE_ID", "line_number": 129, "usage_type": "name"}, {"api_name": "const.REQUEST_PATH_SIZE", "line_number": 169, "usage_type": "argument"}, {"api_name": "const.CLASS_ID", "line_number": 170, "usage_type": "name"}, {"api_name": "const.CLASS_CODE", "line_number": 171, "usage_type": "name"}, {"api_name": "const.INSTANCE_ID", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 189, "usage_type": "name"}, {"api_name": "const.EXTENDED_SYMBOL", "line_number": 248, "usage_type": "name"}, {"api_name": "bytes_.pack_usint", "line_number": 248, "usage_type": "call"}, {"api_name": "const.CLASS_ID", "line_number": 254, "usage_type": "name"}, {"api_name": "const.CLASS_CODE", "line_number": 255, "usage_type": "name"}, {"api_name": "const.INSTANCE_ID", "line_number": 256, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 258, "usage_type": "call"}, {"api_name": "bytes_.pack_usint", "line_number": 261, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 264, "usage_type": "name"}, {"api_name": "bytes_.unpack_dint", "line_number": 295, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 297, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 301, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 304, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 306, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 308, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 312, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 314, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 316, "usage_type": "call"}, {"api_name": "const.EXTERNAL_ACCESS.get", "line_number": 325, "usage_type": "call"}, {"api_name": "const.EXTERNAL_ACCESS", "line_number": 325, "usage_type": "name"}, {"api_name": "const.SUCCESS", "line_number": 331, "usage_type": "name"}, {"api_name": "const.INSUFFICIENT_PACKETS", "line_number": 333, "usage_type": "name"}, {"api_name": "const.BASE_TAG_BIT", "line_number": 366, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 378, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 379, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 399, "usage_type": "name"}, {"api_name": "const.CLASS_ID", "line_number": 401, "usage_type": "name"}, {"api_name": "const.CLASS_CODE", "line_number": 402, "usage_type": "name"}, {"api_name": "const.INSTANCE_ID", "line_number": 403, "usage_type": "name"}, {"api_name": 
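# _make_string_bytes in the record above pads packed strings toward a 4-byte
# boundary with b'\x00' * (len(data) % 4). For comparison, the usual
# round-up-to-multiple padding is (-len) % 4, which always lands on the
# boundary (len % 4 does not, e.g. a 5-byte payload gets one pad byte, 6 total):
def pad_to_dword(data: bytes) -> bytes:
    return data + b'\x00' * (-len(data) % 4)

# len(pad_to_dword(b'12345')) == 8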
"bytes_.pack_uint", "line_number": 405, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 427, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 434, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 434, "usage_type": "name"}, {"api_name": "bytes_.unpack_dint", "line_number": 436, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 442, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 442, "usage_type": "name"}, {"api_name": "bytes_.unpack_dint", "line_number": 444, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 450, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 450, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 452, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 458, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 458, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 460, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 481, "usage_type": "name"}, {"api_name": "const.CLASS_ID", "line_number": 483, "usage_type": "name"}, {"api_name": "const.CLASS_CODE", "line_number": 484, "usage_type": "name"}, {"api_name": "const.INSTANCE_ID", "line_number": 485, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 487, "usage_type": "call"}, {"api_name": "bytes_.pack_dint", "line_number": 488, "usage_type": "call"}, {"api_name": "bytes_.pack_uint", "line_number": 489, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 493, "usage_type": "name"}, {"api_name": "const.INSUFFICIENT_PACKETS", "line_number": 493, "usage_type": "name"}, {"api_name": "const.SUCCESS", "line_number": 498, "usage_type": "name"}, {"api_name": "const.TEMPLATE_MEMBER_INFO_LEN", "line_number": 509, "usage_type": "name"}, {"api_name": "const.TEMPLATE_MEMBER_INFO_LEN", "line_number": 512, "usage_type": "name"}, {"api_name": "const.TEMPLATE_MEMBER_INFO_LEN", "line_number": 513, "usage_type": "argument"}, {"api_name": "bytes_.unpack_uint", "line_number": 551, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 552, "usage_type": "call"}, {"api_name": "bytes_.unpack_udint", "line_number": 553, "usage_type": "call"}, {"api_name": "const.DATA_TYPE", "line_number": 555, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 556, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 559, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 560, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 591, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 591, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 666, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 666, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 666, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 666, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 757, "usage_type": "name"}, {"api_name": "base.startswith", "line_number": 812, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 801, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 801, "usage_type": "name"}, {"api_name": "const.SUCCESS", "line_number": 866, "usage_type": "name"}, {"api_name": "const.CLASS_ID", "line_number": 889, "usage_type": "name"}, {"api_name": "const.CLASS_CODE", 
"line_number": 890, "usage_type": "name"}, {"api_name": "const.INSTANCE_ID", "line_number": 891, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 892, "usage_type": "call"}, {"api_name": "const.EXTENDED_SYMBOL", "line_number": 896, "usage_type": "name"}, {"api_name": "bytes_.pack_usint", "line_number": 897, "usage_type": "call"}, {"api_name": "const.PADDING_BYTE", "line_number": 900, "usage_type": "argument"}, {"api_name": "const.EXTENDED_SYMBOL", "line_number": 910, "usage_type": "name"}, {"api_name": "bytes_.pack_usint", "line_number": 911, "usage_type": "call"}, {"api_name": "const.PADDING_BYTE", "line_number": 915, "usage_type": "argument"}, {"api_name": "const.ELEMENT_ID", "line_number": 947, "usage_type": "name"}, {"api_name": "bytes_.pack_usint", "line_number": 947, "usage_type": "call"}, {"api_name": "const.ELEMENT_ID", "line_number": 949, "usage_type": "name"}, {"api_name": "const.PADDING_BYTE", "line_number": 949, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 949, "usage_type": "call"}, {"api_name": "const.ELEMENT_ID", "line_number": 951, "usage_type": "name"}, {"api_name": "const.PADDING_BYTE", "line_number": 951, "usage_type": "name"}, {"api_name": "bytes_.pack_dint", "line_number": 951, "usage_type": "call"}, {"api_name": "const.REPLY_INFO", "line_number": 963, "usage_type": "name"}, {"api_name": "bytes_.unpack_dint", "line_number": 963, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 965, "usage_type": "call"}, {"api_name": "bytes_.unpack_dint", "line_number": 968, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 968, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 969, "usage_type": "call"}, {"api_name": "bytes_.unpack_dint", "line_number": 969, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 972, "usage_type": "call"}, {"api_name": "const.ENCAPSULATION_COMMAND", "line_number": 972, "usage_type": "name"}, {"api_name": "bytes_.unpack_usint", "line_number": 973, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 974, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 975, "usage_type": "call"}, {"api_name": "const.get_extended_status", "line_number": 976, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 979, "usage_type": "call"}, {"api_name": "const.ENCAPSULATION_COMMAND", "line_number": 979, "usage_type": "name"}, {"api_name": "const.INSUFFICIENT_PACKETS", "line_number": 983, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REPLY", "line_number": 983, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REPLY", "line_number": 984, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REPLY", "line_number": 985, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REPLY", "line_number": 986, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REPLY", "line_number": 987, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REPLY", "line_number": 988, "usage_type": "name"}, {"api_name": "const.SUCCESS", "line_number": 990, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 993, "usage_type": "call"}, {"api_name": "const.get_extended_status", "line_number": 994, "usage_type": "call"}, {"api_name": "types.GeneratorType", "line_number": 1018, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1026, "usage_type": "call"}, {"api_name": "const.MULTISERVICE_READ_OVERHEAD", "line_number": 1039, 
"usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1044, "usage_type": "name"}, {"api_name": "const.DATA_ITEM", "line_number": 1051, "usage_type": "name"}, {"api_name": "const.ADDRESS_ITEM", "line_number": 1052, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1070, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1071, "usage_type": "name"}, {"api_name": "const.DATA_ITEM", "line_number": 1076, "usage_type": "name"}, {"api_name": "const.ADDRESS_ITEM", "line_number": 1077, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 1081, "usage_type": "call"}, {"api_name": "const.DATA_TYPE", "line_number": 1082, "usage_type": "name"}, {"api_name": "bytes_.UNPACK_DATA_FUNCTION", "line_number": 1084, "usage_type": "name"}, {"api_name": "const.BITS_PER_INT_TYPE", "line_number": 1086, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 1106, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1110, "usage_type": "call"}, {"api_name": "bytes_.unpack_usint", "line_number": 1111, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 1113, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 1114, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 1114, "usage_type": "call"}, {"api_name": "bytes_.DATA_FUNCTION_SIZE", "line_number": 1116, "usage_type": "name"}, {"api_name": "bytes_.UNPACK_DATA_FUNCTION", "line_number": 1117, "usage_type": "name"}, {"api_name": "const.BITS_PER_INT_TYPE", "line_number": 1120, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 1125, "usage_type": "call"}, {"api_name": "bytes_.pack_uint", "line_number": 1160, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1161, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1164, "usage_type": "call"}, {"api_name": "bytes_.pack_dint", "line_number": 1165, "usage_type": "call"}, {"api_name": "const.DATA_ITEM", "line_number": 1167, "usage_type": "name"}, {"api_name": "const.ADDRESS_ITEM", "line_number": 1169, "usage_type": "name"}, {"api_name": "bytes_.unpack_uint", "line_number": 1184, "usage_type": "call"}, {"api_name": "const.REPLY_START", "line_number": 1184, "usage_type": "name"}, {"api_name": "const.REPLY_START", "line_number": 1185, "usage_type": "name"}, {"api_name": "const.DATA_TYPE", "line_number": 1193, "usage_type": "name"}, {"api_name": "bytes_.DATA_FUNCTION_SIZE", "line_number": 1195, "usage_type": "name"}, {"api_name": "bytes_.UNPACK_DATA_FUNCTION", "line_number": 1197, "usage_type": "name"}, {"api_name": "bytes_.DATA_FUNCTION_SIZE", "line_number": 1197, "usage_type": "name"}, {"api_name": "bytes_.DATA_FUNCTION_SIZE", "line_number": 1198, "usage_type": "name"}, {"api_name": "const.SUCCESS", "line_number": 1207, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 1212, "usage_type": "call"}, {"api_name": "const.get_extended_status", "line_number": 1212, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1263, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1270, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1272, "usage_type": "call"}, {"api_name": "const.DATA_TYPE", "line_number": 1272, "usage_type": "name"}, {"api_name": "bytes_.PACK_DATA_FUNCTION", "line_number": 1274, "usage_type": "name"}, {"api_name": 
"const.MULTISERVICE_WRITE_OVERHEAD", "line_number": 1276, "usage_type": "name"}, {"api_name": "struct.error", "line_number": 1283, "usage_type": "attribute"}, {"api_name": "const.DATA_ITEM", "line_number": 1294, "usage_type": "name"}, {"api_name": "const.ADDRESS_ITEM", "line_number": 1296, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1316, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1317, "usage_type": "name"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1318, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1329, "usage_type": "call"}, {"api_name": "const.DATA_TYPE", "line_number": 1329, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1330, "usage_type": "call"}, {"api_name": "bytes_.PACK_DATA_FUNCTION", "line_number": 1331, "usage_type": "name"}, {"api_name": "const.DATA_ITEM", "line_number": 1333, "usage_type": "name"}, {"api_name": "const.ADDRESS_ITEM", "line_number": 1334, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1353, "usage_type": "call"}, {"api_name": "bytes_.pack_udint", "line_number": 1353, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1368, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1372, "usage_type": "call"}, {"api_name": "bytes_.unpack_usint", "line_number": 1373, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 1374, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 1374, "usage_type": "call"}, {"api_name": "types.GeneratorType", "line_number": 1413, "usage_type": "name"}, {"api_name": "bytes_.PACK_DATA_FUNCTION", "line_number": 1446, "usage_type": "name"}, {"api_name": "bytes_.DATA_FUNCTION_SIZE", "line_number": 1447, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1458, "usage_type": "call"}, {"api_name": "const.TAG_SERVICES_REQUEST", "line_number": 1459, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1462, "usage_type": "call"}, {"api_name": "const.DATA_TYPE", "line_number": 1462, "usage_type": "name"}, {"api_name": "bytes_.pack_uint", "line_number": 1463, "usage_type": "call"}, {"api_name": "bytes_.pack_dint", "line_number": 1464, "usage_type": "call"}, {"api_name": "const.DATA_ITEM", "line_number": 1470, "usage_type": "name"}, {"api_name": "const.ADDRESS_ITEM", "line_number": 1472, "usage_type": "name"}, {"api_name": "autologging.logged", "line_number": 61, "usage_type": "name"}, {"api_name": "bytes_.unpack_usint", "line_number": 1530, "usage_type": "call"}, {"api_name": "const.SUCCESS", "line_number": 1534, "usage_type": "name"}, {"api_name": "const.get_service_status", "line_number": 1535, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1537, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1545, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1546, "usage_type": "call"}, {"api_name": "bytes_.unpack_uint", "line_number": 1547, "usage_type": "call"}, {"api_name": "const.KEYSWITCH.get", "line_number": 1550, "usage_type": "call"}, {"api_name": "const.KEYSWITCH", "line_number": 1550, "usage_type": "name"}, {"api_name": "bytes_.unpack_udint", "line_number": 1551, "usage_type": "call"}, {"api_name": "const.VENDORS.get", "line_number": 1556, "usage_type": "call"}, {"api_name": "const.VENDORS", "line_number": 1556, "usage_type": "name"}, {"api_name": "const.PRODUCT_TYPES.get", "line_number": 1557, 
"usage_type": "call"}, {"api_name": "const.PRODUCT_TYPES", "line_number": 1557, "usage_type": "name"}, {"api_name": "bytes_.PACK_DATA_FUNCTION", "line_number": 1573, "usage_type": "name"}, {"api_name": "const.DATA_TYPE_SIZE", "line_number": 1600, "usage_type": "name"}, {"api_name": "bytes_.pack_sint", "line_number": 1614, "usage_type": "call"}, {"api_name": "bytes_.pack_dint", "line_number": 1629, "usage_type": "call"}, {"api_name": "bytes_.pack_dint", "line_number": 1633, "usage_type": "call"}]} +{"seq_id": "95110167", "text": "import numpy as np\nimport time\nimport pygame\nfrom functools import reduce\nimport random as ra\nfrom random import randint as ri\nimport math as ma\nfrom pygame.locals import *\nfrom oop_phy_pygame import *\n\n# инициализация pygame\npygame.init()\n\n# масштаб\np = 1.91\nscax = scay = 50 #40*p#87.5*p\n\n# сдвиг, в % от всего изображения\nindx, indy = 0, 0 # percent\n\n# масса\nm1 = -1 #ra.randint(3, 7)\nm2 = 1*10**0.5 #ra.randint(3, 7)\n\n# положение тел\nxp1, yp1 = 0, 0 #ra.randint(-3, 3), ra.randint(-3, 3) -2.5\nxp2, yp2 = 0, 3 #ra.randint(-3, 3), ra.randint(-3, 3)\n\n# нач скорость\nxv1, yv1 = 0, 0 #ra.randint(-3, 3)*10**-4, ra.randint(-3, 3)*10**-4 5.3153\nxv2, yv2 = 4, 0 #ra.randint(-3, 3)*10**-4, ra.randint(-3, 3)*10**-4\n\n# шаг времени\nstep = 1*10**-6.75\n\n# границы\nborder = (0, 0) #(16, 8)\n\n# реагирует ли тело на другие тела\nreact1 = 1\nreact2 = 1 #\n\n# реагируют ли другие тела на тело \nreall1 = 1\nreall2 = 1\n\n# цвет тел\ncol1 = (0, 0, 255)\ncol2 = (255, 0, 0)\n\n# радиус пути\nrpath = 1\n\n# радиус отрисовки тел\nr1 = r2 = r3 = r4 = r_n = 10\n\n# отрисовка тел\ndraw1 = 1\ndraw2 = 1 #\ndraw_n = 1\n\n# максимальное количество точек в массиве пути\nmax = 750\n\n# соединять ли точки пути\nconn = bool( 1 )\n\n# движение\nind_n = 0.005\nind_c = 1\n\n#\nsca_n = 0.001\nsca_c = 1\n\n\n# отрисовка векторов\ndr_vec1 = 1 #\ndr_vec2 = 1\ndr_vec_n = 1\n\n# толщина линии вектора нач скорости\n# при создании нового тела\nst_vec_r = 6\n\n# частота отрисовки\ndr_fr_path = 50 #+ 4*52\ndr_fr_body = 300\n\n# импорт картинки, реализация экрана\nscr = (1540, 801) #(1080, 2340)\npath, bgr = main_relise(\"space2.jpg\", scr)\nstar = img_imp(\"star2.png\", 50, (255, 255, 255))\n\n# реализация текста\ndr_txt = bool( 1 )\nf_siz = 30\nnum_symol = 6\nst_point = (15, 15)\nfram_c = (127, 127, 127)\nfont, bla, black = font_rel(f_siz, num_symol, 1, fram_c)\n\n# параметры для шоу \"смена частоты отрисовки\"\ncha = False\nconv_n = [True for _ in range(3)]\nend_n = [True for _ in range(2)]\nconv_v = 5.125\nend_v = 20.5\ni_conv = i_end = end_in = 0\n\n# создание экземпляра класса\na = body(m1, [xp1, yp1], [xv1, yv1], (step, border, react1, reall1), (col1, rpath, r1, draw1, dr_vec1, max, conn))\nb = body(m2, [xp2, yp2], [xv2, yv2], (step, border, react2, reall2), (col2, rpath, r2, draw2, dr_vec2, max, conn), model=star)\n\n# массив со всеми телами, что\n# будут использоваться в симуляции\nall_bodies = [a, b]\n\n# создаём \"упаковки\" для информации\ntxt = dr_txt, st_point, font, bla, black\ndraw = scr, path, bgr, dr_fr_path, dr_fr_body, max, conn\ncorrection = scax, scay, indx, indy, ind_n, ind_c, sca_n, sca_c\nshow = cha, conv_n, end_n, conv_v, end_v, i_conv\nphy = step, border, rpath, r_n, draw_n, dr_vec_n, st_vec_r\n\nmain_f(all_bodies, phy, draw, txt, show, correction)\n", "sub_path": "oop_phy_pyg_values.py", "file_name": "oop_phy_pyg_values.py", "file_ext": "py", "file_size_in_byte": 3205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.init", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "633401312", "text": "# -*- coding: utf-8 -*-\n# Author: Ji Yang \n# License: MIT\n\nimport random\n\nimport numpy as np\n\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\npadding = transforms.Compose([transforms.Resize(160),\n transforms.Pad(30, padding_mode='reflect'),\n transforms.RandomRotation((-8, 8)),\n transforms.RandomApply([transforms.RandomAffine(0, shear=8)]),\n transforms.RandomCrop(192)])\n\nrescaling = transforms.Compose([transforms.Resize(192),\n transforms.RandomApply([transforms.RandomAffine(0, shear=8)]),\n transforms.RandomRotation((-8, 8))])\n\ncrop_rescaling = transforms.Compose([transforms.RandomCrop(84),\n transforms.Resize(160),\n transforms.Pad(30, padding_mode='reflect'),\n transforms.RandomRotation((-6, 6)),\n transforms.RandomCrop(192)])\n\nstrong_augmentation_transform = transforms.Compose([transforms.RandomChoice([padding, rescaling, crop_rescaling]),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([transforms.ColorJitter(brightness=0.1,\n contrast=0.1,\n saturation=0.1,\n hue=0.1)]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\nbasic_augmentation_transform = transforms.Compose([transforms.RandomChoice([padding, rescaling]),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.ToTensor()\n ])\n\nval_test_transform = transforms.Compose([transforms.Resize(192),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\n\nclass SaltDataset(Dataset):\n def __init__(self, image, mask=None, transform=strong_augmentation_transform, is_train=True):\n self.image = image\n self.mask = mask\n self.transform = transform\n self.is_train = is_train\n\n def __len__(self):\n return self.image.shape[0]\n\n def __getitem__(self, idx):\n image = Image.fromarray(np.uint8(self.image[idx] * 255))\n seed = random.randint(6, 6 ** 6)\n\n random.seed(seed)\n if self.transform is not None:\n image = self.transform(image)\n else:\n image = val_test_transform(image)\n\n if self.is_train:\n mask = Image.fromarray(np.uint8(self.mask[idx]))\n random.seed(seed)\n if self.transform is not None:\n mask = self.transform(mask)\n else:\n mask = val_test_transform(mask)\n mask = (mask > 0.5).float() # round resize artifact\n return {'image': image, 'mask': mask}\n\n return {'image': image}\n", "sub_path": "salt_dataset_192.py", "file_name": "salt_dataset_192.py", "file_ext": "py", "file_size_in_byte": 3801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms.Pad", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 15, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": 
"torchvision.transforms.RandomAffine", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 17, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomAffine", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.Resize", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.Pad", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomChoice", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomChoice", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 
42, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 52, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 63, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 73, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "253514360", "text": "import logging\nimport MySQLdb\nfrom common import ItemContainsNull\n\nclass MySqlPipeline(object):\n\tdef open_spider(self, spider):\n\t\tself.conn = MySQLdb.connect('IP', 'USERNAME', 'PASSWORD', 'TABLENAME', charset=\"utf8\", use_unicode=True)\n\t\tself.cursor = self.conn.cursor()\n\n\tdef close_spider(self, spider):\n\t\tself.conn.close();\n\n\tdef process_item(self, item, spider):\n\t\tfor i in item.iteritems():\n\t\t\tif not ItemContainsNull(i):\n\t\t\t\tself.cursor.execute(\"\"\"INSERT INTO Product (Type, Price, Date, Quantity, Store, Name) \n\t\t\t\t\t\t\tVALUES (%s, %s, %s, %s, %s, %s)\"\"\", \n\t\t\t\t\t\t (i[1]['itemType'], \n\t\t\t\t\t\t\ti[1]['price'],\n\t\t\t\t\t\t\ti[1]['date'],\n\t\t\t\t\t\t\ti[1]['quantity'],\n\t\t\t\t\t\t\ti[1]['store'],\n\t\t\t\t\t\t\ti[1]['name']))\n\t\t\t\tself.conn.commit()", "sub_path": "PriceInformation/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "MySQLdb.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "common.ItemContainsNull", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "81257666", "text": "from Crypto.Cipher import AES\nfrom Crypto.Random import random\nfrom Crypto.Util.number import long_to_bytes,bytes_to_long\n\n\nwith open(\"flag_cipher\",\"r\") as f:\n\tc = f.read()\n\tf.close()\n\nc = [c[i:i+32] for i in range(0, len(c), 32)]\n\nfor i in range(1, len(c)-1):\n\tcipher = AES.new(c[i], AES.MODE_ECB, \"\")\n\tprint(cipher.decrypt(c[i+1]))", "sub_path": "crypto/[AFCTF2018]MyOwnCBC/fuck.py", "file_name": "fuck.py", "file_ext": "py", "file_size_in_byte": 333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Crypto.Cipher.AES.new", "line_number": 13, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 13, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "383096748", "text": "import numpy as np\nimport matplotlib.pyplot as 
plt\nimport bicycledataprocessor as bdp\nimport canonical_system_id as csi\n\n# This gives the proportion of the lateral force which should be added to the\n# steer torque and roll torque equations in the canonical equations.\nF = {}\nfor rider in ['Charlie', 'Jason', 'Luke']:\n F[rider] = csi.whipple_state_space(rider, 1.0)[2][2:]\n\n# find the runs that we want to id\ndataset = bdp.DataSet()\ndataset.open()\n\ntable = dataset.database.root.runTable\n\nruns = []\nfor row in table.iterrows():\n con = []\n con.append(row['Rider'] in ['Jason', 'Charlie', 'Luke'])\n con.append(row['Maneuver'] in ['Balance',\n 'Track Straight Line',\n 'Balance With Disturbance',\n 'Track Straight Line With Disturbance'])\n con.append(row['Environment'] == 'Horse Treadmill')\n con.append(row['corrupt'] is not True)\n con.append(int(row['RunID']) > 100)\n if False not in con:\n runs.append(row['RunID'])\n\ndataset.close()\n\nidMassMats = np.zeros((len(runs), 2, 2))\nidDampMats = np.zeros((len(runs), 2, 2))\nidStifMats = np.zeros((len(runs), 2, 2))\nspeeds = np.nan * np.ones(len(runs))\n\nthetaDelta = ['m21', 'm22', 'c21', 'c22', 'k21', 'k22']\n\nerrors = []\nfor i, r in enumerate(runs):\n try:\n trial = bdp.Run(r, dataset, filterFreq=15.)\n except bdp.bdpexceptions.TimeShiftError:\n errors.append(r)\n except IndexError:\n errors.append(r)\n else:\n if trial.metadata['Maneuver'].endswith('Disturbance'):\n thetaPhi = ['m11', 'm12', 'c11', 'c12', 'k11', 'k12']\n else:\n thetaPhi = ['c11', 'c12', 'k11', 'k12']\n\n v = trial.taskSignals['ForwardSpeed'].mean()\n speeds[i] = v\n g = trial.bicycleRiderParameters['g']\n\n M, C1, K0, K2 = trial.bicycle.canonical(nominal=True)\n C = C1 * v\n K = K0 * g + K2 * v**2\n canon = (M, C, K)\n\n timeSeries = csi.time_series(trial, F)\n M_id, C_id, K_id = csi.compute_unknowns(thetaPhi, thetaDelta,\n timeSeries, canon)\n idMassMats[i] = M_id\n idDampMats[i] = C_id\n idStifMats[i] = K_id\n\n #forces_id = np.dot(M_id, accels) + np.dot(C_id, rates) + np.dot(K_id,\n #coordinates)\n#\n #time = trial.taskSignals['ForwardSpeed'].time()\n\n #fig = plt.figure()\n #for i in range(2):\n #ax = fig.add_subplot(2, 1, i + 1)\n #ax.plot(time, forces[i], time, forces_id[i])\n #ax.legend(('Experimental', 'Identified'))\n #fig.show()\n\nfig = plt.figure()\nfor i in range(2):\n ax = fig.add_subplot(2, 6, 1 + i * 6)\n ax.plot(speeds, idMassMats[:, i, 0], '.')\n\n ax = fig.add_subplot(2, 6, 2 + i * 6)\n ax.plot(speeds, idMassMats[:, i, 1], '.')\n\n ax = fig.add_subplot(2, 6, 3 + i * 6)\n ax.plot(speeds, idDampMats[:, i, 0], '.')\n\n ax = fig.add_subplot(2, 6, 4 + i * 6)\n ax.plot(speeds, idDampMats[:, i, 1], '.')\n\n ax = fig.add_subplot(2, 6, 5 + i * 6)\n ax.plot(speeds, idStifMats[:, i, 0], '.')\n\n ax = fig.add_subplot(2, 6, 6 + i * 6)\n ax.plot(speeds, idStifMats[:, i, 1], '.')\nfig.show()\n", "sub_path": "scripts/canonicalid/fit_canonical.py", "file_name": "fit_canonical.py", "file_ext": "py", "file_size_in_byte": 3197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "canonical_system_id.whipple_state_space", "line_number": 10, "usage_type": "call"}, {"api_name": "bicycledataprocessor.DataSet", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
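# fit_canonical above rebuilds the speed-dependent canonical matrices before
# comparing them with the identified ones: damping scales linearly with
# forward speed, stiffness combines a gravity term and a speed-squared term.
# The assembly step alone, with toy 2x2 matrices standing in for the bicycle
# parameters:
import numpy as np

def canonical_at_speed(M, C1, K0, K2, v, g=9.81):
    C = C1 * v                # v * C1
    K = K0 * g + K2 * v ** 2  # g * K0 + v^2 * K2
    return M, C, K

M, C1 = np.eye(2), np.ones((2, 2))
K0, K2 = np.diag([-1.0, -0.5]), np.diag([0.1, 0.2])
print(canonical_at_speed(M, C1, K0, K2, v=3.0))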
"numpy.ones", "line_number": 37, "usage_type": "call"}, {"api_name": "bicycledataprocessor.Run", "line_number": 44, "usage_type": "call"}, {"api_name": "bicycledataprocessor.bdpexceptions", "line_number": 45, "usage_type": "attribute"}, {"api_name": "canonical_system_id.time_series", "line_number": 64, "usage_type": "call"}, {"api_name": "canonical_system_id.compute_unknowns", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "506532697", "text": "import os\nfrom dotenv import load_dotenv\nimport pymongo\nimport datetime\nfrom bson.objectid import ObjectId\nfrom flask import Flask, request, render_template, redirect, url_for, session, flash\nfrom flask_login import LoginManager, UserMixin, current_user, login_user, logout_user, login_required\nimport bcrypt\nfrom functools import wraps\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.getenv('SECRET_KEY')\n\n## necessary for python-dotenv ##\nAPP_ROOT = os.path.join(os.path.dirname(__file__), '..') # refers to application_top\ndotenv_path = os.path.join(APP_ROOT, '.env')\nload_dotenv(dotenv_path)\n\nmongo = os.getenv('MONGO')\n\n\nclient = pymongo.MongoClient(mongo)\n\ndb = client['bucket_list'] # Mongo collection\nusers = db['users'] # Mongo document\nroles = db['roles'] # Mongo document\ncategories = db['categories'] # Mongo document\nbucketList = db['bucketList'] # Mongo document\nstatus = db['status']\n\nlogin = LoginManager()\nlogin.init_app(app)\nlogin.login_view = 'login'\n\n@login.user_loader\ndef load_user(username):\n u = users.find_one({\"username\": username})\n if not u:\n return None\n return User(username=u['username'], role=u['role'], id=u['_id'])\n\nclass User:\n def __init__(self, id, username, role):\n self._id = id\n self.username = username\n self.role = role\n\n @staticmethod\n def is_authenticated():\n return True\n\n @staticmethod\n def is_active():\n return True\n\n @staticmethod\n def is_anonymous():\n return False\n\n def get_id(self):\n return self.username\n\n'''\n @staticmethod\n def check_password(password_hash, password):\n return check_password_hash(password_hash, password)\n'''\n\n### custom wrap to determine role access ### \ndef roles_required(*role_names):\n def decorator(original_route):\n @wraps(original_route)\n def decorated_route(*args, **kwargs):\n if not current_user.is_authenticated:\n print('The user is not authenticated.')\n return redirect(url_for('login'))\n \n print(current_user.role)\n print(role_names)\n if not current_user.role in role_names:\n print('The user does not have this role.')\n return redirect(url_for('login'))\n else:\n print('The user is in this role.')\n return original_route(*args, **kwargs)\n return decorated_route\n return decorator\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\n@app.route('/register')\ndef register():\n return render_template('register.html')\n\n\n@app.route('/add-user', methods=['GET', 'POST'])\ndef add_user():\n if request.method == 'POST':\n form = request.form\n \n \n \n email = users.find_one({\"email\": request.form['email']})\n if email:\n flash('This email is already registered.', 'warning')\n return 'This email has already been registered. 
Go back in your browser to change it.'\n username = users.find_one({"username": request.form['username']})\n if username:\n flash('This username is already registered.', 'warning')\n return 'This username has already been registered. Go back in your browser to change it.'\n new_user = {\n 'first_name': form['first_name'],\n 'last_name': form['last_name'],\n 'username' : form['username'],\n 'email': form['email'],\n 'password': form['password'],\n 'role': form['role'],\n 'date_added': datetime.datetime.now(),\n 'date_modified': datetime.datetime.now()\n }\n users.insert_one(new_user)\n flash(new_user['username'] + ' user has been added.', 'success')\n return render_template('login.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n from werkzeug.urls import url_parse # used below but never imported at module level\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n\n if request.method == 'POST':\n user = users.find_one({"username": request.form['username']})\n if user and user['password'] == request.form['password']:\n user_obj = User(username=user['username'], role=user['role'], id=user['_id'])\n login_user(user_obj)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('view_activities')\n flash("Logged in successfully!", category='success')\n return redirect(next_page)\n\n flash("Wrong username or password!", category='danger')\n return render_template('login.html')\n\n@app.route('/logout', methods=['GET', 'POST'])\ndef logout():\n logout_user()\n flash('You have successfully logged out.', 'success')\n return redirect(url_for('login'))\n\n@app.route('/my-account/<user_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('user', 'contributor', 'admin')\ndef my_account(user_id):\n edit_account = users.find_one({'_id': ObjectId(user_id)})\n if edit_account:\n return render_template('my-account.html', user=edit_account)\n flash('User not found.', 'warning')\n return redirect(url_for('index'))\n\n@app.route('/update-myaccount/<user_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('contributor', 'admin')\ndef update_myaccount(user_id):\n if request.method == 'POST':\n form = request.form\n password = request.form['password']\n users.update_one({'_id': ObjectId(user_id)}, {'$set': {\n 'first_name': form['first_name'],\n 'last_name': form['last_name'],\n 'username' : form['username'],\n 'email': form['email'],\n 'password': password,\n 'role': form['role'],\n 'date_added': form['date_added'],\n 'date_modified': datetime.datetime.now()\n }})\n update_user = users.find_one({'_id': ObjectId(user_id)})\n flash(update_user['username'] + ' has been updated.', 'success')\n return redirect(url_for('admin_users'))\n return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())\n\n@app.route('/about', methods=['GET', 'POST'])\ndef about():\n return render_template('about.html')\n\n
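# The login view above follows `next` only when it has no network location,
# which blocks open redirects to attacker-controlled hosts. That check by
# itself, assuming Werkzeug's url_parse (available in Werkzeug < 3):
from werkzeug.urls import url_parse

def safe_next(next_page, fallback='/'):
    if not next_page or url_parse(next_page).netloc != '':
        return fallback       # refuse absolute URLs pointing elsewhere
    return next_page

# safe_next('/activities') -> '/activities'
# safe_next('http://evil.example/') -> '/'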
########## Admin functionality -- User management ##########\n\n@app.route('/admin/users', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef admin_users():\n return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())\n\n@app.route('/admin/add-user', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef admin_add_user():\n if request.method == 'POST':\n form = request.form\n password = request.form['password']\n email = users.find_one({"email": request.form['email']})\n if email:\n flash('This email is already registered.', 'warning')\n return 'This email has already been registered.'\n new_user = {\n 'first_name': form['first_name'],\n 'last_name': form['last_name'],\n 'username' : form['username'],\n 'email': form['email'],\n 'password': password,\n 'role': form['role'],\n 'date_added': datetime.datetime.now(),\n 'date_modified': datetime.datetime.now()\n }\n users.insert_one(new_user)\n flash(new_user['username'] + ' user has been added.', 'success')\n return redirect(url_for('admin_users'))\n return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())\n\n@app.route('/admin/delete-user/<user_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef admin_delete_user(user_id):\n delete_user = users.find_one({'_id': ObjectId(user_id)})\n if delete_user:\n users.delete_one(delete_user)\n flash(delete_user['username'] + ' has been deleted.', 'warning')\n return redirect(url_for('admin_users'))\n flash('User not found.', 'warning')\n return redirect(url_for('admin_users'))\n\n@app.route('/admin/edit-user/<user_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef admin_edit_user(user_id):\n edit_user = users.find_one({'_id': ObjectId(user_id)})\n if edit_user:\n return render_template('edit-user.html', user=edit_user, all_roles=roles.find())\n flash('User not found.', 'warning')\n return redirect(url_for('admin_users'))\n\n@app.route('/admin/update-user/<user_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef admin_update_user(user_id):\n if request.method == 'POST':\n form = request.form\n password = request.form['password']\n users.update_one({'_id': ObjectId(user_id)}, {'$set': {\n 'first_name': form['first_name'],\n 'last_name': form['last_name'],\n 'username' : form['username'],\n 'email': form['email'],\n 'password': password,\n 'role': form['role'],\n 'date_added': form['date_added'],\n 'date_modified': datetime.datetime.now()\n }})\n update_user = users.find_one({'_id': ObjectId(user_id)})\n flash(update_user['username'] + ' has been updated.', 'success')\n return redirect(url_for('admin_users'))\n return render_template('user-admin.html', all_roles=roles.find(), all_users=users.find())\n\n@app.route('/activities/add-share-status', methods=['POST'])\n@login_required\n@roles_required('admin')\ndef add_share_status():\n if request.method == 'POST':\n form = request.form\n share_status = status.find_one({"share_status": form['share_status']})\n if share_status:\n flash('This status is already registered.', 'warning')\n return redirect(url_for('admin_users'))\n new_share_status = {\n 'share_status': form['share_status'],\n }\n status.insert_one(new_share_status)\n flash(new_share_status['share_status'] + ' has been added.', 'success')\n return redirect(url_for('admin_activities'))\n return render_template('activity-admin.html', all_status=status.find())\n\n@app.route('/activities/delete_share_status/<share_status_id>', methods=['GET'])\n@login_required\n@roles_required('admin')\ndef delete_share_status(share_status_id):\n delete_share_status = status.find_one({'_id': ObjectId(share_status_id)})\n if delete_share_status:\n status.delete_one(delete_share_status)\n flash(delete_share_status['share_status'] + ' has been deleted.', 'danger')\n return redirect(url_for('admin_activities'))\n flash('Share status not found.', 'warning')\n return redirect(url_for('admin_activities'))\n\n########## categories ##########\n
'POST'])\n@login_required\n@roles_required('admin')\ndef admin_categories():\n    return render_template('admin-categories.html', all_categories=categories.find())\n\n\n@app.route('/add-category', methods=[ 'GET','POST'])\n@login_required\n@roles_required('admin')\ndef add_category():\n    if request.method == 'POST':\n        form = request.form\n        \n        new_category = {\n        \n        'category_name' : form['category_name']\n        \n        \n        }\n        categories.insert_one(new_category)\n        flash('New category has been added.', 'success')\n        \n    return render_template('admin-categories.html', all_categories=categories.find())\n\n@app.route('/categories/edit-category/<category_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef edit_category(category_id):\n    edit_category = categories.find_one({'_id': ObjectId(category_id)})\n    if edit_category:\n        return render_template('edit-category.html', category=edit_category, all_categories=categories.find(), all_status=status.find())\n    flash('Category not found.', 'danger')\n    return redirect(url_for('admin_categories'))\n@app.route('/categories/update-category/<category_id>', methods=['POST'])\n@login_required\n@roles_required('admin')\ndef update_category(category_id):\n    if request.method == 'POST':\n        form = request.form\n        categories.update({'_id': ObjectId(category_id)},\n        {\n        'category_name' : form['category_name']\n        \n        })\n        update_category = categories.find_one({'_id': ObjectId(category_id)})\n        flash(update_category['category_name'] + ' has been updated.', 'success')\n        return redirect(url_for('admin_categories'))\n    return render_template('edit-category.html', all_categories=categories.find())\n@app.route('/categories/delete-category/<category_id>', methods=['POST'])\n@login_required\n@roles_required('admin')\ndef delete_category(category_id):\n    delete_category = categories.find_one({'_id': ObjectId(category_id)})\n    if delete_category:\n        categories.delete_one(delete_category)\n        flash(delete_category['category_name'] + ' has been deleted.', 'danger')\n        return redirect(url_for('admin_categories'))\n    flash('Category not found.', 'warning')\n    return redirect(url_for('admin_categories'))\n    \n    \n########## activities ##########\n@app.route('/activities', methods=['GET', 'POST'])\n\ndef view_activities():\n    return render_template('activities.html', all_bucketList=bucketList.find())\n\n\n@app.route('/jump', methods=['GET', 'POST'])\ndef view_jump():\n    return \"jump\"\n\n\n\n\n\n@app.route('/search-results', methods=['GET', 'POST'])\ndef view_search_results():\n    return render_template('search-results.html', search_string=request.args.get('search_string', ''), all_bucketList=bucketList.find())\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n    if request.method == 'POST':\n        form = request.form\n        search_string = request.form['search_string']\n        return render_template('search-results.html', search_string=search_string, all_bucketList=bucketList.find())\n\n\n\n\n\n\n\n@app.route('/activities/my-bucket-list', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin', 'contributor')\ndef view_my_activities():\n    return render_template('my-bucket-list.html', all_bucketList=bucketList.find())\n\n\n\n@app.route('/activities/activities', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef admin_activities():\n    return render_template('activity-admin.html', all_categories=categories.find(), all_bucketList=bucketList.find(), all_status=status.find())\n\n@app.route('/activities/new-activity', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin', 'contributor')\ndef activity_page():\n    return 
render_template('new-activity.html', all_categories=categories.find(), all_bucketList=bucketList.find(), all_status=status.find())\n\n\n\n\n@app.route('/activities/add-activity', methods=['POST'])\n@login_required\n@roles_required('admin', 'contributor')\ndef add_activity():\n    if request.method == 'POST':\n        form = request.form\n        \n        new_activity = {\n        \n        'activity_name' : form['activity_name'],\n        'category' : form['category'],\n        'description' : form['description'],\n        'share_status' : form['share_status'],\n        'estimated_cost' : form['estimated_cost'],\n        'address' : form['address'],\n        'city' : form['city'],\n        'state' : form['state'],\n        'country' : form['country'],\n        'expected_date' : form['expected_date'],\n        \n        'username' : form['username'],\n\n        'date_added': datetime.datetime.now(),\n        'date_modified': datetime.datetime.now()\n        \n        }\n        bucketList.insert_one(new_activity)\n        flash('New activity has been added.', 'success')\n        return redirect(url_for('view_my_activities'))\n    return render_template('new-activity.html', all_categories=categories.find())\n\n@app.route('/activities/edit-activity/<activity_id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin', 'contributor')\ndef edit_activity(activity_id):\n    edit_activity = bucketList.find_one({'_id': ObjectId(activity_id)})\n    if edit_activity:\n        return render_template('edit-activity.html', activity=edit_activity, all_categories=categories.find(), all_status=status.find())\n    flash('Activity not found.', 'danger')\n    return redirect(url_for('admin_activities'))\n@app.route('/activities/update-activity/<activity_id>', methods=['POST'])\n@login_required\n@roles_required('admin', 'contributor')\ndef update_activity(activity_id):\n    if request.method == 'POST':\n        form = request.form\n        bucketList.update({'_id': ObjectId(activity_id)},\n        {\n        'activity_name' : form['activity_name'],\n        'category' : form['category'],\n        'description' : form['description'],\n        'share_status' : form['share_status'],\n        'estimated_cost' : form['estimated_cost'],\n        'address' : form['address'],\n        'city' : form['city'],\n        'state' : form['state'],\n        'country' : form['country'],\n        'expected_date' : form['expected_date'],\n        \n        'username' : form['username'],\n\n        'date_added': form['date_added'],\n        'date_modified': datetime.datetime.now()\n\n\n        \n        })\n        update_activity = bucketList.find_one({'_id': ObjectId(activity_id)})\n        flash(update_activity['activity_name'] + ' has been updated.', 'success')\n        return redirect(url_for('view_activities'))\n    return render_template('edit-activity.html', all_categories=categories.find())\n\n\n@app.route('/activities/delete-activity/<activity_id>', methods=['POST'])\n@login_required\n@roles_required('admin', 'contributor' )\ndef delete_activity(activity_id):\n    delete_activity = bucketList.find_one({'_id': ObjectId(activity_id)})\n    if delete_activity:\n        bucketList.delete_one(delete_activity)\n        flash(delete_activity['activity_name'] + ' has been deleted.', 'danger')\n        return redirect(url_for('view_activities'))\n    flash('Activity not found.', 'warning')\n    return redirect(url_for('view_activities'))\n# authenticated users can print an activity\n@app.route('/activities/print-activity/<activity_id>', methods=['GET', 'POST'])\n\ndef print_activity(activity_id):\n    print_activity = bucketList.find_one({'_id': ObjectId(activity_id)})\n    if print_activity:\n        return render_template('print-activity.html', activity=print_activity)\n    flash('Activity not found.', 'danger')\n    return redirect(url_for('view_activities'))\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n", "sub_path": "app.py", "file_name": 
"app.py", "file_ext": "py", "file_size_in_byte": 18677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 31, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 76, "usage_type": "call"}, {"api_name": "flask_login.current_user.role", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 78, "usage_type": "name"}, {"api_name": "flask_login.current_user.role", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 82, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 124, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 129, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 134, "usage_type": "call"}, {"api_name": 
"flask.url_for", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 137, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 147, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 147, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 150, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 166, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 175, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 176, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 177, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 183, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 184, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 186, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 186, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 188, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 197, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 200, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 202, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 180, "usage_type": "name"}, 
{"api_name": "flask.render_template", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 216, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 225, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 225, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 229, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 238, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 239, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 244, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 219, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 250, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 255, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 256, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 247, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 266, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 259, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 272, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 272, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 273, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 273, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 275, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 275, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 277, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 286, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 286, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 288, "usage_type": "call"}, {"api_name": "flask.flash", 
"line_number": 289, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 290, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 290, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 291, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 305, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 305, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 306, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 306, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 307, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 307, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 309, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 310, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 316, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 316, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 317, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 302, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 325, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 326, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 326, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 327, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 328, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 328, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 319, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 335, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 332, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 342, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 342, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 343, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 343, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 354, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 339, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 362, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 363, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 364, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 364, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 357, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 369, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 369, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 370, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 370, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 371, "usage_type": "call"}, {"api_name": 
"bson.objectid.ObjectId", "line_number": 376, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 377, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 378, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 378, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 379, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 366, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 384, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 387, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 388, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 388, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 389, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 390, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 390, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 381, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 397, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 410, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 413, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 413, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 414, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 414, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 415, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 415, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 416, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 428, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 425, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 436, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 433, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 442, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 439, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 451, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 451, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 452, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 452, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 469, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 469, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 470, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 470, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 474, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 475, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 475, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 476, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 448, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 482, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 484, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 485, "usage_type": "call"}, {"api_name": 
"flask.redirect", "line_number": 486, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 486, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 479, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 491, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 491, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 492, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 492, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 493, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 509, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 509, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 514, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 515, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 516, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 516, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 517, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 488, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 524, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 527, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 528, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 528, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 529, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 530, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 530, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 521, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 535, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 537, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 538, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 539, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 539, "usage_type": "call"}]} +{"seq_id": "279632711", "text": "import argparse\nimport tokenizer\n\n\ndef read_data_from_file(file_path):\n f = open(file_path, 'r')\n ret = f.read()\n f.close()\n return ret\n\n\ndef main(args):\n dict_tokens = tokenizer.Tokenizer(read_data_from_file(args.dict))\n dict_set = set()\n cur = dict_tokens.next_token()\n while cur is not None:\n dict_set.add(cur[0])\n cur = dict_tokens.next_token()\n\n file_tokens = tokenizer.Tokenizer(read_data_from_file(args.file))\n cur = file_tokens.next_token()\n while cur is not None:\n if cur[0] not in dict_set:\n print(cur)\n cur = file_tokens.next_token()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'dict'\n )\n parser.add_argument(\n 'file'\n )\n main(parser.parse_args())", "sub_path": "semester-1/fundamentals-of-computer-science/python/4-mistakes.py", "file_name": "4-mistakes.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tokenizer.Tokenizer", "line_number": 13, "usage_type": "call"}, {"api_name": "tokenizer.Tokenizer", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "615031076", "text": "from flask 
import Flask,request,jsonify\nimport telebot\nimport json\ntoken=\"781229574:AAGC6K39EQ1VNcf2RTOlLpXg_KWoHPAZTI\"\n\napp = Flask(__name__)\nbot=telebot.TeleBot(token)\n\n\n\n@app.route('/',methods=[\"POST\",\"GET\"])\ndef hello_world():\n bot.set_webhook(\"https://weatherbetabot.herokuapp.com/\")\n if request.method == \"POST\":\n r=json.loads(request.data)\n chat_id=r['message']['chat']['id']\n bot.send_message(chat_id,'it work')\n return (jsonify(r),200)\n else:\n \n return ('Hi',200,None)\n", "sub_path": "app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "288467779", "text": "# Copyright (c) 2021 The Toltec Contributors\n# SPDX-License-Identifier: MIT\n\"\"\"Build recipes and create packages.\"\"\"\n\nimport shutil\nfrom typing import (\n Any,\n Deque,\n Dict,\n Iterable,\n List,\n MutableMapping,\n Optional,\n Tuple,\n)\nfrom collections import deque\nimport re\nimport os\nimport logging\nimport textwrap\nimport docker\nimport requests\nfrom . import bash, util, ipk, paths\nfrom .recipe import Recipe, Package\nfrom .version import DependencyKind\n\nlogger = logging.getLogger(__name__)\n\n\nclass BuildError(Exception):\n \"\"\"Raised when a build step fails.\"\"\"\n\n\nclass BuildContextAdapter(logging.LoggerAdapter):\n \"\"\"Prefix log entries with information about the current build target.\"\"\"\n\n def process(\n self, msg: str, kwargs: MutableMapping[str, Any]\n ) -> Tuple[str, MutableMapping[str, Any]]:\n prefix = \"\"\n\n if \"recipe\" in self.extra:\n prefix += self.extra[\"recipe\"]\n\n if \"package\" in self.extra:\n prefix += f\" ({self.extra['package']})\"\n\n if prefix:\n return f\"{prefix}: {msg}\", kwargs\n\n return msg, kwargs\n\n\nclass Builder: # pylint: disable=too-few-public-methods\n \"\"\"Helper class for building recipes.\"\"\"\n\n # Detect non-local paths\n URL_REGEX = re.compile(r\"[a-z]+://\")\n\n # Prefix for all Toltec Docker images\n IMAGE_PREFIX = \"ghcr.io/toltec-dev/\"\n\n # Toltec Docker image used for generic tasks\n DEFAULT_IMAGE = \"toolchain:v1.3.1\"\n\n def __init__(self, work_dir: str, repo_dir: str) -> None:\n \"\"\"\n Create a builder helper.\n\n :param work_dir: directory where packages are built\n :param repo_dir: directory where built packages are stored\n \"\"\"\n self.work_dir = work_dir\n os.makedirs(work_dir, exist_ok=True)\n\n self.repo_dir = repo_dir\n os.makedirs(repo_dir, exist_ok=True)\n\n self.install_lib = \"\"\n install_lib_path = os.path.join(paths.SCRIPTS_DIR, \"install-lib\")\n\n with open(install_lib_path, \"r\") as file:\n for line in file:\n if not line.strip().startswith(\"#\"):\n self.install_lib += line\n\n self.context: Dict[str, str] = {}\n self.adapter = BuildContextAdapter(logger, self.context)\n\n try:\n self.docker = docker.from_env()\n except docker.errors.DockerException as 
err:\n raise BuildError(\n \"Unable to connect to the Docker daemon. \\\nPlease check that the service is running and that you have the necessary \\\npermissions.\"\n ) from err\n\n def make(\n self, recipe: Recipe, packages: Optional[Iterable[Package]] = None\n ) -> bool:\n \"\"\"\n Build a recipe and create its associated packages.\n\n :param recipe: recipe to make\n :param packages: list of packages of the recipe to make\n (default: all of them)\n :returns: true if all packages were built correctly\n \"\"\"\n self.context[\"recipe\"] = recipe.name\n build_dir = os.path.join(self.work_dir, recipe.name)\n\n if not util.check_directory(\n build_dir,\n f\"The build directory '{os.path.relpath(build_dir)}' for recipe \\\n'{recipe.name}' already exists.\\nWould you like to [c]ancel, [r]emove \\\nthat directory, or [k]eep it (not recommended)?\",\n ):\n return False\n\n src_dir = os.path.join(build_dir, \"src\")\n os.makedirs(src_dir, exist_ok=True)\n\n base_pkg_dir = os.path.join(build_dir, \"pkg\")\n os.makedirs(base_pkg_dir, exist_ok=True)\n\n self._fetch_source(recipe, src_dir)\n self._prepare(recipe, src_dir)\n self._build(recipe, src_dir)\n self._strip(recipe, src_dir)\n\n for package in (\n packages if packages is not None else recipe.packages.values()\n ):\n self.context[\"package\"] = package.name\n\n pkg_dir = os.path.join(base_pkg_dir, package.name)\n os.makedirs(pkg_dir, exist_ok=True)\n\n self._package(package, src_dir, pkg_dir)\n self._archive(package, pkg_dir)\n del self.context[\"package\"]\n\n return True\n\n def _fetch_source(\n self,\n recipe: Recipe,\n src_dir: str,\n ) -> None:\n \"\"\"Fetch and extract all source files required to build a recipe.\"\"\"\n self.adapter.info(\"Fetching source files\")\n\n for source in recipe.sources:\n filename = os.path.basename(source.url)\n local_path = os.path.join(src_dir, filename)\n\n if self.URL_REGEX.match(source.url) is None:\n # Get source file from the recipe’s directory\n shutil.copy2(os.path.join(recipe.path, source.url), local_path)\n else:\n # Fetch source file from the network\n req = requests.get(source.url)\n\n if req.status_code != 200:\n raise BuildError(\n f\"Unexpected status code while fetching \\\nsource file '{source.url}', got {req.status_code}\"\n )\n\n with open(local_path, \"wb\") as local:\n for chunk in req.iter_content(chunk_size=1024):\n local.write(chunk)\n\n # Verify checksum\n if (\n source.checksum != \"SKIP\"\n and util.file_sha256(local_path) != source.checksum\n ):\n raise BuildError(\n f\"Invalid checksum for source file {source.url}\"\n )\n\n # Automatically extract source archives\n if not source.noextract:\n if not util.auto_extract(local_path, src_dir):\n self.adapter.debug(\n \"Not extracting %s (unsupported archive type)\",\n local_path,\n )\n\n def _prepare(self, recipe: Recipe, src_dir: str) -> None:\n \"\"\"Prepare source files before building.\"\"\"\n script = recipe.functions[\"prepare\"]\n\n if not script:\n self.adapter.debug(\"Skipping prepare (nothing to do)\")\n return\n\n self.adapter.info(\"Preparing source files\")\n logs = bash.run_script(\n script=script,\n variables={\n **recipe.variables,\n **recipe.custom_variables,\n \"srcdir\": src_dir,\n },\n )\n\n self._print_logs(logs, \"prepare()\")\n\n def _build(self, recipe: Recipe, src_dir: str) -> None:\n \"\"\"Build artifacts for a recipe.\"\"\"\n script = recipe.functions[\"build\"]\n\n if not script:\n self.adapter.debug(\"Skipping build (nothing to do)\")\n return\n\n self.adapter.info(\"Building artifacts\")\n\n # Set fixed 
atime and mtime for all the source files\n epoch = int(recipe.timestamp.timestamp())\n\n for filename in util.list_tree(src_dir):\n os.utime(filename, (epoch, epoch))\n\n mount_src = \"/src\"\n repo_src = \"/repo\"\n uid = os.getuid()\n pre_script: List[str] = []\n\n # Install required dependencies\n build_deps = []\n host_deps = []\n\n for dep in recipe.makedepends:\n if dep.kind == DependencyKind.Build:\n build_deps.append(dep.package)\n elif dep.kind == DependencyKind.Host:\n host_deps.append(dep.package)\n\n if build_deps:\n pre_script.extend(\n (\n \"export DEBIAN_FRONTEND=noninteractive\",\n \"apt-get update -qq\",\n \"apt-get install -qq --no-install-recommends\"\n ' -o Dpkg::Options::=\"--force-confdef\"'\n ' -o Dpkg::Options::=\"--force-confold\"'\n \" -- \" + \" \".join(build_deps),\n )\n )\n\n if host_deps:\n pre_script.extend(\n (\n \"opkg update --verbosity=0 --offline-root $SYSROOT\",\n \"opkg install --verbosity=0 --no-install-recommends\"\n \" --offline-root $SYSROOT\"\n \" -- \" + \" \".join(host_deps),\n )\n )\n\n logs = bash.run_script_in_container(\n self.docker,\n image=self.IMAGE_PREFIX + recipe.image,\n mounts=[\n docker.types.Mount(\n type=\"bind\",\n source=os.path.abspath(src_dir),\n target=mount_src,\n ),\n docker.types.Mount(\n type=\"bind\",\n source=os.path.abspath(self.repo_dir),\n target=repo_src,\n ),\n ],\n variables={\n **recipe.variables,\n **recipe.custom_variables,\n \"srcdir\": mount_src,\n },\n script=\"\\n\".join(\n (\n *pre_script,\n f'cd \"{mount_src}\"',\n script,\n f'chown -R {uid}:{uid} \"{mount_src}\"',\n )\n ),\n )\n\n self._print_logs(logs, \"build()\")\n\n def _strip(self, recipe: Recipe, src_dir: str) -> None:\n \"\"\"Strip all debugging symbols from binaries.\"\"\"\n if \"nostrip\" in recipe.flags:\n self.adapter.debug(\"Not stripping binaries (nostrip flag set)\")\n return\n\n self.adapter.info(\"Stripping binaries\")\n mount_src = \"/src\"\n\n logs = bash.run_script_in_container(\n self.docker,\n image=self.IMAGE_PREFIX + self.DEFAULT_IMAGE,\n mounts=[\n docker.types.Mount(\n type=\"bind\",\n source=os.path.abspath(src_dir),\n target=mount_src,\n )\n ],\n variables={},\n script=\"\\n\".join(\n (\n # Strip binaries in the target arch\n f'find \"{mount_src}\" -type f -executable -print0 \\\n| xargs --no-run-if-empty --null \"${{CROSS_COMPILE}}strip\" --strip-all || true',\n # Strip binaries in the host arch\n f'find \"{mount_src}\" -type f -executable -print0 \\\n| xargs --no-run-if-empty --null strip --strip-all || true',\n )\n ),\n )\n\n self._print_logs(logs)\n\n def _package(self, package: Package, src_dir: str, pkg_dir: str) -> None:\n \"\"\"Make a package from a recipe’s build artifacts.\"\"\"\n self.adapter.info(\"Packaging build artifacts\")\n logs = bash.run_script(\n script=package.functions[\"package\"],\n variables={\n **package.variables,\n **package.custom_variables,\n \"srcdir\": src_dir,\n \"pkgdir\": pkg_dir,\n },\n )\n\n self._print_logs(logs, \"package()\")\n self.adapter.debug(\"Resulting tree:\")\n\n for filename in util.list_tree(pkg_dir):\n self.adapter.debug(\n \" - %s\",\n os.path.normpath(\n os.path.join(\"/\", os.path.relpath(filename, pkg_dir))\n ),\n )\n\n def _archive(self, package: Package, pkg_dir: str) -> None:\n \"\"\"Create an archive for a package.\"\"\"\n self.adapter.info(\"Creating archive\")\n ar_path = os.path.join(paths.REPO_DIR, package.filename())\n\n # Inject Oxide-specific hook for reloading apps\n if os.path.exists(os.path.join(pkg_dir, \"opt/usr/share/applications\")):\n oxide_hook = 
\"\\nreload-oxide-apps\\n\"\n package.functions[\"configure\"] += oxide_hook\n package.functions[\"postupgrade\"] += oxide_hook\n package.functions[\"postremove\"] += oxide_hook\n\n # Convert install scripts to Debian format\n scripts = {}\n script_header = \"\\n\".join(\n (\n textwrap.dedent(\n \"\"\"\\\n #!/usr/bin/env bash\n set -euo pipefail\n \"\"\"\n ),\n bash.put_variables(\n {\n **package.parent.variables,\n **package.variables,\n **package.custom_variables,\n }\n ),\n bash.put_functions(package.custom_functions),\n self.install_lib,\n )\n )\n\n for name, script, action in (\n (\"preinstall\", \"preinst\", \"install\"),\n (\"configure\", \"postinst\", \"configure\"),\n ):\n if package.functions[name]:\n scripts[script] = \"\\n\".join(\n (\n script_header,\n textwrap.dedent(\n f\"\"\"\\\n if [[ $1 = {action} ]]; then\n script() {{\n \"\"\"\n ),\n package.functions[name],\n textwrap.dedent(\n \"\"\"\\\n }\n script\n fi\n \"\"\"\n ),\n )\n )\n\n for step in (\"pre\", \"post\"):\n if (\n package.functions[step + \"upgrade\"]\n or package.functions[step + \"remove\"]\n ):\n script = script_header\n\n for action in (\"upgrade\", \"remove\"):\n if package.functions[step + action]:\n script += \"\\n\".join(\n (\n textwrap.dedent(\n f\"\"\"\\\n if [[ $1 = {action} ]]; then\n script() {{\n \"\"\"\n ),\n package.functions[step + action],\n textwrap.dedent(\n \"\"\"\\\n }\n script\n fi\n \"\"\"\n ),\n )\n )\n\n scripts[step + \"rm\"] = script\n\n self.adapter.debug(\"Install scripts:\")\n\n if scripts:\n for script in sorted(scripts):\n self.adapter.debug(\" - %s\", script)\n else:\n self.adapter.debug(\"(none)\")\n\n epoch = int(package.parent.timestamp.timestamp())\n\n with open(ar_path, \"wb\") as file:\n ipk.make_ipk(\n file,\n epoch=epoch,\n pkg_dir=pkg_dir,\n metadata=package.control_fields(),\n scripts=scripts,\n )\n\n # Set fixed atime and mtime for the resulting archive\n os.utime(ar_path, (epoch, epoch))\n\n def _print_logs(\n self,\n logs: bash.LogGenerator,\n function_name: str = None,\n max_lines_on_fail: int = 50,\n ) -> None:\n \"\"\"\n Print logs to the debug output or buffer and print the last n log lines\n if a ScriptError is caught.\n\n :param logs: generator of log lines\n :param function_name: calling function name\n :param max_lines_on_fail: number of context lines to print\n in non-debug mode\n \"\"\"\n log_buffer: Deque[str] = deque()\n try:\n for line in logs:\n if self.adapter.getEffectiveLevel() <= logging.DEBUG:\n self.adapter.debug(line)\n else:\n if len(log_buffer) == max_lines_on_fail:\n log_buffer.popleft()\n log_buffer.append(line)\n except bash.ScriptError as err:\n if len(log_buffer) > 0:\n self.adapter.info(\n f\"Only showing up to {max_lines_on_fail} lines of context. 
\"\n + \"Use --verbose for the full output.\"\n )\n for line in log_buffer:\n self.adapter.error(line)\n\n if function_name:\n self.adapter.error(f\"{function_name} failed\")\n\n raise err\n", "sub_path": "scripts/toltec/builder.py", "file_name": "builder.py", "file_ext": "py", "file_size_in_byte": 16413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.LoggerAdapter", "line_number": 34, "usage_type": "attribute"}, {"api_name": "typing.MutableMapping", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 74, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 87, "usage_type": "name"}, {"api_name": "docker.from_env", "line_number": 91, "usage_type": "call"}, {"api_name": "docker.errors", "line_number": 92, "usage_type": "attribute"}, {"api_name": "recipe.Recipe", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 100, "usage_type": "name"}, {"api_name": "recipe.Package", "line_number": 100, "usage_type": "name"}, {"api_name": "recipe.name", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "recipe.name", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "recipe.name", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 125, "usage_type": "call"}, {"api_name": "recipe.packages.values", "line_number": 133, "usage_type": "call"}, {"api_name": "recipe.packages", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 138, "usage_type": "call"}, {"api_name": "recipe.Recipe", "line_number": 148, "usage_type": "name"}, {"api_name": "recipe.sources", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
156, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "recipe.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 163, "usage_type": "call"}, {"api_name": "recipe.Recipe", "line_number": 192, "usage_type": "name"}, {"api_name": "recipe.functions", "line_number": 194, "usage_type": "attribute"}, {"api_name": "recipe.variables", "line_number": 204, "usage_type": "attribute"}, {"api_name": "recipe.custom_variables", "line_number": 205, "usage_type": "attribute"}, {"api_name": "recipe.Recipe", "line_number": 212, "usage_type": "name"}, {"api_name": "recipe.functions", "line_number": 214, "usage_type": "attribute"}, {"api_name": "recipe.timestamp.timestamp", "line_number": 223, "usage_type": "call"}, {"api_name": "recipe.timestamp", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.utime", "line_number": 226, "usage_type": "call"}, {"api_name": "os.getuid", "line_number": 230, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 231, "usage_type": "name"}, {"api_name": "recipe.makedepends", "line_number": 237, "usage_type": "attribute"}, {"api_name": "version.DependencyKind.Build", "line_number": 238, "usage_type": "attribute"}, {"api_name": "version.DependencyKind", "line_number": 238, "usage_type": "name"}, {"api_name": "version.DependencyKind.Host", "line_number": 240, "usage_type": "attribute"}, {"api_name": "version.DependencyKind", "line_number": 240, "usage_type": "name"}, {"api_name": "recipe.image", "line_number": 267, "usage_type": "attribute"}, {"api_name": "docker.types.Mount", "line_number": 269, "usage_type": "call"}, {"api_name": "docker.types", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 271, "usage_type": "attribute"}, {"api_name": "docker.types.Mount", "line_number": 274, "usage_type": "call"}, {"api_name": "docker.types", "line_number": 274, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "recipe.variables", "line_number": 281, "usage_type": "attribute"}, {"api_name": "recipe.custom_variables", "line_number": 282, "usage_type": "attribute"}, {"api_name": "recipe.Recipe", "line_number": 297, "usage_type": "name"}, {"api_name": "recipe.flags", "line_number": 299, "usage_type": "attribute"}, {"api_name": "docker.types.Mount", "line_number": 310, "usage_type": "call"}, {"api_name": "docker.types", "line_number": 310, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path", "line_number": 312, "usage_type": "attribute"}, {"api_name": "recipe.Package", "line_number": 331, "usage_type": "name"}, {"api_name": "os.path.normpath", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 351, "usage_type": "call"}, {"api_name": "recipe.Package", "line_number": 355, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 358, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 358, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 361, "usage_type": "call"}, {"api_name": "os.path", "line_number": 361, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 361, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 371, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 397, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 404, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 425, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 432, "usage_type": "call"}, {"api_name": "os.utime", "line_number": 464, "usage_type": "call"}, {"api_name": "typing.Deque", "line_number": 481, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 481, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 484, "usage_type": "attribute"}]} +{"seq_id": "3002260", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport helper\nimport matplotlib.pyplot as plt\nfrom keras.applications import *\nfrom keras.callbacks import EarlyStopping\nimport os\n\n\n# In[2]:\n\n\n#设置各种参数\ntrain_path = ['./data/train2/cat', './data/train2/dog']\ntest_path ='./data/test1/test1'\nimg_size =(299,299)\nlayer_num = 125\nmodel_image ='./models/model_image_fine_tuning_xception_0403_02.png'\nmodel_weights_file = './models/weights_fine_tuning_xception_no_outliers_0403_02.h5'\ntemplate_csv_path = './predicts/sample_submission.csv'\ntarget_csv_path = './predicts/pred_fine_tuning_xception_no_outliers_0403_02.csv'\nMODEL = xception.Xception\npreprocess_func = xception.preprocess_input\n\n\n# In[3]:\n\n\n#获取训练集数据\nX_train, y_train, image_files= helper.get_train_input_from_folder_with_subclasses(train_path, img_size, lambda_func=preprocess_func)\nprint(\"finished\")\n\n\n# In[4]:\n\n\n#构造模型,锁定base_model所有层\nmodel = helper.get_fine_tuning_first_model(MODEL)\n\n#可视化模型\nhelper.visualize_model(model, model_image)\nprint(\"finished\")\n\n\n# In[5]:\n\n\nprint(\"start\")\n#第一次训练新添加层权重\nearly_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')\nmodel.fit(X_train, y_train, batch_size=128, epochs=8, validation_split=0.2, callbacks=[early_stopping])\nprint(\"finished\")\n\n\n# In[6]:\n\n\nprint(\"start\")\n#放开若干层权重,再次训练\nmodel = helper.get_fine_tuning_second_model(model, layer_num)\nearly_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')\nmodel.fit(X_train, y_train, batch_size=128, epochs=60, validation_split=0.2, callbacks=[early_stopping])\nprint(\"finished\")\n\n\n# In[7]:\n\n\n#保存模型参数\nmodel.save_weights(model_weights_file)\ndel X_train\ndel y_train\nprint(\"finished\")\n\n\n# In[8]:\n\n\nprint(\"start\")\n#获取测试数据和对应文件\nX_test, test_files = helper.get_input_from_folder_with_image_files(test_path, img_size, lambda_func=preprocess_func)\n\n#获取文件basename\nimage_file_names = [os.path.basename(path) for path in test_files]\n\n#预测并保存预测结果到csv\nhelper.predict_and_update_to_csv(model, X_test, image_file_names, template_csv_path, target_csv_path)\n\nprint(\"finished\")\n\n", "sub_path": "p6_p7/py/fine_tuning_xception_no_outliers_final.py", "file_name": "fine_tuning_xception_no_outliers_final.py", "file_ext": "py", "file_size_in_byte": 2225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "helper.get_train_input_from_folder_with_subclasses", "line_number": 
34, "usage_type": "call"}, {"api_name": "helper.get_fine_tuning_first_model", "line_number": 42, "usage_type": "call"}, {"api_name": "helper.visualize_model", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 54, "usage_type": "call"}, {"api_name": "helper.get_fine_tuning_second_model", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 65, "usage_type": "call"}, {"api_name": "helper.get_input_from_folder_with_image_files", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "helper.predict_and_update_to_csv", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "253470660", "text": "# -*- coding: utf-8 -*-\nfrom plone import api\nfrom ftw.testbrowser import browsing\nfrom ftw.testbrowser.pages import factoriesmenu\nfrom opengever.testing import IntegrationTestCase\nfrom zope.annotation.interfaces import IAnnotations\n\n\nclass TestCreateDocFromOneoffixxTemplate(IntegrationTestCase):\n\n def setUp(self):\n super(TestCreateDocFromOneoffixxTemplate, self).setUp()\n self.activate_feature(\"officeconnector-checkout\")\n self.activate_feature(\"oneoffixx\")\n\n @browsing\n def test_document_creation_from_oneoffixx_template_creates_shadow_doc(self, browser):\n self.login(self.regular_user, browser)\n browser.open(self.dossier)\n factoriesmenu.add('document_with_oneoffixx_template')\n\n node = browser.css(\"#form-widgets-template-2574d08d-95ea-4639-beab-3103fe4c3bc7\").first\n browser.fill({'Title': 'A doc'})\n browser.fill({'Template': node.get(\"title\")})\n browser.find('Save').click()\n\n self.assertEqual('document-state-shadow',\n api.content.get_state(browser.context))\n self.assertTrue(browser.context.is_shadow_document())\n\n @browsing\n def test_template_id_stored_in_annotations(self, browser):\n self.login(self.regular_user, browser)\n browser.open(self.dossier)\n factoriesmenu.add('document_with_oneoffixx_template')\n\n node = browser.css(\"#form-widgets-template-2574d08d-95ea-4639-beab-3103fe4c3bc7\").first\n browser.fill({'Title': 'A doc'})\n browser.fill({'Template': node.get(\"title\")})\n browser.find('Save').click()\n\n annotations = IAnnotations(browser.context)\n self.assertEqual(node.get(\"value\"), annotations['template-id'])\n\n\nclass TestOneOffixxTemplateFeature(IntegrationTestCase):\n\n @browsing\n def test_doc_from_oneoffixx_template_available_if_oneoffixxtemplate_feature_enabled(self, browser):\n self.activate_feature(\"officeconnector-checkout\")\n self.login(self.manager, browser)\n browser.open(self.dossier)\n\n self.assertEquals(\n ['Document',\n 'document_with_template',\n 'Task',\n 'Add task from template',\n 'Subdossier',\n 'Participant'],\n factoriesmenu.addable_types())\n\n self.activate_feature(\"oneoffixx\")\n browser.open(self.dossier)\n self.assertEquals(\n ['Document',\n 'document_with_template',\n 'document_with_oneoffixx_template',\n 'Task',\n 'Add task from template',\n 'Subdossier',\n 'Participant'],\n factoriesmenu.addable_types())\n", "sub_path": "opengever/oneoffixx/tests/test_oneoffixx.py", "file_name": "test_oneoffixx.py", "file_ext": "py", "file_size_in_byte": 2659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "opengever.testing.IntegrationTestCase", "line_number": 9, "usage_type": "name"}, {"api_name": 
"ftw.testbrowser.pages.factoriesmenu.add", "line_number": 20, "usage_type": "call"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu", "line_number": 20, "usage_type": "name"}, {"api_name": "plone.api.content.get_state", "line_number": 28, "usage_type": "call"}, {"api_name": "plone.api.content", "line_number": 28, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 28, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 16, "usage_type": "name"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu.add", "line_number": 35, "usage_type": "call"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu", "line_number": 35, "usage_type": "name"}, {"api_name": "zope.annotation.interfaces.IAnnotations", "line_number": 42, "usage_type": "call"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 31, "usage_type": "name"}, {"api_name": "opengever.testing.IntegrationTestCase", "line_number": 46, "usage_type": "name"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu.addable_types", "line_number": 61, "usage_type": "call"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu", "line_number": 61, "usage_type": "name"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu.addable_types", "line_number": 73, "usage_type": "call"}, {"api_name": "ftw.testbrowser.pages.factoriesmenu", "line_number": 73, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "407870143", "text": "from django import template\nfrom django.templatetags.static import static\nregister = template.Library()\n\n# Django incluison tag plays elegant way to separete bootstrap template logic\n# from app template, that separation is need for theme the projects_type\n\n# Pass in kwargs the elements to fill the cards\n\n# Please note that all templates are contained in cards\n# You are free to arrange them in grids or other elements\n\n@register.inclusion_tag('includes/merge_html.html')\ndef merge_html(*args, **kwargs):\n html_text = {}\n for html_piece in args:\n html_text.update(html_piece)\n\n return {\"html_text\":html_text,\n }\n", "sub_path": "core/templatetags/general_tags.py", "file_name": "general_tags.py", "file_ext": "py", "file_size_in_byte": 640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.template.Library", "line_number": 3, "usage_type": "call"}, {"api_name": "django.template", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "242464468", "text": "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 09:33, 16/03/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieu1995 %\n# ------------------------------------------------------------------------------------------------------%\n\nimport concurrent.futures as parallel\nfrom functools import partial\nimport numpy as np\nfrom mealpy.optimizer import Optimizer\n\n\nclass BaseGA(Optimizer):\n \"\"\"\n Genetic Algorithm (GA)\n Link:\n https://blog.sicara.com/getting-started-genetic-algorithms-python-tutorial-81ffa1dd72f9\n https://www.tutorialspoint.com/genetic_algorithms/genetic_algorithms_quick_guide.htm\n https://www.analyticsvidhya.com/blog/2017/07/introduction-to-genetic-algorithm/\n \"\"\"\n\n def __init__(self, problem, epoch=10000, pop_size=100, pc=0.95, pm=0.025, **kwargs):\n 
\"\"\"\n Args:\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n pc (float): cross-over probability, default = 0.95\n pm (float): mutation probability, default = 0.025\n \"\"\"\n super().__init__(problem, kwargs)\n self.nfe_per_epoch = 2 * pop_size\n self.sort_flag = False\n\n self.epoch = epoch\n self.pop_size = pop_size\n self.pc = pc\n self.pm = pm\n\n def create_child(self, agent_i, pop_copy, list_fitness):\n ### Selection\n # c1, c2 = self._get_parents_kway_tournament_selection__(pop, k_way=0.2)\n id_c1 = self.get_index_roulette_wheel_selection(list_fitness)\n id_c2 = self.get_index_roulette_wheel_selection(list_fitness)\n\n w1 = pop_copy[id_c1][self.ID_POS]\n w2 = pop_copy[id_c2][self.ID_POS]\n ### Crossover\n if np.random.uniform() < self.pc:\n w1, w2 = self.crossover_arthmetic_recombination(w1, w2)\n\n ### Mutation, remove third loop here\n w1 = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.pm, np.random.uniform(self.problem.lb, self.problem.ub), w1)\n w2 = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.pm, np.random.uniform(self.problem.lb, self.problem.ub), w2)\n\n if np.random.uniform() < 0.5:\n return [w1.copy(), self.get_fitness_position(w1)]\n else:\n return [w2.copy(), self.get_fitness_position(w2)]\n\n def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):\n \"\"\"\n Args:\n mode (str): 'sequential', 'thread', 'process'\n + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)\n + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)\n + 'process': recommended for hard and big task (> 2 minutes for calculating objective)\n\n Returns:\n [position, fitness value]\n \"\"\"\n # c1, c2 = self._get_parents_kway_tournament_selection__(pop, k_way=0.2)\n list_fitness = np.array([agent[self.ID_FIT][self.ID_TAR] for agent in pop])\n pop_copy = pop.copy()\n\n if mode == \"thread\":\n with parallel.ThreadPoolExecutor() as executor:\n pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, list_fitness=list_fitness), pop)\n pop = [x for x in pop_child]\n elif mode == \"process\":\n with parallel.ProcessPoolExecutor() as executor:\n pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, list_fitness=list_fitness), pop)\n pop = [x for x in pop_child]\n else:\n pop = [self.create_child(agent, pop_copy, list_fitness) for agent in pop]\n return pop\n", "sub_path": "mealpy/evolutionary_based/GA.py", "file_name": "GA.py", "file_ext": "py", "file_size_in_byte": 4291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "mealpy.optimizer.Optimizer", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", 
"line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 79, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 79, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 80, "usage_type": "call"}, {"api_name": "concurrent.futures.ProcessPoolExecutor", "line_number": 83, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 83, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "632017191", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import *\r\nfrom scipy.interpolate import *\r\nfrom scipy.optimize import *\r\n\r\n# integration\r\n\r\ndef f(x):\r\n return 3.0*x*x +1.0\r\nI,err=quad(f,0,1)\r\nprint(\"I= \",I,\"err: \",err)\r\n\r\nx_given=np.linspace(0,10,10)\r\ny_given=np.cos(x_given**2.0/8.0)\r\nxx=np.linspace(0,10,1000)\r\nyy=np.cos(xx**2.0/8.0)\r\n\r\n# plt.plot(x_given,y_given,'o',label='given data')\r\n# plt.plot(xx,yy,':',label='perfect')\r\n# plt.plot('x')\r\n# plt.ylabel('y')\r\n# plt.legend(loc='best')\r\n\r\n\r\n# interpolation\r\nx_i=np.linspace(0,10,1000)\r\n# -----Linear interpolation\r\nf_linear=interp1d(x_given,y_given)\r\ny_il=f_linear(x_i)\r\n\r\nf_spline=interp1d(x_given,y_given,kind='cubic')\r\ny_is=f_spline(x_i)\r\n\r\n# plt.plot(x_given,y_given,'o')\r\n# plt.plot(x_i,y_il,'-')\r\n# plt.plot(x_i,y_is,'--')\r\n# plt.plot(xx,yy,':')\r\n# plt.legend(['data','linear','spline','perfect'],loc='best')\r\n\r\n# plt.show()\r\n\r\nx_gv=np.array([0.,1.,2.,3.,4.,5.])\r\ny_gv=np.array([0,0.8,0.9,0.1,-0.8,-1.0])\r\nx_p=np.linspace(-2,6.0,100)\r\np3=np.polyfit(x_gv,y_gv,3)\r\ny_p=np.polyval(p3,x_p)\r\nplt.plot(x_gv,y_gv,'o')\r\nplt.plot(x_p,y_p,'-')\r\nplt.legend(['data','polyfit'],loc='best')\r\nplt.ylim(-2,2)\r\nprint(p3)\r\nplt.show()\r\n\r\n# General curve fits\r\n\r\ndef f(x,a,b,c):\r\n return a*np.exp(-b*x)+c\r\nx_g=np.linspace(0,4,50)\r\ny_g=f(x_g,2.5,1.3,0.5)+0.2*np.random.normal(size=len(x_g))\r\n\r\nparams,extras=curve_fit(f,x_g,y_g)\r\nprint(\"c=%g, b=%g,c=%g\" %(params[0],params[1],params[2]))\r\nplt.plot(x_g,y_g,'o')\r\nplt.plot(x_g,f(x_g,params[0],params[1],params[2]))\r\nplt.legend(['data','fit'],loc='best')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "Bases/Section1.py", "file_name": "Section1.py", "file_ext": "py", "file_size_in_byte": 1605, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.linspace", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "113401861", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals, print_function\n\n#A HistoryGraph Immutable Object\nimport uuid\nfrom .changetype import *\nfrom . import fields\nfrom operator import itemgetter\nimport hashlib\nimport six\n\nclass ImmutableObject(object):\n is_singleton = False\n\n\n def __init__(self, **kwargs):\n # Initialise the immutable object from the kwargs. 
It can never be changed once initialise\n self.insetup = True\n self._field = dict()\n variables = [a for a in dir(self.__class__) if not a.startswith('__') and not callable(getattr(self.__class__,a))]\n for k in variables:\n var = getattr(self.__class__, k)\n self._field[k] = var\n assert isinstance(var, fields.Collection) == False #Immutable objects not allow references to other objects just use a FieldText as a key\n if isinstance(var, fields.Field):\n setattr(self, k, var.create_instance(self, k))\n if k in kwargs:\n setattr(self, k, kwargs[k])\n self._prevhash = kwargs['_prevhash'] if '_prevhash' in kwargs else ''\n\n self.insetup = False\n\n def __setattr__(self, name, value):\n if name == \"insetup\":\n super(ImmutableObject, self).__setattr__(name, value)\n return\n if not self.insetup:\n assert False #Attempting to change an immutable object\n return\n super(ImmutableObject, self).__setattr__(name, value)\n\n def get_hash(self):\n #Immutable objects don't have UUIDs they have SHA256 hashes of their content\n s = sorted([(k,str(getattr(self, k))) for (k,v) in six.iteritems(self._field)], key=itemgetter(0)) + [('_prevhash', str(self._prevhash))]\n\n return hashlib.sha256(str(s).encode('utf-8')).hexdigest()\n\n def as_dict(self):\n #Return a dict suitable for transport\n ret = dict()\n for k in self._field:\n ret[k] = getattr(self, k)\n ret[\"_prevhash\"] = self._prevhash\n ret[\"classname\"] = self.__class__.__name__\n ret[\"hash\"] = self.get_hash()\n return ret\n\n def get_is_deleted(self):\n return False\n", "sub_path": "historygraph/immutableobject.py", "file_name": "immutableobject.py", "file_ext": "py", "file_size_in_byte": 2190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "six.iteritems", "line_number": 44, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 44, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "6401508", "text": "#coding=utf8\n#__author__chry\n#__date:2018/4/23\nfrom multiprocessing import Process,Manager\ndef f(d,l,n):\n\td[n] = '1'\n\td['2'] = 2\n\td[0.25] = None\n\tl.append(n)\n\tprint(l)\nif __name__=='__main__':\n\twith Manager() as manger:\n\t\td = manger.dict()\n\t\tl = manger.list(range(5))\n\t\tp_list=[]\n\t\tfor i in range(10):\n\t\t\tp=Process(target=f,args=(d,l,i))\n\t\t\tp.start()\n\t\t\tp_list.append(p)\n\t\tfor res in p_list:\n\t\t\tres.join()", "sub_path": "threading_learing/Manger.py", "file_name": "Manger.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "multiprocessing.Manager", "line_number": 12, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "407203490", "text": "import sys\nimport os\nimport argparse\n\n# Add parent directory to path to import general.py\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n# Import ../general.py\nfrom general import *\n\nTEST_NAME = \"network\"\nCLIENT_NAME = \"client-ngtcp2\"\nCLIENT_IMPLEMENTATION = \"ngtcp2\"\nQUIC_RESULTS_DIR = \"/root/quic-results\"\n\n\ndef run_test_client(client_container_id, client_name, server_name, network_setting, resource):\n print(\"starting test client to test \" + server_name)\n if client_container_id is None:\n print(\"cannot run test client, no container\")\n exit(-1)\n command = \"docker 
exec -i \" + client_container_id + \\\n \" python /scripts/network/network-client-test.py --client \" + client_name + \\\n \" --server \" + server_name + \" --network_setting \" + network_setting + \" --resource \" + resource\n print(\"test client command: \" + command)\n run_call_command(command)\n\n\ndef run_test_server(container_id, server_name, network_setting, resource):\n print(\"starting test server to test \" + server_name)\n if container_id is None:\n print(\"cannot run server, no container\")\n exit(-1)\n command = \"docker exec -i -d \" + container_id + \" python -u /scripts/network/network-emu-server-test.py --server \" + \\\n server_name + \" --network_setting \" + network_setting + \" --resource \" + resource\n print(\"test server command: \" + command)\n run_subprocess_command(command)\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--amount_of_runs', help='Amount of times the compliance tests need to be run',\n nargs='?', const=1, type=int, default=1)\n args = parser.parse_args()\n\n implementations = [\n \"ngtcp2\",\n \"quicker\",\n \"quant\"\n ]\n\n network_settings = [\n \"wifi\",\n \"wifi_transatlantic_loss\",\n \"4g\",\n \"2g_loss\"\n ]\n resources = [\n #\"index.html\",\n \"large-text.txt\",\n \"image.jpg\"\n ]\n\n remove_containers()\n client_container_id = None\n for x in range(0, args.amount_of_runs):\n update_start_time()\n for resource in resources:\n for implementation in implementations:\n for network_setting in network_settings:\n container_id = create_server_container(TEST_NAME,\n implementation)\n client_container_id = restart_test_client(TEST_NAME,\n CLIENT_IMPLEMENTATION, CLIENT_NAME, client_container_id, implementation)\n run_test_server(container_id, implementation,\n network_setting, resource)\n run_test_client(client_container_id, CLIENT_IMPLEMENTATION,\n implementation, network_setting, resource)\n remove_container(container_id)\n remove_container(client_container_id)\n print(\"network test done\")\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "scripts/network/network-emu-test.py", "file_name": "network-emu-test.py", "file_ext": "py", "file_size_in_byte": 2979, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.insert", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "306174066", "text": "\nimport argparse\nimport os\n\nimport torch\nimport posenet\n\ndef valid_tensor(s):\n msg = \"Not a valid resolution: '{0}' [CxHxW].\".format(s)\n try:\n q = s.split('x')\n if len(q) != 3:\n raise argparse.ArgumentTypeError(msg)\n return [int(v) for v in q]\n except ValueError:\n raise argparse.ArgumentTypeError(msg)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Posenet exporter')\n\n parser.add_argument('-m','--model', type=int, default=101) # integer depth multiplier (50, 75, 100, 101)\n parser.add_argument('-s','--output_stride', type=int, default=16) # 16\n\n parser.add_argument('-r', '--ONNX_resolution', default=\"3x480x640\", type=valid_tensor,\n help='ONNX input resolution')\n parser.add_argument('-o', '--outfile', default='./out.onnx',\n 
help='output file path')\n\n args = parser.parse_args()\n return args\n\ndef main():\n args = parse_args()\n\n model = posenet.load_model(args.model, output_stride=args.output_stride)\n \n # Export ONNX file\n input_names = [ \"input:0\" ] # this are our standardized in/out nameing (required for runtime)\n output_names = [ \"output:0\" ]\n dummy_input = torch.randn([1]+args.ONNX_resolution)\n ONNX_path = args.outfile\n # Exporting -- CAFFE2 compatible\n # requires operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK\n # https://github.com/pytorch/pytorch/issues/41848\n # for CAFFE2 backend (old exports mode...)\n #torch.onnx.export(model, dummy_input, ONNX_path, input_names=input_names, output_names=output_names, \n # keep_initializers_as_inputs=True, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)\n # Exporting -- ONNX runtime compatible\n # keep_initializers_as_inputs=True -> is required for onnx optimizer...\n torch.onnx.export(model, dummy_input, ONNX_path, input_names=input_names, output_names=output_names,\n keep_initializers_as_inputs=True, opset_version=11)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "export.py", "file_name": "export.py", "file_ext": "py", "file_size_in_byte": 2056, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "argparse.ArgumentTypeError", "line_number": 13, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "posenet.load_model", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.onnx.export", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.onnx", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "156326706", "text": "# coding:utf-8\n# --author-- lanhua.zhou\nimport maya.cmds as cmds\n\nimport zfused_maya.node.core.check as check\nimport zfused_maya.node.core.clear as clear\n\nimport zfused_maya.widgets.checkwidget as checkwidget\nimport zfused_maya.tool.modeling.materialcheck as materialcheck\n\n\nclass ShadingCheck(checkwidget.CheckWidget):\n def __init__(self):\n super(ShadingCheck, self).__init__()\n self._init()\n self._check_all()\n self.recheck_button.clicked.connect(self._check_all)\n\n @classmethod\n def Reset(cls):\n cls.value = False\n \n def _check_all(self):\n _is_ok = True\n for widget in self.allCheckWidget:\n if self.auto_clear():\n widget.clear()\n value = widget.check()\n if not value:\n _is_ok = False\n widget.setHidden(False)\n else:\n if not self.show_all():\n widget.setHidden(True)\n else:\n widget.setHidden(False)\n \n checkwidget.CheckWidget.value = _is_ok\n print(checkwidget.CheckWidget.value)\n check.Check.value = _is_ok\n\n if _is_ok:\n self.close()\n\n def show(self):\n import zfused_maya.core.restricted as restricted\n import maya.cmds as cmds\n _has_per, _info = restricted.restricted()\n if not _has_per:\n cmds.confirmDialog(message = _info)\n return \n super(ShadingCheck, self).show()\n\n\n def _init(self):\n self.set_title_name(u\"材质文件检查\")\n\n #check file name\n widget = checkwidget.ItemWidget(u\"检查文件名\", check.file_name, None)\n self.add_widget(widget)\n \n #check transform attr\n widget = checkwidget.ItemWidget(u\"检查通道属性值\", _check_attr, None)\n self.add_widget(widget)\n #check rendering hierarchy\n widget = checkwidget.ItemWidget(u\"检查文件结构\", 
_check_hierarchy, None, False)\n        self.add_widget(widget)\n        #check history\n        widget = checkwidget.ItemWidget(u\"检查模型历史\", _check_history, None, False)\n        self.add_widget(widget)\n        #check equal widget\n        widget = checkwidget.ItemWidget(u\"检查相同模型\", _check_equalmesh, None)\n        self.add_widget(widget)\n        #check reference\n        widget = checkwidget.ItemWidget(u\"检查动画层\", check.animation_layer, clear.animation_layer)\n        self.add_widget(widget)\n        widget = checkwidget.ItemWidget(u\"检查未知节点\", check.unknown_node, clear.unknown_node)\n        self.add_widget(widget)\n        #check un exists files\n        widget = checkwidget.ItemWidget(u\"检查贴图文件是否不存在\", check.file_node, None, False)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查摄像机\", check.camera, clear.camera)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查灯光文件\", check.light, clear.light)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查动画曲线\", check.anim_curve, clear.anim_curve)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查显示层\", check.display_layer, clear.display_layer)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查渲染层\", check.render_layer, clear.render_layer)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查命名空间\", check.namespace, clear.namespace)\n        self.add_widget(widget)\n        \n        widget = checkwidget.ItemWidget(u\"检查重命名\", check.repeat, None, False)\n        self.add_widget(widget)\n        \n        #check texture path\n        widget = checkwidget.ItemWidget(u\"检查贴图路径\", check.texture_path, None, False)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查材质命名\", _check_material, materialcheck.CheckShader().repair, False)\n        self.add_widget(widget)\n\n        widget = checkwidget.ItemWidget(u\"检查贴图命名\", _check_tex_name, None, False)\n        self.add_widget(widget)\n\ndef _check_attr():\n    #get all transform\n    _un = [\"front\",\"persp\",\"side\",\"top\"]\n    _all_trans = cmds.ls(type = \"transform\")\n    _use_tans = list(set(_all_trans) - set(_un))\n    _de = []\n    for _tans in _use_tans:\n        _t = cmds.getAttr(\"%s.translate\"%_tans)\n        _r = cmds.getAttr(\"%s.rotate\"%_tans)\n        _s = cmds.getAttr(\"%s.scale\"%_tans)\n        _child = cmds.listRelatives(_tans, c = True, type = \"mesh\")\n        if _child:\n            if _t != [(0.0, 0.0, 0.0)] or _r != [(0.0, 0.0, 0.0)] or _s != [(1.0, 1.0, 1.0)]:\n                _de.append(_tans)\n    if _de:\n        info = u\"通道属性值不为空\\n\"\n        for child in _de:\n            info += \"{}\\n\".format(child)\n        return False,info\n    return True, None\n\ndef _check_history():\n    import pymel.core as pm\n    _history = []\n    allDags = pm.ls(dag = 1)\n    for dag in allDags: \n        _his = dag.history()\n        #_his = [n for n in dag.history(il=1, pdo = True)]\n        _his = [n for n in dag.history(il=1, pdo = True) if n.type() != \"shadingEngine\"]\n        if _his and dag.type() == \"mesh\":\n            _history.append(dag)\n    if _history:\n        _history = list(set(_history))\n        info = u\"错误:部分模型存在历史记录\\n\"\n        for child in _history:\n            info += u\"%s\\n\"%child\n        return False,info\n    else:\n        return True, None\n\ndef _check_hierarchy():\n    rendering = []\n    allDags = cmds.ls(dag = True)\n    for dag in allDags:\n        #print dag\n        #get \n        if cmds.objExists(\"%s.rendering\"%dag):\n            value = cmds.getAttr(\"%s.rendering\"%dag)\n            if value:\n                rendering.append(dag)\n    #return rendering\n    if not rendering:\n        info = u\"文件组织结构错误,请用分组工具分组整合文件\\n\"\n        return False,info\n    else:\n        return True, None\n\ndef _check_equalmesh():\n    import maya.api.OpenMaya as om\n    _info = []\n    _error_meshs = []\n    _top_dags = cmds.ls(type = \"mesh\")\n    for _top_dag in _top_dags:\n        #get dag hierarchy\n        allDags = 
cmds.ls(_top_dag, dag = True, ni = True, type = \"mesh\")\n # print allDags\n for dag in allDags:\n selectionList = om.MSelectionList()\n selectionList.add( dag)\n node = selectionList.getDependNode(0)\n fnMesh = om.MFnMesh(node)\n dag_info = \"\"\n dag_info += \" %s\"%(fnMesh.numVertices)\n dag_info += \" %s\"%(fnMesh.numEdges)\n dag_info += \" %s\"%(fnMesh.numPolygons)\n #_info.append(dag_info)\n if dag_info in _info:\n _error_meshs.append(fnMesh.name())\n else:\n _info.append(dag_info)\n if _error_meshs:\n _info = u\"场景存在相同模型\\n\"\n for _mesh in _error_meshs:\n _info += \"{}\\n\".format(_mesh)\n return False, _info\n return True, None\n\ndef _check_material():\n _check = materialcheck.CheckShader()\n _info = _check.check_shader()\n if _info:\n info = u\"材质命名错误(无法修复的请检查是否是默认材质)\\n\"\n info += \"\".join(sorted(_info))\n return False, info\n return True, None\n\ndef _check_tex_name():\n _check = materialcheck.CheckShader()\n _info = _check.check_texture()\n if _info:\n info = u\"贴图命名错误,请手动检查\\n\"\n info += \"\".join(sorted(_info))\n return False, info\n return True, None", "sub_path": "zfused_maya/zfused_maya/tool/shading/shadingcheck.py", "file_name": "shadingcheck.py", "file_ext": "py", "file_size_in_byte": 7569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "zfused_maya.widgets.checkwidget.CheckWidget", "line_number": 12, "usage_type": "attribute"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 12, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.CheckWidget", "line_number": 38, "usage_type": "attribute"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 38, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.CheckWidget", "line_number": 39, "usage_type": "attribute"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 39, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.Check", "line_number": 40, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 40, "usage_type": "name"}, {"api_name": "zfused_maya.core.restricted.restricted", "line_number": 48, "usage_type": "call"}, {"api_name": "zfused_maya.core.restricted", "line_number": 48, "usage_type": "name"}, {"api_name": "maya.cmds.confirmDialog", "line_number": 50, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 50, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 59, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 59, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.file_name", "line_number": 59, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 59, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 63, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 63, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 66, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 66, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 69, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 69, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 72, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", 
"line_number": 72, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 75, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 75, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.animation_layer", "line_number": 75, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 75, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.animation_layer", "line_number": 75, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 75, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 77, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 77, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.unknown_node", "line_number": 77, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 77, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.unknown_node", "line_number": 77, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 77, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 80, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 80, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.file_node", "line_number": 80, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 80, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 83, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 83, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.camera", "line_number": 83, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 83, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.camera", "line_number": 83, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 83, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 86, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 86, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.light", "line_number": 86, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 86, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.light", "line_number": 86, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 86, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 89, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 89, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.anim_curve", "line_number": 89, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 89, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.anim_curve", "line_number": 89, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 89, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 92, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 92, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.display_layer", "line_number": 92, "usage_type": "attribute"}, 
{"api_name": "zfused_maya.node.core.check", "line_number": 92, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.display_layer", "line_number": 92, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 92, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 95, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 95, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.render_layer", "line_number": 95, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 95, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.render_layer", "line_number": 95, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 95, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 98, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 98, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.namespace", "line_number": 98, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 98, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.clear.namespace", "line_number": 98, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.clear", "line_number": 98, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 101, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 101, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.repeat", "line_number": 101, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 101, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 105, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 105, "usage_type": "name"}, {"api_name": "zfused_maya.node.core.check.texture_path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "zfused_maya.node.core.check", "line_number": 105, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 108, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 108, "usage_type": "name"}, {"api_name": "zfused_maya.tool.modeling.materialcheck.CheckShader", "line_number": 108, "usage_type": "call"}, {"api_name": "zfused_maya.tool.modeling.materialcheck", "line_number": 108, "usage_type": "name"}, {"api_name": "zfused_maya.widgets.checkwidget.ItemWidget", "line_number": 111, "usage_type": "call"}, {"api_name": "zfused_maya.widgets.checkwidget", "line_number": 111, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 117, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 117, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 121, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 121, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 122, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 122, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 123, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 123, "usage_type": "name"}, {"api_name": "maya.cmds.listRelatives", "line_number": 124, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 124, "usage_type": "name"}, {"api_name": "pymel.core.ls", "line_number": 138, 
"usage_type": "call"}, {"api_name": "pymel.core", "line_number": 138, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 156, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 156, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 160, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 160, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 161, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 161, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 175, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 175, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 178, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 178, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MSelectionList", "line_number": 181, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 181, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MFnMesh", "line_number": 184, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 184, "usage_type": "name"}, {"api_name": "zfused_maya.tool.modeling.materialcheck.CheckShader", "line_number": 202, "usage_type": "call"}, {"api_name": "zfused_maya.tool.modeling.materialcheck", "line_number": 202, "usage_type": "name"}, {"api_name": "zfused_maya.tool.modeling.materialcheck.CheckShader", "line_number": 211, "usage_type": "call"}, {"api_name": "zfused_maya.tool.modeling.materialcheck", "line_number": 211, "usage_type": "name"}]} +{"seq_id": "442061271", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: Peter. Wong\n# @Time: 2018/12/29 10:17\n\nimport numpy as np # numpy库\nfrom sklearn.ensemble.gradient_boosting import GradientBoostingRegressor # 集成算法\nfrom sklearn.model_selection import cross_val_score # 交叉检验\nfrom sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score # 批量导入指标算法\nimport pandas as pd # 导入pandas\nfrom sklearn import preprocessing # 导入归一化工具\n\n# 数据准备\nnum_load = 6 # 载荷分量个数\nnum_result = 3 # 预测热点位置序号\nraw_data = np.loadtxt('base.txt') # 读取数据文件\nX0 = raw_data[:,:num_load] # 分割自变量\ny0 = raw_data[:,num_load:]\ny1 = raw_data[:, num_load+num_result].reshape(-1, 1) # 分割因变量\n# y1 = np.zeros([X0.shape[0],1])\n# for i, maxv in enumerate(y0):\n# y1[i] = max(y0[i])\n\n# 数据归一化\nX = preprocessing.scale(X0)\nscaler_in = preprocessing.StandardScaler().fit(X0)\ny = preprocessing.scale(y1)\nscaler_out = preprocessing.StandardScaler().fit(y1)\n\n# 训练回归模型\nn_folds = 6 # 设置交叉检验的次数\nmodel_gbr = GradientBoostingRegressor(n_estimators=500,max_depth=3) # 建立梯度增强回归模型对象\nmodel_names = ['GBR'] # 不同模型的名称列表\nmodel_dic = [model_gbr] # 不同回归模型对象的集合\ncv_score_list = [] # 交叉检验结果列表\npre_y_list = [] # 各个回归模型预测的y值列表\nfor model in model_dic: # 读出每个回归模型对象\n scores = cross_val_score(model, X,y.ravel(), cv=n_folds) # 将每个回归模型导入交叉检验模型中做训练检验\n cv_score_list.append(scores) # 将交叉检验结果存入结果列表\n pre_y_list.append(scaler_out.inverse_transform(model.fit(X, y.ravel()).predict(X))) # 将回归训练中得到的预测y存入列表\n\n# 模型效果指标评估\nn_samples, n_features = X.shape # 总样本量,总特征数\nmodel_metrics_name = [explained_variance_score, mean_absolute_error, mean_squared_error, r2_score] # 回归评估指标对象集\nmodel_metrics_list = [] # 回归评估指标列表\nfor i in range(1): # 循环每个模型索引\n tmp_list = [] # 每个内循环的临时结果列表\n for m in model_metrics_name: # 循环每个指标对象\n tmp_score = m(y, pre_y_list[i]) # 计算每个回归指标结果\n tmp_list.append(tmp_score) # 将结果存入每个内循环的临时结果列表\n model_metrics_list.append(tmp_list) # 将结果存入回归评估指标列表\ndf1 = 
pd.DataFrame(cv_score_list, index=model_names) # 建立交叉检验的数据框\ndf2 = pd.DataFrame(model_metrics_list, index=model_names, columns=['ev', 'mae', 'mse', 'r2']) # 建立回归指标的数据框\nprint ('samples: %d \\t features: %d' % (n_samples, n_features)) # 打印输出样本量和特征数量\nprint (70 * '-') # 打印分隔线\nprint ('cross validation result:') # 打印输出标题\nprint (df1) # 打印输出交叉检验的数据框\n# print (70 * '-') # 打印分隔线\n# print ('regression metrics:') # 打印输出标题\n# print (df2) # 打印输出回归指标的数据框\n# print (70 * '-') # 打印分隔线\n# print ('short name \\t full name') # 打印输出缩写和全名标题\n# print ('ev \\t explained_variance')\n# print ('mae \\t mean_absolute_error')\n# print ('mse \\t mean_squared_error')\n# print ('r2 \\t r2')\n# print (70 * '-') # 打印分隔线", "sub_path": "Data_Analysis/131-2300/GBR_VA.py", "file_name": "GBR_VA.py", "file_ext": "py", "file_size_in_byte": 3439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.loadtxt", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 26, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 28, "usage_type": "name"}, {"api_name": "sklearn.ensemble.gradient_boosting.GradientBoostingRegressor", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.explained_variance_score", "line_number": 44, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 44, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 44, "usage_type": "name"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "6181075", "text": "#import gevent.monkey\n#gevent.monkey.patch_all()\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask, render_template, request, Response\nfrom flask_socketio import SocketIO, join_room, emit\nimport game\nfrom game import RequestDenied\n\n# initialize Flask\nfrom pylti.flask import lti\nVERSION = '0.0.1'\napp = Flask(__name__)\napp.config.from_object('config')\nsocketio = SocketIO(app)\nROOMS = {} # dict to track active rooms\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(80), unique=True, nullable=False)\n    firstname = db.Column(db.String(80), nullable=False)\n    lastname = db.Column(db.String(80), nullable=False)\n    lti_user_id = db.Column(db.String(255), unique=True, nullable=False)\n\n    def __repr__(self):\n        return '<User %r>' % self.username\n    def to_dict(self):\n        return({ 'id': self.id,\n                 'username': self.username,\n                 'firstname': self.firstname,\n                 'lastname': self.lastname,\n                 'lti_user_id': self.lti_user_id })\n\ndef error(exception=None):\n    \"\"\" render error page\n\n    :param exception: optional 
exception\n    :return: the error.html template rendered\n    \"\"\"\n    return render_template('error.html')\n\n\nimport io\nimport random\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.backends.backend_svg import FigureCanvasSVG\nfrom matplotlib.figure import Figure\nfrom matplotlib.patches import Ellipse\n\n@app.route(\"/mapping_diagram-<int:N>-<int:seed>.svg\")\ndef mapping_diagram(N=5, seed=0):\n    \"\"\" renders the plot on the fly.\n    \"\"\"\n    fig = Figure()\n    random.seed(int(seed))\n    x = [random.randint(-10,10) for i in range(N)]\n    y = [random.randint(-10,10) for i in range(N)]\n    app.logger.error(x)\n    app.logger.error(y)\n    inputs = list(set(x))\n    outputs = list(set(y))\n    n = max(len(inputs),len(outputs))\n    axis = fig.add_subplot(1, 1, 1)\n    ells = [Ellipse((0, -float(n-1)/2), n, 2, 90), Ellipse((3, -float(n-1)/2), n, 2, 90)]\n    for e in ells:\n        axis.add_artist(e)\n    axis.axis('off')\n    axis.scatter([-1,4,4],[1,-len(inputs),-len(outputs)],marker=\",\",alpha=0)\n    for i in range(len(inputs)):\n        #axis.annotate(str(inputs[i]),(-i,0))\n        axis.text(0,-i,str(inputs[i]))\n    for j in range(len(outputs)):\n        #axis.annotate(str(outputs[j]),(-j,3))\n        axis.text(3,-j,str(outputs[j]))\n    for x_,y_ in zip(x,y):\n        i = inputs.index(x_)\n        j = outputs.index(y_)\n        axis.arrow(0.2,-i, 2.7, -(j-i), head_width=0.1, head_length=0.1, fc='k', ec='k')\n    output = io.BytesIO()\n    FigureCanvasSVG(fig).print_svg(output)\n    return Response(output.getvalue(), mimetype=\"image/svg+xml\")\n\n@app.route(\"/graph-<int:N>-<int:seed>.svg\")\ndef plot_svg(N=50, seed=0):\n    \"\"\" renders the plot on the fly.\n    \"\"\"\n    fig = Figure()\n    random.seed(int(seed))\n    x = [random.randint(-10,10) for i in range(N)]\n    y = [random.randint(-10,10) for i in range(N)]\n\n    axis = fig.add_subplot(1, 1, 1)\n    axis.scatter(x, y)\n\n    output = io.BytesIO()\n    FigureCanvasSVG(fig).print_svg(output)\n    return Response(output.getvalue(), mimetype=\"image/svg+xml\")\n@app.route('/memory_lti/', methods=['GET', 'POST'])\n@lti(request='initial', error=error, app=app)\ndef memory_init(lti=lti):\n    \"\"\" initial access page to the lti provider. 
This page provides\n authorization for the user.\n\n :param lti: the `lti` object from `pylti`\n :return: index page for lti provider\n \"\"\"\n user = db.session.query(User).filter_by(lti_user_id=lti.name).first()\n if user:\n #return render_template('memory.html')\n return render_template('connect4.html')\n #return render_template('index.html', user=user)\n else:\n form = UserInfoForm()\n return render_template('GetUserInfo.html', lti=lti, form=form)\n\n@app.route('/memory')\n@lti(request='session', error=error, app=app)\ndef memory():\n \"\"\"Serve the index HTML\"\"\"\n return render_template('memory.html')\n\n@socketio.on('create')\n@lti(request='session', error=error, app=app)\ndef on_create(data, lti=lti):\n \"\"\"Create a game lobby\"\"\"\n #username = data['username']\n #gm = game.Game(deck_name='RelationDiagrams')\n #gm = game.MemoryGame(deck_name='clt1')\n gm = game.ConnectFourGame(deck_name='clt1')\n room = gm.room\n ROOMS[room] = gm\n data['room'] = room\n on_join(data)\n #join_room(room)\n #emit('join_room', {'room': room})\n\n@socketio.on('disconnect')\n@lti(request='session', error=error, app=app)\ndef disconnect(lti=lti):\n for room in ROOMS:\n player = ROOMS[room].get_player(request.sid)\n if player:\n ROOMS[room].remove_player(player)\n reset_game(room)\n\n@socketio.on('join')\n@lti(request='session', error=error, app=app)\ndef on_join(data, lti=lti):\n print(\"joining room\")\n \"\"\"Join a game lobby\"\"\"\n #username = data['username']\n room = data['room']\n print(lti)\n user = db.session.query(User).filter_by(lti_user_id=lti.name).first()\n if room in ROOMS:\n # add player and rebroadcast game object\n try:\n ROOMS[room].add_player(request.sid, user)\n except RequestDenied as err:\n emit('error', {'error': 'Unable to join room. {:s}'.format(err.message)})\n join_room(room)\n #send(ROOMS[room].to_json(), room=room)\n emit('join_room', {'room': room})\n reset_game(room)\n else:\n emit('error', {'error': 'Unable to join room. Room does not exist.'})\n\n@socketio.on('input')\n@lti(request='session', error=error, app=app)\ndef input(data, lti=lti):\n print(\"receiving input\")\n \"\"\"submit response and rebroadcast game object\"\"\"\n room = data['room']\n response = data['response']\n player = ROOMS[room].get_player(request.sid)\n try:\n ROOMS[room].input(player, response, update_game)\n except RequestDenied as err:\n print(err.message) \n\ndef update_game(room):\n print(\"updating game\")\n emit('update_game', {'flipped_cards': [card.to_dict() for card in ROOMS[room].flipped_cards], 'players': [player.to_dict() for player in ROOMS[room].players], 'active_player': ROOMS[room].active_player, 'dice': ROOMS[room].dice, 'selectable_cards': [card.to_dict() for card in ROOMS[room].selectable_cards]}, room=room)\n\ndef reset_game(room):\n print(\"reseting game\")\n emit('reset_game', {'flipped_cards': [card.to_dict() for card in ROOMS[room].flipped_cards], 'players': [player.to_dict() for player in ROOMS[room].players], 'active_player': ROOMS[room].active_player, 'dice': ROOMS[room].dice, 'selectable_cards': [card.to_dict() for card in ROOMS[room].selectable_cards]}, room=room)\n\n@socketio.on('roll')\n@lti(request='session', error=error, app=app)\ndef on_roll(data, lti=lti):\n \"\"\"flip card and rebroadcast game object\"\"\"\n print(\"flipping card\")\n room = data['room']\n player = ROOMS[room].get_player(request.sid)\n try:\n assert player is not None\n except AssertionError:\n emit('error', {'error': 'Unable to flip card. 
Player {:s} not in game'.format(request.sid)})\n try:\n ROOMS[room].roll(player, lambda x,y: emit('select', { 'player': player.to_dict(), 'x': x, 'y': y }, room=request.sid))\n update_game(room)\n except RequestDenied as err:\n print(err.message)\n #send(ROOMS[room].to_json(), room=room)\n\n@socketio.on('flip_card')\n@lti(request='session', error=error, app=app)\ndef on_flip_card(data, lti=lti):\n \"\"\"flip card and rebroadcast game object\"\"\"\n print(\"flipping card\")\n room = data['room']\n card = int(data['card'])\n player = ROOMS[room].get_player(request.sid)\n try:\n assert player is not None\n except AssertionError:\n emit('error', {'error': 'Unable to flip card. Player {:s} not in game'.format(request.sid)})\n try:\n ROOMS[room].select_card(player, card, lambda: emit('prompt', { 'player': player.to_dict() }, room=request.sid))\n update_game(room)\n except RequestDenied as err:\n print(err.message)\n #send(ROOMS[room].to_json(), room=room)\n\n#@socketio.on('submit_answer')\n#def on_submit_answer(data):\n# \"\"\"flip card and rebroadcast game object\"\"\"\n# room = data['room']\n# answer = data['answer']\n# ROOMS[room].flip_card(card)\n# send(ROOMS[room].to_json(), room=room)\n\nif __name__ == '__main__':\n socketio.run(app, debug=True, host='0.0.0.0')\n", "sub_path": "memory.py", "file_name": "memory.py", "file_ext": "py", "file_size_in_byte": 8399, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_socketio.SocketIO", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 54, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.patches.Ellipse", "line_number": 64, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_svg.FigureCanvasSVG", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 87, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 88, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 89, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 90, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_svg.FigureCanvasSVG", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 97, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 100, "usage_type": "name"}, {"api_name": "pylti.flask.lti.name", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pylti.flask.lti", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 114, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 114, "usage_type": "name"}, {"api_name": "pylti.flask.lti", "line_number": 99, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 120, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 117, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 124, "usage_type": "name"}, {"api_name": "game.ConnectFourGame", "line_number": 129, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 123, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "pylti.flask.lti", "line_number": 138, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 148, "usage_type": "name"}, {"api_name": "pylti.flask.lti", "line_number": 153, "usage_type": "argument"}, {"api_name": "pylti.flask.lti.name", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pylti.flask.lti", "line_number": 154, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "game.RequestDenied", "line_number": 159, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 160, "usage_type": "call"}, {"api_name": "flask_socketio.join_room", "line_number": 161, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 163, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 166, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 147, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 175, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 175, "usage_type": "name"}, {"api_name": "game.RequestDenied", "line_number": 178, "usage_type": "name"}, {"api_name": "pylti.flask.lti", "line_number": 169, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 183, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 187, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 191, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 195, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 195, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.sid", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.request.sid", "line_number": 201, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 201, "usage_type": "name"}, {"api_name": "game.RequestDenied", "line_number": 203, "usage_type": "name"}, {"api_name": "pylti.flask.lti", "line_number": 190, "usage_type": "call"}, {"api_name": "pylti.flask.lti", "line_number": 209, "usage_type": "name"}, {"api_name": "flask.request.sid", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.request.sid", "line_number": 218, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 218, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 220, "usage_type": 
"call"}, {"api_name": "flask.request.sid", "line_number": 220, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 220, "usage_type": "name"}, {"api_name": "game.RequestDenied", "line_number": 222, "usage_type": "name"}, {"api_name": "pylti.flask.lti", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "606423008", "text": "\nimport numpy as np\nimport paddle\nfrom tqdm import tqdm\nfrom .abc_interpreter import Interpreter\nfrom ..data_processor.readers import preprocess_inputs, preprocess_save_path\nfrom ..data_processor.visualizer import explanation_to_vis, show_vis_explanation, save_image\n\n\nclass SmoothGradInterpreter(Interpreter):\n \"\"\"\n Smooth Gradients Interpreter.\n\n Smooth Gradients method solves the problem of meaningless local variations in partial derivatives\n by adding random noise to the inputs multiple times and take the average of the\n gradients.\n\n More details regarding the Smooth Gradients method can be found in the original paper:\n http://arxiv.org/pdf/1706.03825.pdf\n \"\"\"\n\n def __init__(self,\n paddle_model,\n use_cuda=True,\n model_input_shape=[3, 224, 224]):\n \"\"\"\n Initialize the SmoothGradInterpreter.\n\n Args:\n paddle_model (callable): A paddle model that outputs predictions.\n use_cuda (bool, optional): Whether or not to use cuda. Default: True\n model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]\n \"\"\"\n Interpreter.__init__(self)\n self.paddle_model = paddle_model\n self.model_input_shape = model_input_shape\n self.data_type = 'float32'\n self.paddle_prepared = False\n\n self.use_cuda = use_cuda\n if not paddle.is_compiled_with_cuda():\n self.use_cuda = False\n\n def interpret(self,\n inputs,\n labels=None,\n noise_amount=0.1,\n n_samples=50,\n visual=True,\n save_path=None):\n \"\"\"\n Main function of the interpreter.\n\n Args:\n inputs (str or list of strs or numpy.ndarray): The input image filepath or a list of filepaths or numpy array of read images.\n labels (list or tuple or numpy.ndarray, optional): The target labels to analyze. The number of labels should be equal to the number of images. If None, the most likely label for each image will be used. Default: None\n noise_amount (float, optional): Noise level of added noise to the image.\n The std of Guassian random noise is noise_amount * (x_max - x_min). Default: 0.1\n n_samples (int, optional): The number of new images generated by adding noise. Default: 50\n visual (bool, optional): Whether or not to visualize the processed image. Default: True\n save_path (str or list of strs or None, optional): The filepath(s) to save the processed image(s). If None, the image will not be saved. 
Default: None\n\n :return: interpretations/gradients for each image\n :rtype: numpy.ndarray\n \"\"\"\n\n imgs, data = preprocess_inputs(inputs, self.model_input_shape)\n\n bsz = len(data)\n save_path = preprocess_save_path(save_path, bsz)\n\n data_type = np.array(data).dtype\n self.data_type = data_type\n\n if not self.paddle_prepared:\n self._paddle_prepare()\n\n if labels is None:\n _, preds = self.predict_fn(data, None)\n labels = preds\n\n labels = np.array(labels).reshape((len(imgs), 1))\n\n max_axis = tuple(np.arange(1, data.ndim))\n stds = noise_amount * (\n np.max(data, axis=max_axis) - np.min(data, axis=max_axis))\n\n total_gradients = np.zeros_like(data)\n for i in tqdm(range(n_samples)):\n noise = np.concatenate([\n np.float32(\n np.random.normal(0.0, stds[j], (1, ) + tuple(d.shape)))\n for j, d in enumerate(data)\n ])\n data_noised = data + noise\n gradients, _ = self.predict_fn(data_noised, labels)\n total_gradients += gradients\n\n avg_gradients = total_gradients / n_samples\n\n # visualization and save image.\n for i in range(len(imgs)):\n print(imgs[i].shape, avg_gradients[i].shape)\n vis_explanation = explanation_to_vis(imgs[i], np.abs(avg_gradients[i]).sum(0), style='overlay_grayscale')\n if visual:\n show_vis_explanation(vis_explanation)\n if save_path[i] is not None:\n save_image(save_path[i], vis_explanation)\n\n return avg_gradients\n\n def _paddle_prepare(self, predict_fn=None):\n if predict_fn is None:\n paddle.set_device('gpu:0' if self.use_cuda else 'cpu')\n # to get gradients, the ``train`` mode must be set.\n self.paddle_model.train()\n\n for n, v in self.paddle_model.named_sublayers():\n if \"batchnorm\" in v.__class__.__name__.lower():\n v._use_global_stats = True\n if \"dropout\" in v.__class__.__name__.lower():\n v.p = 0\n\n def predict_fn(data, labels):\n data = paddle.to_tensor(data)\n data.stop_gradient = False\n out = self.paddle_model(data)\n out = paddle.nn.functional.softmax(out, axis=1)\n preds = paddle.argmax(out, axis=1)\n if labels is None:\n labels = preds.numpy()\n labels_onehot = paddle.nn.functional.one_hot(\n paddle.to_tensor(labels), num_classes=out.shape[1])\n target = paddle.sum(out * labels_onehot, axis=1)\n # gradients = paddle.grad(outputs=[target], inputs=[data])[0]\n target.backward()\n gradients = data.grad\n if isinstance(gradients, paddle.Tensor):\n gradients = gradients.numpy()\n return gradients, labels\n\n self.predict_fn = predict_fn\n self.paddle_prepared = True\n", "sub_path": "interpretdl/interpreter/smooth_grad.py", "file_name": "smooth_grad.py", "file_ext": "py", "file_size_in_byte": 5840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "abc_interpreter.Interpreter", "line_number": 10, "usage_type": "name"}, {"api_name": "abc_interpreter.Interpreter.__init__", "line_number": 34, "usage_type": "call"}, {"api_name": "abc_interpreter.Interpreter", "line_number": 34, "usage_type": "name"}, {"api_name": "paddle.is_compiled_with_cuda", "line_number": 41, "usage_type": "call"}, {"api_name": "data_processor.readers.preprocess_inputs", "line_number": 67, "usage_type": "call"}, {"api_name": "data_processor.readers.preprocess_save_path", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 86, "usage_type": "call"}, 
{"api_name": "numpy.min", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 88, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "data_processor.visualizer.explanation_to_vis", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 104, "usage_type": "call"}, {"api_name": "data_processor.visualizer.show_vis_explanation", "line_number": 106, "usage_type": "call"}, {"api_name": "data_processor.visualizer.save_image", "line_number": 108, "usage_type": "call"}, {"api_name": "paddle.set_device", "line_number": 114, "usage_type": "call"}, {"api_name": "paddle.to_tensor", "line_number": 125, "usage_type": "call"}, {"api_name": "paddle.nn.functional.softmax", "line_number": 128, "usage_type": "call"}, {"api_name": "paddle.nn", "line_number": 128, "usage_type": "attribute"}, {"api_name": "paddle.argmax", "line_number": 129, "usage_type": "call"}, {"api_name": "paddle.nn.functional.one_hot", "line_number": 132, "usage_type": "call"}, {"api_name": "paddle.nn", "line_number": 132, "usage_type": "attribute"}, {"api_name": "paddle.to_tensor", "line_number": 133, "usage_type": "call"}, {"api_name": "paddle.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "paddle.Tensor", "line_number": 138, "usage_type": "attribute"}]} +{"seq_id": "375771591", "text": "# This code is mainly excerpted from openai baseline code.\n# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py\nimport numpy as np\nfrom collections import deque\nimport gym\nfrom gym import spaces\nimport cv2\nfrom abc import ABC,abstractmethod\nfrom multiprocessing import Process, Pipe\nfrom monitor import Monitor\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env=None):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n super(EpisodicLifeEnv, self).__init__(env)\n self.lives = 0\n self.was_real_done = True\n self.was_real_reset = False\n\n def _step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0 condition for a few frames\n # so it's important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def _reset(self):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset()\n self.was_real_reset = True\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.was_real_reset = False\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env=None, noop_max=30):\n \"\"\"Sample initial states by taking a random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n super(NoopResetEnv, self).__init__(env)\n self.noop_max = noop_max\n self.override_num_noops = None\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def _reset(self):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset()\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = np.random.randint(1, self.noop_max + 1)\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(0)\n if done:\n obs = self.env.reset()\n return obs\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env=None, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n super(MaxAndSkipEnv, self).__init__(env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = deque(maxlen=2)\n self._skip = skip\n\n def _step(self, action):\n total_reward = 0.0\n done = None\n for _ in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n self._obs_buffer.append(obs)\n total_reward += reward\n if done:\n break\n\n max_frame = np.max(np.stack(self._obs_buffer), axis=0)\n\n return max_frame, total_reward, done, info\n\n def _reset(self):\n \"\"\"Clear past frame buffer and init. to first obs. 
from inner env.\"\"\"\n self._obs_buffer.clear()\n obs = self.env.reset()\n self._obs_buffer.append(obs)\n return obs\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env=None):\n \"\"\"For environments where the user need to press FIRE for the game to start.\"\"\"\n super(FireResetEnv, self).__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def _reset(self):\n self.env.reset()\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset()\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset()\n return obs\n\nclass ProcessFrame84(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(ProcessFrame84, self).__init__(env)\n self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))\n\n def _observation(self, obs):\n return ProcessFrame84.process(obs)\n\n @staticmethod\n def process(frame):\n if frame.size == 210 * 160 * 3:\n img = np.reshape(frame, [210, 160, 3]).astype(np.float32)\n elif frame.size == 250 * 160 * 3:\n img = np.reshape(frame, [250, 160, 3]).astype(np.float32)\n else:\n assert False, \"Unknown resolution.\"\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114\n resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)\n x_t = resized_screen[18:102, :]\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n \"\"\"\n Change image shape to CWH\n \"\"\"\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]))\n\n def _observation(self, observation):\n return np.swapaxes(observation, 2, 0)\n\n\nclass ClippedRewardsWrapper(gym.RewardWrapper):\n def _reward(self, reward):\n \"\"\"Change all the positive rewards to 1, negative to -1 and keep zero.\"\"\"\n return np.sign(reward)\n\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to numpy array before being passed to the model.\n You'd not belive how complex the previous solution was.\"\"\"\n self._frames = frames\n\n def __array__(self, dtype=None):\n out = np.concatenate(self._frames, axis=0)\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n Returns lazy array, which is much more memory efficient.\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0]*k, shp[1], shp[2]))\n\n def _reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def _step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\nclass VecEnv(ABC):\n\n def __init__(self, num_envs, observation_space, action_space):\n self.num_envs = num_envs\n self.observation_space = observation_space\n 
self.action_space = action_space\n\n \"\"\"\n An abstract asynchronous, vectorized environment.\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Reset all the environments and return an array of\n observations.\n If step_async is still doing work, that work will\n be cancelled and step_wait() should not be called\n until step_async() is invoked again.\n \"\"\"\n pass\n\n @abstractmethod\n def step_async(self, actions):\n \"\"\"\n Tell all the environments to start taking a step\n with the given actions.\n Call step_wait() to get the results of the step.\n You should not call this if a step_async run is\n already pending.\n \"\"\"\n pass\n\n @abstractmethod\n def step_wait(self):\n \"\"\"\n Wait for the step taken with step_async().\n Returns (obs, rews, dones, infos):\n - obs: an array of observations\n - rews: an array of rewards\n - dones: an array of \"episode done\" booleans\n - infos: an array of info objects\n \"\"\"\n pass\n\n @abstractmethod\n def close(self):\n \"\"\"\n Clean up the environments' resources.\n \"\"\"\n pass\n\n def step(self, actions):\n self.step_async(actions)\n return self.step_wait()\n\n def render(self):\n logger.warn('Render not defined for %s'%self)\n\ndef worker(remote, parent_remote, env_fn_wrapper):\n parent_remote.close()\n env = env_fn_wrapper.x()\n while True:\n cmd, data = remote.recv()\n if cmd == 'step':\n ob, reward, done, info = env.step(data)\n if done:\n ob = env.reset()\n remote.send((ob, reward, done, info))\n elif cmd == 'reset':\n ob = env.reset()\n remote.send(ob)\n elif cmd == 'reset_task':\n ob = env.reset_task()\n remote.send(ob)\n elif cmd == 'close':\n remote.close()\n break\n elif cmd == 'get_spaces':\n remote.send((env.observation_space, env.action_space))\n else:\n raise NotImplementedError\n\nclass CloudpickleWrapper(object):\n \"\"\"\n Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)\n \"\"\"\n def __init__(self, x):\n self.x = x\n def __getstate__(self):\n import cloudpickle\n return cloudpickle.dumps(self.x)\n def __setstate__(self, ob):\n import pickle\n self.x = pickle.loads(ob)\n\nclass SubprocVecEnv(VecEnv):\n def __init__(self, env_fns, spaces=None):\n \"\"\"\n envs: list of gym environments to run in subprocesses\n \"\"\"\n self.waiting = False\n self.closed = False\n nenvs = len(env_fns)\n self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n for p in self.ps:\n p.daemon = True # if the main process crashes, we should not cause things to hang\n p.start()\n for remote in self.work_remotes:\n remote.close()\n\n self.remotes[0].send(('get_spaces', None))\n observation_space, action_space = self.remotes[0].recv()\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n\n def step_async(self, actions):\n for remote, action in zip(self.remotes, actions):\n remote.send(('step', action))\n self.waiting = True\n\n def step_wait(self):\n results = [remote.recv() for remote in self.remotes]\n self.waiting = False\n obs, rews, dones, infos = zip(*results)\n return np.stack(obs), np.stack(rews), np.stack(dones), infos\n\n def reset(self):\n for remote in self.remotes:\n remote.send(('reset', None))\n return np.stack([remote.recv() for remote in self.remotes])\n\n def reset_task(self):\n for remote in self.remotes:\n remote.send(('reset_task', None))\n return 
np.stack([remote.recv() for remote in self.remotes])\n\n def close(self):\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes: \n remote.recv()\n for remote in self.remotes:\n remote.send(('close', None))\n for p in self.ps:\n p.join()\n self.closed = True\n\ndef wrap(env):\n \"\"\"Apply a common set of wrappers for Atari games.\"\"\"\n assert 'NoFrameskip' in env.spec.id\n env = EpisodicLifeEnv(env)\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = ProcessFrame84(env)\n env = ImageToPyTorch(env)\n env = FrameStack(env, 4)\n return env\n\ndef wrap_cover(env_name):\n def wrap_():\n \"\"\"Apply a common set of wrappers for Atari games.\"\"\"\n env = gym.make(env_name)\n env = Monitor(env, './')\n assert 'NoFrameskip' in env.spec.id\n env = EpisodicLifeEnv(env)\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = ProcessFrame84(env)\n env = ImageToPyTorch(env)\n env = FrameStack(env, 4)\n env = ClippedRewardsWrapper(env)\n return env\n return wrap_", "sub_path": "Distributional_RL/wrappers.py", "file_name": "wrappers.py", "file_ext": "py", "file_size_in_byte": 13103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "gym.Wrapper", "line_number": 12, "usage_type": "attribute"}, {"api_name": "gym.Wrapper", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "gym.Wrapper", "line_number": 76, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 94, "usage_type": "call"}, {"api_name": "gym.Wrapper", "line_number": 105, "usage_type": "attribute"}, {"api_name": "gym.ObservationWrapper", "line_number": 122, "usage_type": "attribute"}, {"api_name": "gym.spaces.Box", "line_number": 125, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 135, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 142, "usage_type": "attribute"}, {"api_name": "gym.ObservationWrapper", "line_number": 144, "usage_type": "attribute"}, {"api_name": "gym.spaces.Box", "line_number": 151, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.swapaxes", "line_number": 154, "usage_type": "call"}, {"api_name": "gym.RewardWrapper", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.sign", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 173, "usage_type": "call"}, {"api_name": "gym.Wrapper", "line_number": 179, "usage_type": "attribute"}, 
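The wrappers record above is built to be composed: `wrap_cover()` returns a zero-argument env factory, which is exactly what `SubprocVecEnv` expects. A hedged usage sketch follows; the env id and process count are illustrative, and it assumes the legacy gym `_step`/`_reset` API these wrappers target and that the module is importable as `wrappers`:

```python
# Usage sketch for the wrappers module above: N parallel, fully wrapped Atari
# envs driven through SubprocVecEnv. Assumes gym[atari] and cloudpickle exist.
import numpy as np
from wrappers import SubprocVecEnv, wrap_cover

if __name__ == '__main__':  # required: SubprocVecEnv forks worker processes
    n_envs = 4
    venv = SubprocVecEnv([wrap_cover('BreakoutNoFrameskip-v4') for _ in range(n_envs)])
    obs = venv.reset()  # (n_envs, 4, 84, 84): 4 stacked 84x84 grayscale frames
    for _ in range(10):
        actions = np.random.randint(0, venv.action_space.n, size=n_envs)
        obs, rews, dones, infos = venv.step(actions)  # workers auto-reset on done
    venv.close()
```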
{"api_name": "gym.Wrapper.__init__", "line_number": 187, "usage_type": "call"}, {"api_name": "gym.Wrapper", "line_number": 187, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 189, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 191, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 191, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 208, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 218, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 229, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 240, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 252, "usage_type": "name"}, {"api_name": "cloudpickle.dumps", "line_number": 298, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 301, "usage_type": "call"}, {"api_name": "multiprocessing.Pipe", "line_number": 311, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 343, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 373, "usage_type": "call"}, {"api_name": "monitor.Monitor", "line_number": 374, "usage_type": "call"}]} +{"seq_id": "64128467", "text": "# encoding: UTF-8\n\nfrom __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\n\nfrom dapps.celeryCommon import RetryableError, Retryable, getMappedAs\nfrom dapps.sinaMaster.worker import thePROG\nimport dapps.sinaCrawler.tasks_Dayend as CTDayend\n\nimport crawler.crawlSina as sina\nimport crawler.producesSina as prod\n\nfrom MarketData import *\nimport HistoryData as hist\n\nimport h5tar, h5py, pickle, bz2\nfrom urllib.parse import quote, unquote\nimport sys, os, re, glob, stat, shutil, fnmatch\nfrom datetime import datetime, timedelta\n\nSYMBOL_LIST_HEADERSEQ=\"symbol,name,mktcap,nmc,turnoverratio,open,high,low,close,volume,amount,ticktime,changepercent\"\nEOL = \"\\r\\n\"\nSINA_USERS_ROOT = '/mnt/data/hpwkspace/users'\nMAPPED_USER, MAPPED_HOME = getMappedAs(homeDir = '/mnt/s') # master certainly take the local volume /mnt/s\nif MAPPED_USER in [ None, 'nobody'] : MAPPED_USER = 'hpx'\nSUBDIR_Reqs = 'reqs'\nDIR_ARCHED_HOME = os.path.join(MAPPED_HOME, 'archived', 'sina')\n\nWORKDIR_CACHE = '/tmp/sina_cache'\ntry:\n os.mkdir(WORKDIR_CACHE)\nexcept:\n WORKDIR_CACHE = '/tmp'\n\n'''\nIDXs_to_COLLECT=[ # http://vip.stock.finance.sina.com.cn/mkt/#dpzs\n'SH000001',\t# 上证指数\n'SZ399001',\t# 深证成指\n'SZ399005',\t# 中小板指\n'SZ399006',\t# 创业板指\n'SH000011',\t# 基金指数\n]\n\nETFs_to_COLLECT=[ # asof 2020-12-08 top actives: 
http://vip.stock.finance.sina.com.cn/fund_center/index.html#jjhqetf\n'SH510300','SH512880','SH510050','SH510900','SH518880','SZ159919','SH510500','SZ159934','SZ159949','SH512000',\n'SH511660','SZ159920','SZ159995','SH588000','SH510330','SZ159915','SH515030','SH512760','SH512800','SZ159937',\n'SH512660','SH512480','SH512690','SH515700','SH515050','SH515380','SH518800','SH512400','SZ159922','SH588080',\n'SH512500','SZ159001','SH588050','SZ159003','SH510310','SH515000','SH513050','SH588090','SZ159992','SH510880',\n'SH513090','SH512290','SZ159928','SZ159901','SZ159806','SH511260','SH512010','SH515220','SZ159952','SH511810',\n'SH512710','SH510850','SH510510','SH512900','SZ159966','SH512170','SZ159994','SH511010','SH510180','SZ159996',\n'SZ159801','SZ159967','SH510230','SH515210','SZ159993','SH515880','SZ159997','SH513100','SZ159807','SH512070',\n'SZ159941','SH515330','SH511380','SH515260','SH512200','SH513500','SZ159905','SH512720','SZ159820','SH512980',\n'SH515650','SH515800','SH515560','SH511690','SH515770','SH510760','SH515750','SZ159819','SZ159948','SH512100',\n'SH512670','SZ159813','SH512700','SZ159977','SH510710','SH510630','SZ159939','SH510580','SH510350','SZ159968',\n'SZ159902','SH512680','SH512910','SZ159998','SH513300','SZ159816','SH512090','SH510100','SZ159972','SH512160',\n'SZ159980','SH515530','SH512580','SH515630','SZ159938','SZ159811','SZ159985','SH515390','SZ159929','SH515580',\n'SH515070','SH510800','SH510600','SH511180','SH515980','SZ159808','SH512510','SH510390','SH510150','SH512730'\n]\n'''\n\nIDXs_to_COLLECT = prod.listAllIndexs()\nETFs_to_COLLECT = prod.listAllETFs()\n\nSYMBOLS_WithNoMF = IDXs_to_COLLECT + ETFs_to_COLLECT\n\nTASK_TIMEOUT_DownloadToday = timedelta(minutes=60)\nBATCHSIZE_DownloadToday = 500\n\nTODAY_YYMMDD = None\n\n@shared_task\ndef add(x, y):\n sleep(30)\n return x + y\n\n@shared_task\ndef mul(x, y):\n sleep(30)\n return x * y\n\n@shared_task\ndef xsum(numbers):\n return sum(numbers)\n\nimport math\n__totalAmt1W =1\n__dtDummyOpen = datetime.strptime('20000101T09:30:00', '%Y%m%dT%H:%M:%S')\n\ndef R_activity(item):\n # ret = item['amount'] / __totalAmt1W\n # if ret >0.0:\n # ret = 10* math.sqrt(math.sqrt(ret))\n # if item['turnoverratio'] >0.2 :\n # ret += math.sqrt(math.sqrt(item['turnoverratio']))\n # else: ret /=2\n\n DailizeRatio_tr =1\n if 'ticktime' in item and isinstance(item['ticktime'], str) :\n DailizeRatio_tr = datetime.strptime('20000101T' + item['ticktime'], '%Y%m%dT%H:%M:%S').replace(year=__dtDummyOpen.year,month=__dtDummyOpen.month,day=__dtDummyOpen.day) - __dtDummyOpen\n DailizeRatio_tr = 5.5*60*60 / DailizeRatio_tr.seconds if DailizeRatio_tr.seconds >0 else 1\n\n ret = min(8, item['turnoverratio'] * DailizeRatio_tr)\n if (item['amount'] * DailizeRatio_tr) <1.0e8 : ret =0 # skip those amount less than 50M\n ret += min(7, math.sqrt(item['amount'] / __totalAmt1W)) # /10\n\n return ret\n\ndef _writeCsv(f, sybmolLst, columeline=None) :\n if not columeline: columeline = SYMBOL_LIST_HEADERSEQ\n f.write(columeline + EOL)\n for i in sybmolLst:\n line = ','.join([str(i[k]) for k in columeline.split(',')]) + EOL\n f.write(line)\n\ndef __rmfile(fn) :\n try :\n os.remove(fn)\n except:\n pass\n\ndef _topN(topNum =500, lstResult=None):\n if lstResult is None:\n lstResult = listAllSymbols()\n \n topActs = list(filter(lambda x: not 'SZ3' in x['symbol'] and not 'SH68' in x['symbol'], lstResult)) # skip ChiNext and STAR Market boards\n if topNum<=0:\n topNum = min(500, int(len(topActs)/50) *10)\n\n del topActs[topNum:]\n return topActs\n\n# 
===================================================\n@shared_task(bind=True, base=Retryable)\ndef listAllSymbols(self):\n\n lstSHZ = []\n fnCachedLst = os.path.join(MAPPED_HOME, 'hpx_publish', 'lstSHZ_%s.pkl.bz2' % datetime.now().strftime('%Y%m%d'))\n try :\n st = os.stat(fnCachedLst)\n ctime = datetime.fromtimestamp(st.st_ctime)\n if st.st_size >1000 and (ctime.isoweekday() >5 or ctime.hour >=16): \n with bz2.open(fnCachedLst, 'rb') as f:\n lstSHZ = pickle.load(f)\n except Exception as ex:\n pass\n\n if len(lstSHZ) <=2000:\n lstSH, lstSZ = prod.listAllSymbols(thePROG)\n if len(lstSH) <= 0 or len(lstSZ) <= 0:\n raise RetryableError(456, \"empty SH[%s] or empty SZ[%s] fetched\" %(len(lstSH), len(lstSZ)))\n\n lstSHZ = {} # temporarily via dict\n for i in lstSH + lstSZ:\n lstSHZ[i['symbol']] =i\n lstSHZ = list(lstSHZ.values())\n\n totalAmt1W=0\n for i in lstSHZ:\n totalAmt1W += i['amount'] /10000.0\n\n if totalAmt1W >1.0: # for R_activity() \n global __totalAmt1W\n __totalAmt1W = totalAmt1W\n\n noneST = list(filter(lambda x: not 'ST' in x['name'], lstSHZ))\n noneST.sort(key=R_activity)\n noneST.reverse()\n\n STs = list(filter(lambda x: 'ST' in x['name'], lstSHZ))\n STs.sort(key=R_activity)\n STs.reverse()\n\n try:\n topActs = _topN(800, noneST)\n fnTopActs = os.path.join(MAPPED_HOME, 'hpx_archived', 'sina', 'topAct%d_%s.csv.bz2' % (len(topActs), datetime.now().strftime('%Y%m%dT%H%M')))\n with bz2.open(fnTopActs, 'wt', encoding='utf-8') as f:\n _writeCsv(f, topActs)\n except :\n pass\n\n lstSHZ = noneST + STs\n for fn in glob.glob(os.path.join(MAPPED_HOME, 'hpx_publish') + \"/lstSHZ_*.pkl.bz2\") :\n try :\n os.remove(fn)\n except Exception as ex:\n pass\n\n try:\n with bz2.open(fnCachedLst, 'wb') as f:\n f.write(pickle.dumps(lstSHZ))\n except :\n thePROG.warn('listAllSymbols() failed to write %s' % fnCachedLst)\n\n try:\n lstArch = os.path.join(MAPPED_HOME, 'hpx_archived', 'sina', 'lstSHZ_%s.csv.bz2' % datetime.now().strftime('%Y%m%d'))\n with bz2.open(lstArch, 'wt', encoding='utf-8') as f:\n _writeCsv(f, lstSHZ)\n except :\n thePROG.warn('listAllSymbols() failed to write %s' % lstArch)\n \n return lstSHZ\n\n # csvNoneST = SYMBOL_LIST_HEADERSEQ + EOL\n # for i in noneST:\n # csvNoneST += ','.join([str(i[k]) for k in SYMBOL_LIST_HEADERSEQ.split(',')]) +EOL\n\n # csvSTs = SYMBOL_LIST_HEADERSEQ + EOL\n # for i in STs:\n # csvSTs += ','.join([str(i[k]) for k in SYMBOL_LIST_HEADERSEQ.split(',')]) +EOL\n\n # return csvNoneST, csvSTs\n\n# ===================================================\n@shared_task(bind=True, base=Retryable, max_retries=5)\ndef commitToday(self, dictArgs) : # ugly at the parameter list\n '''\n in order to chain:\n import celery\n import dapps.sinaMaster.tasks as mt\n import dapps.sinaCrawler.tasks_Dayend as ct\n s3 = celery.chain(ct.downloadToday.s('SZ000005'), mt.commitToday.s())\n s3().get()\n '''\n if dictArgs is None:\n thePROG.warn('commitToday() None dictArgs, prev-req might be cancelled')\n return\n\n if not isinstance(dictArgs, dict) or len(dictArgs) <=0:\n thePROG.error('commitToday() invalid dictArgs: %s' % str(dictArgs))\n return\n\n login, asofYYMMDD = 'hpx01', datetime.now().strftime('%Y%m%d')\n login = dictArgs.get('login', login)\n asofYYMMDD = dictArgs.get('asofYYMMDD', asofYYMMDD)\n\n symbol = dictArgs.get('symbol', None)\n fnJsons = dictArgs.get('fnJsons', [])\n fnSnapshot = dictArgs.get('fnSnapshot', None)\n fnTcsv = dictArgs.get('fnTcsv', None)\n lastDays = dictArgs.get('lastDays', [])\n ''' sample value:\n fnJsons = ['SZ000002_KL1d20201202.json', 
'SZ000002_MF1d20201202.json', 'SZ000002_KL5m20201202.json', 'SZ000002_MF1m20201202.json']\n fnSnapshot = 'SZ000002_sns.h5';\n ~{HOME}\n |-- archived -> ../archived\n `-- hpx_template -> /home/wkspaces/hpx_template\n 2021-01-03 10:05:03,683: DEBUG/ForkPoolWorker-1] commitToday() archived /mnt/data/hpwkspace/users/hpx/hpx_publish/SZ300422_day20201228.tcsv by[hpx] into /mnt/data/hpwkspace/users/master/archived/sina/SinaMDay_20201228.h5t\n '''\n\n if not symbol:\n thePROG.error('commitToday() invalid dictArgs: %s' % str(dictArgs))\n return\n\n if '@' in login : login = login[:login.index('@')]\n if ':' in login : login = login[:login.index(':')]\n\n pubDir = os.path.join(SINA_USERS_ROOT, login, 'hpx_publish')\n\n # pubDir = '/mnt/s/hpx_publish' # test hardcode\n # DIR_ARCHED_HOME = '/tmp/arch_test' # test hardcode\n\n try:\n os.mkdir(os.path.join(DIR_ARCHED_HOME, 'snapshots'))\n os.chmod(os.path.join(DIR_ARCHED_HOME, 'snapshots'), stat.S_IRWXU | stat.S_IRWXG |stat.S_IROTH )\n except: pass\n\n if TODAY_YYMMDD and asofYYMMDD < TODAY_YYMMDD:\n # this symbol must be frozen today\n thePROG.warn('commitToday() archiving %s_%s sounds not open, dictArgs: %s, cleaning %s' % (symbol, asofYYMMDD, str(dictArgs), pubDir))\n for fn in fnJsons + [fnTcsv, fnSnapshot]:\n if fn is None: continue\n srcpath = os.path.join(pubDir, fn)\n __rmfile(srcpath)\n asofYYMMDD = TODAY_YYMMDD # to clear the req of today\n else:\n\n thePROG.debug('commitToday() archiving %s_%s dictArgs: %s from %s to %s' % (symbol, asofYYMMDD, str(dictArgs), pubDir, DIR_ARCHED_HOME))\n\n # step 1. zip the JSON files\n for fn in fnJsons:\n srcpath = os.path.join(pubDir, fn)\n m = re.match(r'%s_([A-Za-z0-9]*)%s.json' %(symbol, asofYYMMDD), os.path.basename(srcpath))\n if not m : continue\n evtShort = m.group(1)\n\n try :\n destpath = os.path.join(DIR_ARCHED_HOME, 'Sina%s_%s.h5t' % (evtShort, asofYYMMDD) )\n if evtShort in ['Sum'] :\n destpath = os.path.join(DIR_ARCHED_HOME, 'SinaMDay_%s.h5t' % asofYYMMDD )\n \n if h5tar.tar_utf8(destpath, srcpath, baseNameAsKey=True) :\n thePROG.debug('commitToday() archived %s into %s' %(srcpath, destpath))\n __rmfile(srcpath)\n else:\n thePROG.error('commitToday() failed to archive %s into %s' %(srcpath, destpath))\n except Exception as ex:\n thePROG.logexception(ex, 'commitToday() archiving[%s->%s] error' % (srcpath, destpath))\n\n # step 2. zip the Tcsv file\n srcpath = os.path.join(pubDir, fnTcsv)\n destpath = os.path.join(DIR_ARCHED_HOME, 'SinaMDay_%s.h5t' % asofYYMMDD )\n if h5tar.tar_utf8(destpath, srcpath, baseNameAsKey=True) :\n thePROG.debug('commitToday() archived %s by[%s] into %s' %(srcpath, login, destpath))\n __rmfile(srcpath)\n else:\n thePROG.error('commitToday() failed to archive %s by[%s] into %s' %(srcpath, login, destpath))\n\n # step 3. append the snapshots\n if fnSnapshot and len(fnSnapshot)>0:\n srcpath = os.path.join(pubDir, fnSnapshot)\n '''\n destpath = os.path.join(DIR_ARCHED_HOME, 'SNS_%s.h5' % asofYYMMDD)\n try :\n gns = []\n with h5py.File(destpath, 'a') as h5w:\n # step 3.1, copy the new SNS into the dest h5f\n with h5py.File(srcpath, 'r') as h5r:\n for gn in h5r.keys():\n if not symbol in gn: continue\n g = h5r[gn]\n if not 'desc' in g.attrs.keys() or not 'pickled market state' in g.attrs['desc'] : continue\n gdesc = g.attrs['desc']\n\n if gn in h5w.keys(): del h5w[gn]\n # Note that this is not a copy of the dataset! 
Like hard links in a UNIX file system, objects in an HDF5 file can be stored in multiple groups\n # So, h5w[gn] = g doesn't work because across different files\n # go = h5w.create_group(gn)\n h5r.copy(g.name, h5w) # note the destGroup is the parent where the group want to copy under-to\n go = h5w[gn]\n gns.append(gn)\n\n thePROG.debug('commitToday() added snapshot[%s] of %s into %s' % (','.join(gns), srcpath, destpath))\n except Exception as ex:\n thePROG.logexception(ex, 'commitToday() snapshot[%s->%s] error' % (srcpath, destpath))\n '''\n __rmfile(srcpath)\n\n # step 4, delete the request file and record\n dirReqs = os.path.join(DIR_ARCHED_HOME, SUBDIR_Reqs)\n # fnReq = os.path.join(dirReqs, '%s_%s.tcsv.bz2' % (asofYYMMDD, symbol))\n # __rmfile(fnReq)\n # thePROG.debug('commitToday() removed %s' % fnReq)\n\n dictDownloadReqs = _loadDownloadReqs(dirReqs)\n if asofYYMMDD in dictDownloadReqs.keys():\n dictToday = dictDownloadReqs[asofYYMMDD]\n if symbol in dictToday.keys():\n reqNode = dictToday[symbol]\n stampNow = datetime.now()\n taskId, stampIssued, tn = reqNode['taskId'], reqNode['stampIssued'], reqNode['taskFn']\n reqNode['stampCommitted'] = stampNow\n __rmfile(tn)\n thePROG.info('commitToday() dictDownloadReqs[%s][%s] task[%s] took %s by[%s], deleted %s' % (asofYYMMDD, symbol, taskId, stampNow - stampIssued, login, tn))\n \n nleft = len(dictToday)\n c = sum([1 if not v['stampCommitted'] else 0 for v in dictToday.values() ])\n thePROG.debug('commitToday() dictDownloadReqs[%s] has %d/%d onging' % (asofYYMMDD, c, nleft))\n \n _saveDownloadReqs(dirReqs)\n\n# ===================================================\n\n\n# RETRY_DOWNLOAD_INTERVAL = timedelta(hours=1)\nRETRY_DOWNLOAD_INTERVAL = timedelta(minutes=30)\n# ===================================================\n@shared_task(bind=True, ignore_result=True, expires=60)\ndef schChkRes_Crawlers(self, asofYYMMDD =None): # asofYYMMDD ='20201231'):\n global MAPPED_HOME, TODAY_YYMMDD\n\n if asofYYMMDD:\n TODAY_YYMMDD = asofYYMMDD\n\n stampNow = datetime.now()\n if not TODAY_YYMMDD:\n TODAY_YYMMDD = (stampNow-timedelta(hours=9)).strftime('%Y%m%d')\n \n dirReqs = os.path.join(DIR_ARCHED_HOME, SUBDIR_Reqs)\n\n thePROG.debug('schChkRes_Crawlers() refreshing tasks of downloadTodays[%s]' % TODAY_YYMMDD)\n __refreshBatch_DownloadToday(dirReqs, TODAY_YYMMDD)\n\n from dapps.sinaCrawler.worker import worker as crawler\n crawlers = crawler.control.ping(timeout=2.0, queue='crawler')\n crawlers = [ list(c.keys())[0] for c in crawlers ]\n thePROG.info('schChkRes_Crawlers() found %d crawlers: %s' % (len(crawlers), ','.join(crawlers) ) )\n '''\n cacheFiles = [ 'SinaMF1m_%s.h5t' %i for i in yymmddToCache]\n\n for c in crawlers:\n q = c.split('@')[0]\n if not q or len(q) <=0: continue\n r = CTDayend.fetchArchivedFiles.apply_async(args=[cacheFiles], queue=q)\n thePROG.info('schDo_pitchArchiedFiles() called crawler[%s].fetchArchivedFiles: %s' % (q, ','.join(cacheFiles)))\n '''\n\n\n# ===================================================\n__dictDownloadReqs = None\ndef _loadDownloadReqs(dirReqs) :\n global __dictDownloadReqs\n if not __dictDownloadReqs:\n fn = os.path.join(dirReqs, 'dictDownloadReqs.pkl.bz2')\n try:\n with bz2.open(fn, 'rb') as f:\n __dictDownloadReqs = pickle.load(f)\n except Exception as ex:\n __dictDownloadReqs ={}\n __rmfile(fn)\n\n return __dictDownloadReqs\n\ndef _saveDownloadReqs(dirReqs):\n global __dictDownloadReqs\n if not __dictDownloadReqs: return\n fn = os.path.join(dirReqs, 'dictDownloadReqs.pkl.bz2')\n try:\n with bz2.open(fn, 'wb') as 
f:\n f.write(pickle.dumps(__dictDownloadReqs))\n except Exception as ex:\n pass\n\n# ===================================================\ndef __refreshBatch_DownloadToday(dirReqs, TODAY_YYMMDD):\n\n dictDownloadReqs = _loadDownloadReqs(dirReqs)\n if not dictDownloadReqs or not TODAY_YYMMDD or not TODAY_YYMMDD in dictDownloadReqs.keys():\n thePROG.debug('__refreshBatch_DownloadToday() no active downloadToday[%s]' %TODAY_YYMMDD)\n return\n\n dictToday = dictDownloadReqs[TODAY_YYMMDD]\n thePROG.debug('__refreshBatch_DownloadToday() %d actives in downloadToday[%s]' %(len(dictToday), TODAY_YYMMDD))\n\n todels, bDirty = [], False\n reqsPending = []\n stampNow = datetime.now()\n\n for k, v in dictToday.items():\n if not v or not 'task' in v.keys() or not v['task']: \n todels.append(k)\n continue\n\n task = v['task']\n try :\n timelive = stampNow - v['stampIssued']\n if v['stampCommitted']:\n todels.append(k)\n thePROG.info('__refreshBatch_DownloadToday() downloadToday[%s]%s committed, duration %s, removed from dictToday' %(k, task.id, v['stampCommitted']-v['stampIssued']))\n continue\n\n if not v['stampReady'] and task.ready():\n v['stampReady'] = stampNow\n thePROG.debug('__refreshBatch_DownloadToday() downloadToday[%s]%s:%s succ[%s], took %s' %(k, task.id, task.state, task.successful(), timelive))\n continue\n\n if timelive > TASK_TIMEOUT_DownloadToday and task.state in ['PENDING', 'REVOKED']:\n todels.append(k)\n thePROG.warn('__refreshBatch_DownloadToday() downloadToday[%s]%s:%s took %s timeout, revoking[%s] and retry' %(k, task.id, task.state, timelive, task.parent.id))\n task.parent.revoke() # we only revoke the first in the chain here, always let commitToday go if its prev steps have been completed\n continue\n\n reqsPending.append(v['taskFn'])\n\n except Exception as ex:\n thePROG.logexception(ex, '__refreshBatch_DownloadToday() checking task of %s' % (k))\n\n if len(todels) >0:\n bDirty = True\n thePROG.info('__refreshBatch_DownloadToday() clearing %s keys: %s' % (len(todels), ','.join(todels)))\n for k in todels:\n del dictToday[k]\n\n cTasksToAdd = BATCHSIZE_DownloadToday - len(reqsPending)\n\n if cTasksToAdd <=0:\n thePROG.debug('__refreshBatch_DownloadToday() %d pendings[%s ~ %s] hit max %d, no more add-in' % (len(reqsPending), reqsPending[0], reqsPending[-1], BATCHSIZE_DownloadToday))\n return\n\n Tname_batchStart = os.path.basename(max(reqsPending)) if len(reqsPending) >0 else ''\n\n allfiles = hist.listAllFiles(dirReqs, depthAllowed=1)\n taskfiles, potentialRetries = [], []\n for fn in allfiles:\n bn = os.path.basename(fn)\n if not fnmatch.fnmatch(bn, 'T%s.*.pkl.bz2' % TODAY_YYMMDD) :\n continue\n\n if bn <= Tname_batchStart and (len(potentialRetries) + len(taskfiles)) < cTasksToAdd and not fn in reqsPending:\n potentialRetries.append(fn)\n continue\n\n taskfiles.append(fn)\n \n taskfiles.sort()\n potentialRetries.sort()\n newissued = []\n\n prefix2cut = DIR_ARCHED_HOME +'/'\n prefixlen = len(prefix2cut)\n\n for tn in taskfiles + potentialRetries:\n bn = os.path.basename(tn)\n symbol = bn.split('.')[2]\n exclMF = symbol in SYMBOLS_WithNoMF\n fnTask = tn[prefixlen:] if prefix2cut == tn[: prefixlen] else tn\n wflow = CTDayend.downloadToday.s(symbol, fnPrevTcsv = fnTask, excludeMoneyFlow=exclMF) | commitToday.s()\n task = wflow()\n dictToday[symbol] = {\n 'symbol': symbol,\n 'taskFn': tn,\n 'task': task,\n 'taskId': task.id,\n 'stampIssued': datetime.now(),\n 'stampReady': None,\n 'stampCommitted': None\n }\n\n newissued.append(symbol)\n if len(newissued) >= cTasksToAdd: break\n\n 
thePROG.info('__refreshBatch_DownloadToday() fired %d/%d new requests: %s' % (len(newissued), len(taskfiles), ','.join(newissued)))\n if len(newissued) >0 : \n bDirty = True\n elif len(dictToday) <=0:\n del dictDownloadReqs[TODAY_YYMMDD]\n bDirty = True\n thePROG.info('__refreshBatch_DownloadToday() all DownloadReqs[%s] done, removed' % (TODAY_YYMMDD))\n\n if bDirty:\n _saveDownloadReqs(dirReqs)\n\n# ===================================================\n__lastYYMMDDs, __dtLatestQueried=[], None\n@shared_task(bind=True, base=Retryable)\ndef latestOpenDays(self, nLastDays =7):\n global __lastYYMMDDs, __dtLatestQueried\n needQuery, stampNow = False, datetime.now()\n if not __dtLatestQueried or not __lastYYMMDDs or len(__lastYYMMDDs) < nLastDays:\n needQuery = True\n elif (stampNow - __dtLatestQueried) > timedelta(hours=1):\n needQuery = True\n \n if not needQuery:\n return __lastYYMMDDs[: min(nLastDays, len(__lastYYMMDDs))]\n \n global TODAY_YYMMDD\n __lastYYMMDDs = prod.determineLastDays(thePROG, nLastDays =30)\n __dtLatestQueried = datetime.now()\n if len(__lastYYMMDDs) >0:\n TODAY_YYMMDD = __lastYYMMDDs[0]\n else :\n TODAY_YYMMDD = (stampNow-timedelta(hours=9)).strftime('%Y%m%d')\n\n return __lastYYMMDDs[: min(nLastDays, len(__lastYYMMDDs))]\n\n# ===================================================\n@shared_task(bind=True, base=Retryable)\ndef schKickOff_DownloadToday(self):\n global TODAY_YYMMDD\n\n # lastYYMMDDs = prod.determineLastDays(thePROG, nLastDays =7)\n # if len(lastYYMMDDs) <=0:\n # return\n\n # TODAY_YYMMDD = lastYYMMDDs[0]\n\n lastYYMMDDs = latestOpenDays(nLastDays =7)\n if len(lastYYMMDDs) <=0:\n return\n\n # DIR_ARCHED_HOME = '/mnt/e/AShareSample/hpx_archived/sina' # TEST CODE\n dirReqs = os.path.join(DIR_ARCHED_HOME, SUBDIR_Reqs)\n\n try:\n os.mkdir(dirReqs)\n os.chmod(dirReqs, stat.S_IRWXU | stat.S_IRWXG |stat.S_IRWXO )\n shutil.chown(dirReqs, group ='hpx')\n except: pass\n\n dictDownloadReqs = _loadDownloadReqs(dirReqs)\n\n allSHZ = { x['symbol']: x for x in listAllSymbols() } # convert to dict\n thePROG.info('schKickOff_DownloadToday() listAllSymbols got %d symbols and last trade-days: %s' % (len(allSHZ), ','.join(lastYYMMDDs)))\n if len(allSHZ) <=2000:\n raise RetryableError(401, 'incomplete symbol list')\n\n if not TODAY_YYMMDD in __dictDownloadReqs.keys():\n # TODO cancel dictDownloadReqs[TODAY_YYMMDD]\n dictDownloadReqs[TODAY_YYMMDD] = {}\n else:\n for v in dictDownloadReqs[TODAY_YYMMDD].values():\n try :\n if not 'task' in v.keys() or not v['task']: continue\n task = v['task']\n task.parent.revoke() # we only revoke the first in the chain here, always let commitToday go if its prev steps have been completed\n except: pass\n\n _saveDownloadReqs(dirReqs)\n\n cTasks =0\n for symbol in ETFs_to_COLLECT + list(allSHZ.keys()) + IDXs_to_COLLECT:\n cTasks += 1\n rfnRequest = os.path.join(SUBDIR_Reqs, 'T%s.%04d.%s.pkl.bz2' % (TODAY_YYMMDD, cTasks, symbol))\n fullfnRequest = os.path.join(DIR_ARCHED_HOME, rfnRequest)\n excludeMoneyFlow = symbol in SYMBOLS_WithNoMF\n try:\n st = os.stat(fullfnRequest)\n thePROG.debug('schKickOff_DownloadToday() %s already exists' % rfnRequest)\n continue\n except: pass\n\n thePROG.debug('schKickOff_DownloadToday() generating request-file %s' % rfnRequest)\n alllines = prod.readArchivedDays(thePROG, DIR_ARCHED_HOME, symbol, lastYYMMDDs[1:])\n todayOverview = allSHZ[symbol] if symbol in allSHZ.keys() else {}\n if 'mktcap' in todayOverview.keys() and 'close' in todayOverview.keys():\n ovclose = float(todayOverview['close'])\n if ovclose > 0.01:\n 
todayOverview['mktVolCap10K'] = int(float(todayOverview['mktcap'])) / ovclose\n\n # no tcsv data in the nLastDays doesn't mean it has no trades today:\n # if len(alllines) <= 100:\n # thePROG.debug('schKickOff_DownloadToday() skip empty request %s size %d' % (rfnRequest, len(alllines))\n # continue\n try:\n with bz2.open(fullfnRequest, 'wb') as f:\n f.write(pickle.dumps({'archDays':alllines, 'ov': todayOverview}))\n try:\n shutil.chown(fullfnRequest, group ='hpx')\n os.chmod(fullfnRequest, stat.S_IREAD|stat.S_IWRITE|stat.S_IRGRP|stat.S_IWGRP|stat.S_IROTH )\n except: pass\n\n thePROG.debug('schKickOff_DownloadToday() generated task-file %s' % fullfnRequest)\n except Exception as ex:\n thePROG.logexception(ex, 'schKickOff_DownloadToday() write %s' % fullfnRequest)\n\n '''\n with bz2.open(fullfnRequest, 'wt', encoding='utf-8') as f:\n f.write(alllines)\n try:\n shutil.chown(fullfnRequest, group ='hpx')\n os.chmod(fullfnRequest, stat.S_IREAD|stat.S_IWRITE|stat.S_IRGRP|stat.S_IWGRP|stat.S_IROTH )\n except: pass\n thePROG.debug('schKickOff_DownloadToday() generated task-file %s' % rfnRequest)\n '''\n\n __refreshBatch_DownloadToday(dirReqs, TODAY_YYMMDD)\n\n'''\n# ===================================================\n@shared_task(bind=True, base=Retryable)\ndef schDo_pitchArchiedFiles(self):\n\n listAllSymbols()\n\n nLastDays, lastDays = 7, []\n yymmddToday = (stampNow-timedelta(hours=9)).strftime('%Y%m%d')\n yymmddToday = datetime.now().strftime('%Y%m%d')\n\n playback = prod.SinaMux(thePROG)\n httperr, _, lastDays = playback.loadOnline(EVENT_KLINE_1DAY, IDXs_to_COLLECT[0], nLastDays+3)\n lastDays.reverse()\n yymmddToCache = []\n for i in lastDays:\n yymmdd = i.asof.strftime('%Y%m%d')\n if yymmdd >= yymmddToday:\n continue\n yymmddToCache.append(yymmdd)\n if len(yymmddToCache) >= nLastDays:\n break\n \n if len(yymmddToCache) <=0:\n return\n\n from dapps.sinaMaster.worker import worker as wkr\n crawlers = wkr.control.ping(timeout=2.0, queue='crawler')\n crawlers = [ list(c.keys())[0] for c in crawlers ]\n cacheFiles = [ 'SinaMF1m_%s.h5t' %i for i in yymmddToCache]\n\n for c in crawlers:\n q = c.split('@')[0]\n if not q or len(q) <=0: continue\n r = CTDayend.fetchArchivedFiles.apply_async(args=[cacheFiles], queue=q)\n thePROG.info('schDo_pitchArchiedFiles() called crawler[%s].fetchArchivedFiles: %s' % (q, ','.join(cacheFiles)))\n'''\n\n# ===================================================\n@shared_task(bind=True, max_retries=0, compression='bzip2')\ndef readArchivedDays(self, symbol, YYYYMMDDs):\n return prod.readArchivedDays(thePROG, DIR_ARCHED_HOME, symbol, YYYYMMDDs)\n\n# ===================================================\n@shared_task(bind=True, max_retries=0, compression='bzip2')\ndef readAchivedSofar(self, symbol):\n readtxn=''\n \n # mdlines\n mdlines = ''\n fnList = glob.glob(os.path.join(MAPPED_HOME, 'archived', 'sina', 'SinaMDay_*.h5t'))\n if len(fnList) >0 :\n fnList.sort()\n fnArch = fnList[-1]\n recentYYMMDD = os.path.basename(fnArch)[len('SinaMDay_'): -len('.h5t')]\n memName = '%s_day%s.tcsv' %(symbol, recentYYMMDD)\n try :\n mdlines = h5tar.read_utf8(fnArch, memName)\n readtxn += '%s(%dB)@%s,' % (memName, len(mdlines), fnArch)\n except:\n thePROG.error('readAchivedSofar() failed to read %s from %s' % (memName, fnArch))\n\n mux = prod.SinaMux(thePROG)\n mux.setSymbols([symbol])\n\n # kl1dlines\n fnList = glob.glob(os.path.join(MAPPED_HOME, 'archived', 'sina', 'SinaKL1d_*.h5t'))\n if len(fnList) >0 :\n fnList.sort()\n fnArch = fnList[-1]\n mux.loadJsonH5t(EVENT_KLINE_1DAY, symbol, 
fnArch)\n readtxn += '%s@%s,' % (symbol, fnArch)\n\n # mf1dlines\n fnList = glob.glob(os.path.join(MAPPED_HOME, 'archived', 'sina', 'SinaMF1d_*.h5t'))\n if len(fnList) >0 :\n fnList.sort()\n fnArch = fnList[-1]\n mux.loadJsonH5t(EVENT_MONEYFLOW_1DAY, symbol, fnArch)\n readtxn += '%s@%s,' % (symbol, fnArch)\n\n tmpfn = os.path.join(WORKDIR_CACHE, 'tmprr%s_%s.tcsv' %(symbol, datetime.now().strftime('%m%dT%H%M%S')))\n rec = thePROG.createApp(hist.TaggedCsvRecorder, filepath =tmpfn)\n rec.registerCategory(EVENT_KLINE_1DAY, params={'columns': KLineData.COLUMNS})\n rec.registerCategory(EVENT_MONEYFLOW_1DAY, params={'columns': MoneyflowData.COLUMNS})\n\n while True:\n try :\n rec.doAppStep() # to flush the recorder\n ev = next(mux)\n if not ev: break\n rec.pushRow(ev.type, ev.data)\n except StopIteration:\n break\n except Exception as ex:\n thePROG.logexception(ex)\n break\n except :\n break\n\n for i in range(10): rec.doAppStep() # to flush the recorder\n thePROG.removeApp(rec)\n tcsvlines=''\n with open(tmpfn, 'r') as f:\n tcsvlines = f.read()\n __rmfile(tmpfn)\n\n thePROG.info('readAchivedSofar() read from %s tmpfile[%s] deleted' % (readtxn, tmpfn)) \n return {\n 'symbol' : symbol,\n 'tcsv_lastday': mdlines,\n 'tcsv_1d': tcsvlines\n } # take celery's compression instead of return bz2.compress(all_lines.encode('utf8'))\n\n# ===================================================\n@shared_task(bind=True, base=Retryable)\ndef readArchivedH5t(self, h5tFileName, memberNode):\n if '.h5t' != h5tFileName[-4:]: h5tFileName+='.h5t'\n pathname = os.path.join(MAPPED_HOME, 'archived', 'sina', h5tFileName)\n pathname = '/tmp/sina_cache/' + h5tFileName\n\n k = h5tar.quote(memberNode)\n ret = None\n try :\n with h5py.File(pathname, 'r') as h5r:\n if k in h5r.keys():\n ret = h5r[k][()].tobytes()\n\n if h5tar.GNAME_TEXT_utf8 in h5r.keys():\n g = h5r[h5tar.GNAME_TEXT_utf8]\n if k in g.keys():\n ret = g[k][()].tobytes()\n\n except Exception as ex:\n thePROG.logexception(ex, 'readArchivedH5t() %s[%s]'% (h5tFileName, memberNode))\n\n if ret and len(ret) > 0:\n #typical compress-rate 1/8: ret = bz2.decompress(ret).decode('utf8')\n thePROG.info('readArchivedH5t() read %s[%s] %dB'% (h5tFileName, memberNode, len(ret)))\n else :\n thePROG.error('readArchivedH5t() read %s[%s] failed: %s'% (h5tFileName, memberNode, ret))\n return ret\n\n# ===================================================\n@shared_task(bind=True, base=Retryable)\ndef schDo_ZipWeek(self, asofYYMMDD =None):\n global DIR_ARCHED_HOME\n\n dtInWeek = None\n try :\n if isinstance(asofYYMMDD, str):\n dtInWeek = datetime.strptime(asofYYMMDD, '%Y-%m-%d')\n except:\n dtInWeek = None\n\n if not dtInWeek:\n dtInWeek = datetime.now() - timedelta(days=5)\n\n thePROG.debug('schDo_ZipWeek() start archiving the week of %s under %s' % (dtInWeek.strftime('%Y-%m-%d'), DIR_ARCHED_HOME))\n fn, lst = prod.archiveWeek(DIR_ARCHED_HOME, None, dtInWeek, thePROG)\n thePROG.info('schDo_ZipWeek() %s archived %s symbols'% (fn, len(lst)))\n\n####################################\nfrom time import sleep\nif __name__ == '__main__':\n thePROG.setLogLevel('debug')\n schKickOff_DownloadToday()\n exit(0)\n\n # readAchivedSofar('SZ300913')\n\n readArchivedDays('SZ300913', ['20210530', '20210531'])\n # readArchivedH5t('SinaMF1m_20201222.h5t', 'SZ300913_MF1m20201222.json')\n\n listAllSymbols()\n # schKickOff_DownloadToday()\n for i in range(20):\n schChkRes_Crawlers('20201231')\n sleep(10)\n\n # nTop = 1000\n # lstSHZ = topActives(nTop)\n # with open(os.path.join(MAPPED_HOME, 'hpx_publish', 
'top%s_%s' % (nTop, datetime.now().strftime('%Y%m%d'))) + '.csv', 'wb') as f:\n # _writeCsv(f, lstSHZ)\n # print(lstSHZ)\n\n '''\n symbol, asofYYMMDD = 'SZ002670', '20201204'\n \n login = 'root@tc2.syscheme.com'\n fnJsons = []\n for evt in ['KL1d', 'MF1d', 'KL5m', 'MF1m']:\n fnJsons.append('%s_%s%s.json' % (symbol, evt, asofYYMMDD))\n \n today = {\n 'symbol': symbol,\n 'login': 'hxp01@test',\n 'asofYYMMDD': asofYYMMDD,\n 'fnSnapshot': '%s_sns%s.h5' % (symbol, asofYYMMDD), \n 'fnJsons': fnJsons,\n 'fnTcsv': '%s_day%s.tcsv' % (symbol, asofYYMMDD),\n 'lastDays': [\n ['20201204', 15.31, 17.5, 15.0, 15.5, 222133283.0], \n ['20201203', 15.98, 16.48, 15.5, 15.97, 176615259.0], \n ['20201202', 14.38, 14.98, 14.26, 14.98, 113319552.0], \n ['20201201', 12.41, 13.62, 11.77, 13.62, 163043226.0], \n ['20201130', 12.17, 12.72, 12.02, 12.38, 166906351.0]\n ]\n }\n\n commitToday(today)\n '''\n \n\n''' A test\nimport dapps.sinaCrawler.tasks_Dayend as ct\nimport dapps.sinaMaster.tasks_Archive as mt\nc1 = ct.downloadToday.s('SZ000002') | mt.commitToday.s()\nc1().get()\n'''\n\n", "sub_path": "src/dapps/sinaMaster/tasks_Archive.py", "file_name": "tasks_Archive.py", "file_ext": "py", "file_size_in_byte": 34068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "dapps.celeryCommon.getMappedAs", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 31, "usage_type": "call"}, {"api_name": "crawler.producesSina.listAllIndexs", "line_number": 61, "usage_type": "call"}, {"api_name": "crawler.producesSina", "line_number": 61, "usage_type": "name"}, {"api_name": "crawler.producesSina.listAllETFs", "line_number": 62, "usage_type": "call"}, {"api_name": "crawler.producesSina", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 66, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 71, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 76, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 104, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "name"}, {"api_name": "bz2.open", "line_number": 142, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 143, "usage_type": "call"}, {"api_name": "crawler.producesSina.listAllSymbols", "line_number": 148, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", 
"line_number": 148, "usage_type": "argument"}, {"api_name": "crawler.producesSina", "line_number": 148, "usage_type": "name"}, {"api_name": "dapps.celeryCommon.RetryableError", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 175, "usage_type": "name"}, {"api_name": "bz2.open", "line_number": 176, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 184, "usage_type": "call"}, {"api_name": "bz2.open", "line_number": 189, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.warn", "line_number": 192, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 192, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 195, "usage_type": "name"}, {"api_name": "bz2.open", "line_number": 196, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.warn", "line_number": 199, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 199, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 133, "usage_type": "call"}, {"api_name": "dapps.celeryCommon.Retryable", "line_number": 133, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.warn", "line_number": 225, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 225, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.error", "line_number": 229, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 229, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 232, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 232, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.error", "line_number": 251, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 251, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 264, "usage_type": "call"}, {"api_name": "stat.S_IRWXU", "line_number": 264, "usage_type": "attribute"}, {"api_name": "stat.S_IRWXG", "line_number": 264, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 264, "usage_type": "attribute"}, {"api_name": "dapps.sinaMaster.worker.thePROG.warn", "line_number": 269, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 269, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 272, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 272, "usage_type": "attribute"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 277, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 277, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 289, "usage_type": "call"}, {"api_name": "os.path", "line_number": 289, "usage_type": "attribute"}, {"api_name": "h5tar.tar_utf8", "line_number": 291, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 292, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 292, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.error", "line_number": 295, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 295, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.logexception", "line_number": 297, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 297, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "h5tar.tar_utf8", "line_number": 302, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 303, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 303, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.error", "line_number": 306, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 306, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path", "line_number": 310, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 349, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 349, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 353, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 353, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 357, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 357, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 214, "usage_type": "call"}, {"api_name": "dapps.celeryCommon.Retryable", "line_number": 214, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 365, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 374, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 374, "usage_type": "name"}, {"api_name": 
"datetime.timedelta", "line_number": 376, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 378, "usage_type": "call"}, {"api_name": "os.path", "line_number": 378, "usage_type": "attribute"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 380, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 380, "usage_type": "name"}, {"api_name": "dapps.sinaCrawler.worker.worker.control.ping", "line_number": 384, "usage_type": "call"}, {"api_name": "dapps.sinaCrawler.worker.worker.control", "line_number": 384, "usage_type": "attribute"}, {"api_name": "dapps.sinaCrawler.worker.worker", "line_number": 384, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 386, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 386, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 367, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path", "line_number": 403, "usage_type": "attribute"}, {"api_name": "bz2.open", "line_number": 405, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 406, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 416, "usage_type": "call"}, {"api_name": "os.path", "line_number": 416, "usage_type": "attribute"}, {"api_name": "bz2.open", "line_number": 418, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 419, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 428, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 428, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 432, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 432, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 436, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 436, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 448, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 448, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 453, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 453, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.warn", "line_number": 458, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 458, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.logexception", "line_number": 465, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 465, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 469, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 469, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 476, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 476, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path", "line_number": 479, "usage_type": "attribute"}, {"api_name": "HistoryData.listAllFiles", "line_number": 481, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 484, "usage_type": "call"}, {"api_name": "os.path", "line_number": 484, 
"usage_type": "attribute"}, {"api_name": "fnmatch.fnmatch", "line_number": 485, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 502, "usage_type": "call"}, {"api_name": "os.path", "line_number": 502, "usage_type": "attribute"}, {"api_name": "dapps.sinaCrawler.tasks_Dayend.downloadToday.s", "line_number": 506, "usage_type": "call"}, {"api_name": "dapps.sinaCrawler.tasks_Dayend.downloadToday", "line_number": 506, "usage_type": "attribute"}, {"api_name": "dapps.sinaCrawler.tasks_Dayend", "line_number": 506, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 513, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 513, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 521, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 521, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 527, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 527, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 537, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 537, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 540, "usage_type": "call"}, {"api_name": "crawler.producesSina.determineLastDays", "line_number": 547, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 547, "usage_type": "argument"}, {"api_name": "crawler.producesSina", "line_number": 547, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 548, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 548, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 552, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 534, "usage_type": "call"}, {"api_name": "dapps.celeryCommon.Retryable", "line_number": 534, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 572, "usage_type": "call"}, {"api_name": "os.path", "line_number": 572, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 575, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 576, "usage_type": "call"}, {"api_name": "stat.S_IRWXU", "line_number": 576, "usage_type": "attribute"}, {"api_name": "stat.S_IRWXG", "line_number": 576, "usage_type": "attribute"}, {"api_name": "stat.S_IRWXO", "line_number": 576, "usage_type": "attribute"}, {"api_name": "shutil.chown", "line_number": 577, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 583, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 583, "usage_type": "name"}, {"api_name": "dapps.celeryCommon.RetryableError", "line_number": 585, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 603, "usage_type": "call"}, {"api_name": "os.path", "line_number": 603, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 604, "usage_type": "call"}, {"api_name": "os.path", "line_number": 604, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 607, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 608, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 608, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 612, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", 
"line_number": 612, "usage_type": "name"}, {"api_name": "crawler.producesSina.readArchivedDays", "line_number": 613, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 613, "usage_type": "argument"}, {"api_name": "crawler.producesSina", "line_number": 613, "usage_type": "name"}, {"api_name": "bz2.open", "line_number": 625, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 626, "usage_type": "call"}, {"api_name": "shutil.chown", "line_number": 628, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 629, "usage_type": "call"}, {"api_name": "stat.S_IREAD", "line_number": 629, "usage_type": "attribute"}, {"api_name": "stat.S_IWRITE", "line_number": 629, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 629, "usage_type": "attribute"}, {"api_name": "stat.S_IWGRP", "line_number": 629, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 629, "usage_type": "attribute"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 632, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 632, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.logexception", "line_number": 634, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 634, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 557, "usage_type": "call"}, {"api_name": "dapps.celeryCommon.Retryable", "line_number": 557, "usage_type": "name"}, {"api_name": "crawler.producesSina.readArchivedDays", "line_number": 689, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 689, "usage_type": "argument"}, {"api_name": "crawler.producesSina", "line_number": 689, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 687, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 698, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 698, "usage_type": "call"}, {"api_name": "os.path", "line_number": 698, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 702, "usage_type": "call"}, {"api_name": "os.path", "line_number": 702, "usage_type": "attribute"}, {"api_name": "h5tar.read_utf8", "line_number": 705, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.error", "line_number": 708, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 708, "usage_type": "name"}, {"api_name": "crawler.producesSina.SinaMux", "line_number": 710, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 710, "usage_type": "argument"}, {"api_name": "crawler.producesSina", "line_number": 710, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 714, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 714, "usage_type": "call"}, {"api_name": "os.path", "line_number": 714, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 722, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 722, "usage_type": "call"}, {"api_name": "os.path", "line_number": 722, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 729, "usage_type": "call"}, {"api_name": "os.path", "line_number": 729, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 729, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 729, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.createApp", 
"line_number": 730, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 730, "usage_type": "name"}, {"api_name": "HistoryData.TaggedCsvRecorder", "line_number": 730, "usage_type": "attribute"}, {"api_name": "dapps.sinaMaster.worker.thePROG.logexception", "line_number": 743, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 743, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.removeApp", "line_number": 749, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 749, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 755, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 755, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 692, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 766, "usage_type": "call"}, {"api_name": "os.path", "line_number": 766, "usage_type": "attribute"}, {"api_name": "h5tar.quote", "line_number": 769, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 772, "usage_type": "call"}, {"api_name": "h5tar.GNAME_TEXT_utf8", "line_number": 776, "usage_type": "attribute"}, {"api_name": "h5tar.GNAME_TEXT_utf8", "line_number": 777, "usage_type": "attribute"}, {"api_name": "dapps.sinaMaster.worker.thePROG.logexception", "line_number": 782, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 782, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 786, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 786, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.error", "line_number": 788, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 788, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 763, "usage_type": "call"}, {"api_name": "dapps.celeryCommon.Retryable", "line_number": 763, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 799, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 799, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 804, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 804, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 804, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG.debug", "line_number": 806, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 806, "usage_type": "name"}, {"api_name": "crawler.producesSina.archiveWeek", "line_number": 807, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 807, "usage_type": "argument"}, {"api_name": "crawler.producesSina", "line_number": 807, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.info", "line_number": 808, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 808, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 792, "usage_type": "call"}, {"api_name": "dapps.celeryCommon.Retryable", "line_number": 792, "usage_type": "name"}, {"api_name": "dapps.sinaMaster.worker.thePROG.setLogLevel", "line_number": 813, "usage_type": "call"}, {"api_name": "dapps.sinaMaster.worker.thePROG", "line_number": 813, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 826, "usage_type": "call"}]} 
+{"seq_id": "28002487", "text": "from __future__ import print_function\nimport numpy as np\nimport time\nimport math\n\nfrom ..box import centered_box\nfrom ..tensor import WritableTensorData as WTD, \\\n WritableTensorDataWithMask as WTDM\nfrom ..emio import imsave\n\n\ndef prepare_outputs(spec, locs, blend=False, blend_mode='', stride=None):\n blend_pool = ['', 'bump', 'aligned-bump']\n b = blend_mode.lower()\n if b not in blend_pool:\n raise RuntimeError('unknown output blend type [%s]' % b)\n\n if b == '':\n b = 'Blend'\n elif b == 'aligned-bump':\n b = 'AlignedBumpBlend'\n else:\n b = b[0].capitalize() + b[1:] + 'Blend'\n # print('blending mode: {}'.format(b))\n outputs = eval(b + '(spec, locs, blend, stride)')\n return outputs\n\n\nclass Blend(object):\n \"\"\"\n Blend interface.\n \"\"\"\n\n def __init__(self, spec, locs, blend=False, stride=None):\n \"\"\"Initialize Blend.\"\"\"\n self.spec = spec\n self.locs = locs\n self.blend = blend\n self._prepare_data()\n\n def push(self, loc, sample):\n \"\"\"Write to data.\"\"\"\n for k, v in sample.items():\n assert k in self.data\n self.data[k].set_patch(loc, v, op=self.op)\n\n def get_data(self, key):\n \"\"\"Get inference output data.\"\"\"\n assert key in self.data\n return self.data[key].get_data()\n\n def voxels(self):\n voxels = list()\n for k, v in self.data.items():\n voxels.append(np.prod(v.dim()))\n return min(voxels)\n\n ####################################################################\n ## Private Methods.\n ####################################################################\n\n def _prepare_data(self):\n \"\"\"\n TODO(kisuk): Documentation.\n \"\"\"\n assert len(self.locs) > 0\n rmin = self.locs[0]\n rmax = self.locs[-1]\n\n self.data = dict()\n self.op = None\n for k, v in self.spec.items():\n fov = v[-3:]\n a = centered_box(rmin, fov)\n b = centered_box(rmax, fov)\n c = a.merge(b)\n shape = v[:-3] + tuple(c.size())\n # Inference with overlapping window.\n if self.blend:\n self.data[k] = WTDM(shape, fov, c.min())\n self.op = 'np.add'\n else:\n self.data[k] = WTD(shape, fov, c.min())\n\n\nclass BumpBlend(Blend):\n \"\"\"\n Blending with bump function.\n \"\"\"\n\n def __init__(self, spec, locs, blend=False, stride=None):\n \"\"\"Initialize BumpBlend.\"\"\"\n super().__init__(spec, locs, blend=blend, stride=stride)\n\n self.logit_maps = dict()\n\n # Inference with overlapping window.\n self.max_logits = None\n if blend:\n max_logits = dict()\n # Compute max_logit for numerical stability.\n for k, v in self.data.items():\n fov = tuple(v.fov())\n data = np.full(v.dim(), -np.inf, dtype='float32')\n max_logit = WTD(data, fov, v.offset())\n max_logit_window = self._bump_logit_map(fov)\n for loc in self.locs:\n max_logit.set_patch(loc, max_logit_window, op='np.maximum')\n max_logits[k] = max_logit\n self.max_logits = max_logits\n\n def push(self, loc, sample):\n \"\"\"Blend with data.\"\"\"\n for k, v in sample.items():\n assert k in self.data\n t0 = time.time()\n mask = self.get_mask(k, loc)\n t1 = time.time() - t0\n self.data[k].set_patch(loc, v, op=self.op, mask=mask)\n t2 = time.time() - t0\n print('get_mask: %.3f, set_patch: %.3f' % (t1, t2-t1))\n\n def get_mask(self, key, loc):\n mask = None\n if self.blend:\n assert key in self.max_logits\n max_logit = self.max_logits[key].get_patch(loc)\n mask = self._bump_map(max_logit.shape[-3:], max_logit[0, ...])\n return mask\n\n ####################################################################\n ## Private methods.\n 
####################################################################\n\n    def _bump_logit(self, z, y, x, t=1.5):\n        return -(x*(1-x))**(-t)-(y*(1-y))**(-t)-(z*(1-z))**(-t)\n\n    def _bump_logit_map(self, dim):\n        ret = self.logit_maps.get(dim)\n        if ret is None:\n            x = range(dim[-1])\n            y = range(dim[-2])\n            z = range(dim[-3])\n            zv, yv, xv = np.meshgrid(z, y, x, indexing='ij')\n            xv = (xv+1.0)/(dim[-1]+1.0)\n            yv = (yv+1.0)/(dim[-2]+1.0)\n            zv = (zv+1.0)/(dim[-3]+1.0)\n            ret = self._bump_logit(zv, yv, xv)\n            self.logit_maps[dim] = ret\n        return ret\n\n    def _bump_map(self, dim, max_logit):\n        return np.exp(self._bump_logit_map(dim) - max_logit)\n\n\nclass AlignedBumpBlend(Blend):\n    \"\"\"\n    Blending with bump function with aligned patches.\n    \"\"\"\n    def __init__(self, spec, locs, blend=True, stride=None):\n        \"\"\"Initialize AlignedBumpBlend.\"\"\"\n        # note that the blend mode is always False in parent class to avoid\n        # using the chunk-wise mask\n        super().__init__(spec, locs, False)\n\n        self.patch_masks = dict()\n        # always add the patches, this will take effect in the push\n        # functions of Blend class\n        for k, v in self.data.items():\n            fov = v.fov()\n\n            assert stride\n            if all(np.less_equal(stride, 1.0)):\n                # this is in percentile, need to transform to voxel based\n                fov = list(fov)\n                stride_by_voxel = [f - int(round(f * s)) for (f, s) in\n                                   zip(fov, stride)]\n            else:\n                stride_by_voxel = stride\n            print('stride: {}'.format(stride))\n            assert all(np.greater_equal(stride_by_voxel, 1))\n\n            mask = self._make_mask(fov, stride_by_voxel)\n            assert np.less_equal(mask, 1.0).all()\n            self.patch_masks[k] = mask\n\n        self._save_mask()\n\n    def push(self, loc, sample):\n        \"\"\"Write to data.\"\"\"\n        for k, v in sample.items():\n            t0 = time.time()\n            np.multiply(v, self.patch_masks[k], v)\n            self.data[k].set_patch(loc, v, op='np.add')\n            t1 = time.time() - t0\n            print('blending: %.3f sec' % t1)\n\n    ####################################################################\n    ## Private methods.\n    ####################################################################\n    def _save_mask(self):\n        for k, v in self.patch_masks.items():\n            imsave(v, '/tmp/patch_mask_{}.tif'.format(k))\n\n    def _make_mask(self, fov, stride_by_voxel):\n        \"\"\"\n        _make_mask( size )\n        params:\n            size:tuple of int\n        return:\n            a numpy array with data type of float32. The value was generated\n            using a bump function. 
the overlapping borders and corners were\n normalized according to weight accumulation.\n https://en.wikipedia.org/wiki/Bump_function\n \"\"\"\n stride = stride_by_voxel\n bump_map = self._make_bump_map(fov)\n # use 3x3x3 mask addition to figure out the normalization parameter\n # this is a simulation of blending\n base_mask = np.zeros(tuple(f+2*s for (f, s) in zip(fov, stride)),\n dtype='float64')\n print('fov: {}, stride: {}'.format(fov, stride))\n print('shape of base mask: {}'.format(base_mask.shape))\n for nz in range(3):\n for ny in range(3):\n for nx in range(3):\n base_mask[nz*stride[0]:nz*stride[0]+fov[0],\n ny*stride[1]:ny*stride[1]+fov[1],\n nx*stride[2]:nx*stride[2]+fov[2]] += bump_map\n\n bump_map /= base_mask[stride[0]:stride[0]+fov[0],\n stride[1]:stride[1]+fov[1],\n stride[2]:stride[2]+fov[2]]\n\n return np.asarray(bump_map, dtype='float32')\n\n def _make_bump_map(self, dim):\n x = range(dim[-1])\n y = range(dim[-2])\n z = range(dim[-3])\n zv, yv, xv = np.meshgrid(z, y, x, indexing='ij')\n xv = (xv+1.0)/(dim[-1]+1.0) * 2.0 - 1.0\n yv = (yv+1.0)/(dim[-2]+1.0) * 2.0 - 1.0\n zv = (zv+1.0)/(dim[-3]+1.0) * 2.0 - 1.0\n bump_map = np.exp(-1.0/(1.0-xv*xv) +\n -1.0/(1.0-yv*yv) +\n -1.0/(1.0-zv*zv))\n return np.asarray(bump_map, dtype='float64')\n", "sub_path": "python/dataprovider/inference/blend.py", "file_name": "blend.py", "file_ext": "py", "file_size_in_byte": 8556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "numpy.prod", "line_number": 55, "usage_type": "call"}, {"api_name": "box.centered_box", "line_number": 74, "usage_type": "call"}, {"api_name": "box.centered_box", "line_number": 75, "usage_type": "call"}, {"api_name": "tensor.WritableTensorDataWithMask", "line_number": 80, "usage_type": "call"}, {"api_name": "tensor.WritableTensorData", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensor.WritableTensorData", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 116, "usage_type": "call"}, {"api_name": "time.time", "line_number": 118, "usage_type": "call"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.less_equal", "line_number": 173, "usage_type": "call"}, {"api_name": "math.round", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.less_equal", "line_number": 184, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 193, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "emio.imsave", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "125082187", "text": "#! 
/usr/bin/env python3\n#encoding: utf-8\n\nimport functools\n\ndef log(text=None):\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args,**kw):\n            if isinstance(text,str):\n                print('%s %s()'%(text,func.__name__))\n                _func=func(*args,**kw)\n            else:\n                print('%s()'%func.__name__)\n                _func=func(*args,**kw)\n            return _func\n        return wrapper\n    if isinstance(text,(int,str)):\n        return decorator\n    else:\n        return decorator(text)\n\n@log\ndef time():\n    print('2017-06-21')\n\nif __name__=='__main__':\n    time()\n", "sub_path": "decorator.py", "file_name": "decorator.py", "file_ext": "py", "file_size_in_byte": 627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "functools.wraps", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "367475446", "text": "import xml.etree.ElementTree as ET\nimport cv2\nimport numpy as np\nimport os\nimport glob\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nglobal radius\n\nradius = 5\n\n\ndef visualize_hsv(flow, name):\n    flow = flow.astype(\"float32\")\n    hsv = np.zeros((flow.shape[0], flow.shape[1], 3))\n    hsv[..., 1] = 255\n    # compute the magnitude and angle of the flow\n    mag, ang = cv2.cartToPolar(flow[..., 1], flow[..., 0])\n    # OpenCV represents hue in the range 0-180\n    hsv[..., 0] = ang * 180 / np.pi / 2\n    # normalize the magnitude to the range 0-255\n    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n    # needed, otherwise the conversion below errors\n    hsv = hsv.astype(\"uint8\")\n    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n    cv2.imwrite(name, rgb)\n\n\n# returns the vector count and the vector at each coordinate\ndef compute_vector(black, pre, nxt, result, result_y, result_x, sgm):\n    # v' = p(t+1) - p(t)\n    # normalize to a unit vector (v = v'/|v'|)\n    v = nxt - pre\n    if np.linalg.norm(v) != 0:\n        v = v / np.linalg.norm(v)\n    # define the upper and lower normal vectors, scaled by sgm (a hyperparameter)\n    up = np.array([-v[1], v[0]]) * sgm\n    dw = np.array([v[1], -v[0]]) * sgm\n    # sum of (the coordinates of p(t) or p(t+1)) and the two normal vectors\n    v1 = up + nxt + radius\n    v2 = dw + nxt + radius\n    v3 = up + pre + radius\n    v4 = dw + pre + radius\n    # the 4 points surrounding the segment connecting p(t+1) and p(t)\n    points = np.round(\n        np.array([[v1[0], v1[1]], [v2[0], v2[1]], [v4[0], v4[1]], [v3[0], v3[1]]])\n    )\n    img_t = black.copy()\n    img_y = black.copy()\n    img_x = black.copy()\n    img_z = black.copy()\n    # set the region enclosed by the 4 points to 1\n    img_t = cv2.fillPoly(img=img_t, pts=np.int32([points]), color=1)\n    # img_t = cv2.circle(img_t, (pre[0] + radius, pre[1] + radius), radius, (1), thickness=-1, lineType=cv2.LINE_4)\n    # img_t = cv2.circle(img_t, (nxt[0] + radius, nxt[1] + radius), radius, (1), thickness=-1, lineType=cv2.LINE_4)\n    # v = nxt - pre\n    # v = np.append(v, 1)\n    # v = v / np.linalg.norm(v)\n    img_y[img_t != 0] = v[1]\n    img_x[img_t != 0] = v[0]\n    # img_z[img_t != 0] = v[2]\n    # keep accumulating vectors\n    result = result + img_t\n    # summing the vectors is fine for now (averaged later)\n    result_x = result_x + img_x\n    result_y = result_y + img_y\n    # result_z = result_z + img_z\n    return result, result_y, result_x\n\n\ndef generate_flow(track_let, save_path, itv=1, height=1040, width=1392):\n    track_let = track_let.astype(int)\n    i = np.unique(track_let[:, 0])[0]\n    ids = np.unique(track_let[:, 1])\n\n    output = []\n\n    # black image (zero matrix) reused in several places\n    black = np.zeros((height + radius * 2, width + radius * 2, 1))\n    par_id = -1\n\n    # result counts how many vectors cover each coordinate (2, 3, etc. where they overlap)\n    # result_y, result_x are the output vectors\n    result = black.copy()\n    result_y = black.copy()\n    result_x = black.copy()\n    for j in ids:\n        # the frame at i+1\n        index_check = len(track_let[(track_let[:, 0] == i) & (track_let[:, 1] == j)])\n        index_chnxt = len(\n            track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)]\n        )\n        if index_chnxt != 0:\n            par_id = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][\n                0, -1\n            ]\n\n        # when both frames exist (data holds the frame(t) coordinates, dnxt the frame(t+1) coordinates)\n        if (index_check != 0) & (index_chnxt != 0):\n            data = track_let[(track_let[:, 0] == i) & (track_let[:, 1] == j)][0]\n            dnxt = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][0]\n            pre = data[2:-1]\n            nxt = dnxt[2:-1]\n            result, result_y, result_x = compute_vector(\n                black, pre, nxt, result, result_y, result_x, SGM\n            )\n\n        # when there is no previous detection but a parent exists\n        elif (index_check == 0) & (index_chnxt != 0) & (par_id != -1):\n            # the frame(t) coordinates of the parent cell\n            if (\n                len(track_let[(track_let[:, 0] == i) & (track_let[:, 1] == par_id)])\n                != 0\n            ):\n                data = track_let[(track_let[:, 0] == i) & (track_let[:, 1] == par_id)][\n                    0\n                ]\n                dnxt = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][\n                    0\n                ]\n                pre = data[2:-1]\n                nxt = dnxt[2:-1]\n                result, result_y, result_x = compute_vector(\n                    black, pre, nxt, result, result_y, result_x, SGM\n                )\n            else:\n                print(\n                    track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][0]\n                )\n\n    # remove the padding\n    result = result[radius:-radius, radius:-radius]\n    print(i, \"to\", i + itv, result.max())\n\n    # set zeros to 1 to avoid division by zero\n    result_org = result.copy()\n    result[result == 0] = 1\n    # remove the padding\n    result_y = result_y[radius:-radius, radius:-radius]\n    result_x = result_x[radius:-radius, radius:-radius]\n    # result_z = result_z[radius:-radius, radius:-radius]\n    result_x = result_x / result\n    result_y = result_y / result\n    # result_z = (result_z / result)\n    result_vector = np.concatenate((result_y, result_x), axis=-1)\n    visualize_hsv(\n        result_vector, str(save_path.parent.joinpath(save_path.name + \".png\"))\n    )\n    # save_npy = save_path + '/{0:03d}.npy'.format(i)\n    # np.save(save_npy, result_vector.astype('float16'))\n    output.append(result_vector)\n    np_output = np.array(output).astype(\"float16\")\n    np.save(str(save_path), np_output)\n\n\n############################################################################################\nSGM = 5  # half of the CMF width\n############################################################################################\nif __name__ == \"__main__\":\n    seqs = [13]\n    time_lates = [1, 5, 9]\n    for time_late in time_lates:\n        for seq in seqs:\n            save_CMP_path = Path(f\"/home/kazuya/main/weakly_tracking/images/sequ{seq}/CMF_6_{time_late}\")\n\n            save_mask_path = save_CMP_path.parent.joinpath(f\"mask_{time_late}\")\n            save_CMP_path.mkdir(parents=True, exist_ok=True)\n            save_mask_path.mkdir(parents=True, exist_ok=True)\n\n            root_path = Path(f\"../output/association/C2C12_9_{time_late}/sequ{seq}\")\n\n            pred1_paths = sorted(root_path.glob(\"*/*_1.txt\"))\n\n            pred2_paths = sorted(root_path.glob(\"*/*_2.txt\"))\n\n            for frame, pred_path in enumerate(zip(pred1_paths, pred2_paths)):\n                # [x, y, cell_id, state]\n                pred1 = np.loadtxt(str(pred_path[0]), delimiter=\",\", skiprows=1)\n                # [x, y, cell_id, state]\n                pred2 = np.loadtxt(str(pred_path[1]), delimiter=\",\", skiprows=1)\n\n                track_let = np.zeros(((pred1.shape[0] + pred2.shape[0], 5)))\n                track_let[pred2.shape[0]:, 0] = 2\n                track_let[: pred2.shape[0], 0] = 1\n                track_let[pred2.shape[0]:, 2:4] = pred1[:, :2]\n                track_let[: pred2.shape[0], 2:4] = pred2[:, :2]\n                track_let[:, -1] = -1\n                track_let[:, 1] = -1\n                for index, pre in enumerate(pred1):\n                    track_let[int(pred2.shape[0] + index), 1] = index\n                    if pre[3] != -1:\n                        track_let[int(pre[2]), 1] = index\n                track_let = track_let[track_let[:, 1] != -1]\n\n                exclude_cells = pred1[pred1[:, 3] == 2]\n                mask = np.zeros((512, 512))\n                for exclude_cell in exclude_cells:\n                    mask = cv2.circle(\n                        mask,\n                        (int(exclude_cell[1]), int(exclude_cell[2])),\n                        SGM * 3,\n                        255,\n                        -1,\n                    )\n\n                exclude_cells = pred2[pred2[:, 3] == 0]\n                for exclude_cell in exclude_cells:\n                    mask = cv2.circle(\n                        mask,\n                        (int(exclude_cell[1]), int(exclude_cell[2])),\n                        SGM * 3,\n                        255,\n                        -1,\n                    )\n\n\n\n                cv2.imwrite(\n                    str(save_mask_path.joinpath(f\"{frame:05d}.tif\")),\n                    mask.astype(np.uint8),\n                )\n\n                track_let = track_let[track_let[:, 1] != -1]\n\n                generate_flow(\n                    track_let,\n                    save_CMP_path.joinpath(f\"{frame:05d}\"),\n                    height=512,\n                    width=512,\n                )\n    print(\"finished\")\n", "sub_path": "utils/cmf_gen_pseudo.py", "file_name": "cmf_gen_pseudo.py", "file_ext": "py", "file_size_in_byte": 8767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cartToPolar", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.normalize", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 154, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 165, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 197, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 199, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 209, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 221, "usage_type": "attribute"}]}
+{"seq_id": "323630674", "text": "import collections\nfrom typing import Deque\nimport re # load the regular expression module\n\nclass Solution:\n    def isPalindrome(self, s: str) -> bool:\n        strs = []\n        for char in s:\n            if char.isalnum(): # isalnum(): returns True/False for whether the character is alphanumeric\n                strs.append(char.lower()) # lowercase every character and append it to strs\n        print('processed characters: ', strs)\n\n        # check whether it is a palindrome\n        while len(strs) > 1: # repeat while strs holds more than one character\n\n            # pop(0): takes the front value, pop(): takes the back value\n            if strs.pop(0) != strs.pop():\n                return False\n        return True\n\n    def isPalindrome1(self, s: str) -> bool:\n\n        # declare the container as a deque\n        strs: Deque = collections.deque() # create a deque\n        print('\ndeque created: ', strs)\n\n        for char in s:\n            if char.isalnum():\n                strs.append(char.lower())\n        print('processed characters: ', strs)\n\n        while len(strs) > 1:\n            if strs.popleft() != strs.pop(): # popleft() on a deque is O(1), pop(0) on a list is O(n)\n                return False\n        return True\n\n    def isPalindrome2(self, s: str) -> bool:\n        s = s.lower()\n        # filter out unneeded characters with a regex: re.sub(pattern, repl, string)\n        s = re.sub('[^a-z0-9]', '', s) # replace anything in s that is not a letter or digit with ''\n        print('\nprocessed characters: ', s)\n\n        return s == s[::-1] # slicing [::-1]: reverses the sequence", "sub_path": "python_algorithm/python_algorithm_06/Array/isPalindrome.py", "file_name": "isPalindrome.py", "file_ext": "py", "file_size_in_byte": 1593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "typing.Deque", "line_number": 24, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 24, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "514071216", "text": "#!/usr/bin/env python\nimport json\nfrom pprint import pprint\nfrom robot_localization.srv import SetPose\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom mavros_msgs.srv import StreamRate\nfrom std_msgs.msg import Bool\nimport sys\nimport rospy\nimport importlib\n\nclass TaskPlanner:\n    \n    NODE_NAME = 'task_planner'\n    \n    # REFACTOR THIS\n    CONTINUE = 1\n    FINISHED = 2\n    \n    def __init__(self):\n        rospy.init_node(self.NODE_NAME, log_level=rospy.INFO)\n\n        plans_filename = sys.argv[1]\n        tasks_path = sys.argv[2]\n        self.plan_name = sys.argv[3]\n\n        sys.path.append(tasks_path)\n\n        with open(plans_filename) as plans_file:\n            self.masterplan = json.load(plans_file)\n        \n        self.init_tasks(self.masterplan)\n        self.plan = self.init_plan(self.masterplan, self.plan_name)\n\n        self.disable_x = rospy.Publisher('/global_x/pid_enable', Bool, queue_size=10)\n        self.disable_y = rospy.Publisher('/global_y/pid_enable', Bool, queue_size=10)\n        self.disable_z = rospy.Publisher('/global_z/pid_enable', Bool, queue_size=10)\n        self.disable_roll = rospy.Publisher('/global_roll/pid_enable', Bool, queue_size=10)\n        self.disable_pitch = rospy.Publisher('/global_pitch/pid_enable', Bool, queue_size=10)\n        self.disable_yaw = rospy.Publisher('/global_yaw/pid_enable', Bool, queue_size=10)\n    \n    \n    def init_tasks(self, masterplan):\n        self.tasks = []\n        for task_info in masterplan['tasks']:\n            rospy.loginfo('Initializing task ' + task_info['name'])\n            task = getattr(importlib.import_module(task_info['modulename']), task_info['classname'])()\n            self.tasks.append(task)\n    \n    def init_plan(self, masterplan, plan_name):\n        target_plan = None\n        for plan in masterplan['plans']:\n            if plan['name'] == plan_name:\n                target_plan = plan\n                break\n        \n        if target_plan is None:\n            raise Exception('Plan ' + plan_name + ' not found')\n        \n        task_names = target_plan['tasks']\n        self.tasks_plan = map(self._get_task_from_name, task_names)\n    \n    def _get_task_from_name(self, name):\n        rospy.loginfo('Getting task for name ' + name)\n        return list(filter(lambda task: task.name == name, self.tasks))[0]\n    \n    def run(self):\n        rospy.wait_for_service('/set_pose')\n        sp = rospy.ServiceProxy('/set_pose', SetPose)\n        zero_pose = PoseWithCovarianceStamped()\n        zero_pose.pose.pose.orientation.w = 1\n        
#sp(zero_pose)\n\n rospy.wait_for_service('/mavros/set_stream_rate')\n ssr = rospy.ServiceProxy('/mavros/set_stream_rate', StreamRate)\n ssr(0, 15, 1)\n\n rate = rospy.Rate(15)\n for task in self.tasks_plan:\n rospy.loginfo('Starting task: ' + task.name)\n task.pre_run_base()\n task.pre_run()\n while not rospy.is_shutdown():\n result = task.run()\n if result == self.CONTINUE:\n pass\n elif result == self.FINISHED:\n break\n rate.sleep()\n \n self.disable_pid()\n \n def disable_pid(self):\n self.disable_x.publish(False)\n self.disable_y.publish(False)\n self.disable_z.publish(False)\n self.disable_roll.publish(False)\n self.disable_pitch.publish(False)\n self.disable_yaw.publish(False)\n\n\nif __name__ == '__main__':\n TaskPlanner().run()\n", "sub_path": "catkin_ws/src/task_planning/scripts/task_planner.py", "file_name": "task_planner.py", "file_ext": "py", "file_size_in_byte": 3511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rospy.init_node", "line_number": 21, "usage_type": "call"}, {"api_name": "rospy.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 35, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 35, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 36, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 36, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 37, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 37, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 38, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 38, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 39, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 39, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 40, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 40, "usage_type": "argument"}, {"api_name": "rospy.loginfo", "line_number": 46, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 47, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 64, "usage_type": "call"}, {"api_name": "rospy.wait_for_service", "line_number": 68, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 69, "usage_type": "call"}, {"api_name": "robot_localization.srv.SetPose", "line_number": 69, "usage_type": "argument"}, {"api_name": "geometry_msgs.msg.PoseWithCovarianceStamped", "line_number": 70, "usage_type": "call"}, {"api_name": "rospy.wait_for_service", "line_number": 74, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 75, "usage_type": "call"}, {"api_name": "mavros_msgs.srv.StreamRate", "line_number": 75, "usage_type": "argument"}, {"api_name": "rospy.Rate", "line_number": 78, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 80, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 83, "usage_type": "call"}]} 
+{"seq_id": "96202310", "text": "import json\nimport pickle\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom typing import Dict, Tuple\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.metrics import mean_squared_error\n\ndef rmse(a, b):\n return np.sqrt(mean_squared_error(a, b))\n\n\ndef load_model(model_file: Path) -> GradientBoostingRegressor:\n model: GradientBoostingRegressor = pickle.loads(model_file.read_bytes())\n return model\n\n\ndef load_df(folder: Path) -> DataFrame:\n \"\"\"Load prepared data into dataframe from folder\n\n Args:\n folder (Path): folder containing data.csv\n\n Returns:\n DataFrame: prepared dataframe\n \"\"\"\n return pd.read_csv(folder/\"data.csv\")\n\n\ndef load_data_for_model(original_data: DataFrame) -> Tuple[DataFrame, DataFrame]:\n \"\"\"Load model-specific version of data (with addition type transformations etc.)\n\n Args:\n original_data (DataFrame): original dataframe\n\n Returns:\n Tuple[DataFrame, DataFrame]: (X, y) dataframes ready for model.fit()\n \"\"\"\n columns_to_drop = original_data.columns[\n # sklearn GradientBoostingRegressor does not handle strings\n original_data.columns.str.contains(\"_name\")\n ]\n original_data = original_data.drop(columns=columns_to_drop)\n X = original_data.drop(columns=[\"item_cnt_month\"])\n y = original_data[\"item_cnt_month\"]\n return X, y\n\n\ndef load_test_range(test_folder: Path):\n return pd.read_csv(test_folder/\"test.csv\", index_col=[\"shop_id\", \"item_id\"])\n\n\ndef extend_target_df(test_range: DataFrame, val_df: DataFrame, prediction: np.ndarray) -> DataFrame:\n target_df = test_range.join(val_df.set_index([\"shop_id\", \"item_id\"]).assign(prediction=prediction)).assign(\n date_block_num=(24 + 9), \n item_cnt_month=lambda df: df.item_cnt_month.fillna(0),\n prediction=lambda df: df.prediction.fillna(0),\n date_year=2015, \n date_month=9,\n )\n return target_df\n\n\ndef evaluate_model(model_file: Path, val_folder: Path, test_folder: Path) -> Dict:\n \"\"\"Evaluate model\n\n Args:\n model_file (Path): path to mode pickle file\n\n Returns:\n Dict: metrics\n \"\"\"\n model = load_model(model_file)\n val_df = load_df(val_folder)\n X_val, _ = load_data_for_model(val_df)\n prediction = model.predict(X_val)\n test_range = load_test_range(test_folder)\n extended = extend_target_df(test_range, val_df, prediction)\n return {\n \"rmse\": rmse(extended.item_cnt_month, extended.prediction)\n }\n\n\ndef write_metrics(metrics: Dict, file: Path):\n file.write_text(json.dumps(metrics))\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"val_folder\", type=Path)\n parser.add_argument(\"test_folder\", type=Path)\n parser.add_argument(\"--model_file\", type=Path, default=Path(\"model.pkl\"))\n parser.add_argument(\"--metrics_file\", type=Path,\n default=Path(\"metrics.json\"))\n\n args = parser.parse_args()\n\n metrics = evaluate_model(model_file=args.model_file, \n val_folder=args.val_folder,\n test_folder=args.test_folder)\n write_metrics(metrics=metrics, file=args.metrics_file)", "sub_path": "code/src/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 3225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.sqrt", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 14, "usage_type": "call"}, {"api_name": 
"pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 18, "usage_type": "name"}, {"api_name": "pickle.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 17, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 34, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 88, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 88, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 93, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 94, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 95, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 96, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 97, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "63604089", "text": "from common import *\nimport autograd.numpy as np\nimport matplotlib.pyplot as plt\nimport autograd.numpy.random as rng\nfrom autograd.numpy.random import multivariate_normal as rmvn\nfrom autograd.numpy.linalg import cholesky, solve\nfrom autograd.scipy.linalg import cholesky as chol\nfrom autograd.scipy.linalg import solve_triangular as solve_tri\nimport cov\n\n# Perform inference in the 0-mean GP specified by the covariance function fcov\n# and observation noise s2n.\n#\n# Inputs:\n# X - observation inputs. (N)\n# y - observation outputs. (N)\n# fcov - (stationary) covariance function.\n# s2n - observation noise.\n#\n# Outputs:\n# posterior - function which accepts new inputs and computes functions to\n# compute the posterior distribution at these points.\n# lml - function to compute the log marginal likelihood of the data log p(y | X).\n#\ndef infer(X, y, fcov, s2n):\n \n # Compute suff. stats for posterior prediction. 
Follows conventions from\n # page 19 of GPforML (Rasmussen and Williams).\n N = y.shape[0]\n Kxx = fcov(X) + s2n * np.eye(N)\n L = chol(Kxx, lower=True)\n alpha = solve_tri(L, solve_tri(L, y, lower=True), lower=True, trans='T')\n\n # Define function to make posterior predictions at new data.\n def posterior(Xs):\n Ks_diag, Ksx, Kss = fcov(Xs, diag=True), fcov(Xs, Z=X), fcov(Xs)\n\n # Return function to compute posterior means.\n def mu():\n return np.dot(Ksx, alpha)\n\n # Return function to compute posterior marginal variances.\n def s2():\n Ns = Xs.shape[0]\n s2out = np.empty(Ns)\n for j in range(Ns):\n v = solve_tri(L, Ksx[j], lower=True)\n s2out[j] = np.dot(v, v)\n return Ks_diag - s2out\n\n # Return the full posterior covariance.\n def Sigma():\n B = solve_tri(L, Ksx.T, lower=True)\n return Kss - np.dot(B.T, B)\n return mu, s2, Sigma\n\n # Compute the log marginal likelihood of the data.\n def lml():\n return -0.5*(N*log2pi() + 2*np.sum(np.log(np.diag(L))) + np.dot(y, alpha))\n \n # Functions to compute posterior predictive and log marginal likelihood.\n return posterior, lml\n\n\ndef main():\n\n # Define the covariance function.\n print('Define covariance function.')\n pars = {'l2h' : np.log(np.exp(1.0) - 1.0), 's2h' : np.log(np.exp(1.0) - 1.0)}\n fcov = cov.factory(cov.eq, pars)\n\n # Generate some data.\n print('Generate toy data.')\n rng.seed(15485863)\n lb, ub, N, s2n = 0.0, 10.0, 250, 1e-1\n X1 = rng.uniform(low=lb, high=ub / 3, size=N / 2)\n X2 = rng.uniform(low=ub * 2.0 / 3.0, high=ub, size=N / 2)\n X = rng.permutation(np.hstack([X1, X2]))\n X = rng.uniform(low=lb, high=ub, size=N)\n X = np.linspace(lb, ub, N)\n y = rmvn(np.zeros(N), fcov(X, X) + s2n * np.eye(N))\n posterior = infer(X, y, fcov, s2n)\n\n Ns, delta = 500, 5.0\n Xs = np.linspace(lb - delta, ub + delta, Ns) \n mu, s2, Sigma = posterior(Xs)\n muX, sX = mu(), np.sqrt(s2())\n plt.plot(Xs, muX, 'b', Xs, muX + 2 * sX, 'b--', Xs, muX - 2 * sX, 'b--',\\\n X, y, 'rx')\n plt.figure()\n plt.imshow(np.log(Sigma() + 1e-3))\n plt.colorbar()\n plt.show()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "exp/circgp/gpexact.py", "file_name": "gpexact.py", "file_ext": "py", "file_size_in_byte": 3065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "autograd.numpy.eye", "line_number": 30, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 30, "usage_type": "name"}, {"api_name": "autograd.scipy.linalg.cholesky", "line_number": 31, "usage_type": "call"}, {"api_name": "autograd.scipy.linalg.solve_triangular", "line_number": 32, "usage_type": "call"}, {"api_name": "autograd.numpy.dot", "line_number": 40, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 40, "usage_type": "name"}, {"api_name": "autograd.numpy.empty", "line_number": 45, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 45, "usage_type": "name"}, {"api_name": "autograd.scipy.linalg.solve_triangular", "line_number": 47, "usage_type": "call"}, {"api_name": "autograd.numpy.dot", "line_number": 48, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 48, "usage_type": "name"}, {"api_name": "autograd.scipy.linalg.solve_triangular", "line_number": 53, "usage_type": "call"}, {"api_name": "autograd.numpy.dot", "line_number": 54, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 54, "usage_type": "name"}, {"api_name": "autograd.numpy.sum", "line_number": 59, "usage_type": "call"}, {"api_name": "autograd.numpy", 
"line_number": 59, "usage_type": "name"}, {"api_name": "autograd.numpy.log", "line_number": 59, "usage_type": "call"}, {"api_name": "autograd.numpy.diag", "line_number": 59, "usage_type": "call"}, {"api_name": "autograd.numpy.dot", "line_number": 59, "usage_type": "call"}, {"api_name": "autograd.numpy.log", "line_number": 69, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 69, "usage_type": "name"}, {"api_name": "autograd.numpy.exp", "line_number": 69, "usage_type": "call"}, {"api_name": "cov.factory", "line_number": 70, "usage_type": "call"}, {"api_name": "cov.eq", "line_number": 70, "usage_type": "attribute"}, {"api_name": "autograd.numpy.random.seed", "line_number": 74, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 74, "usage_type": "name"}, {"api_name": "autograd.numpy.random.uniform", "line_number": 76, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 76, "usage_type": "name"}, {"api_name": "autograd.numpy.random.uniform", "line_number": 77, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 77, "usage_type": "name"}, {"api_name": "autograd.numpy.random.permutation", "line_number": 78, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 78, "usage_type": "name"}, {"api_name": "autograd.numpy.hstack", "line_number": 78, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 78, "usage_type": "name"}, {"api_name": "autograd.numpy.random.uniform", "line_number": 79, "usage_type": "call"}, {"api_name": "autograd.numpy.random", "line_number": 79, "usage_type": "name"}, {"api_name": "autograd.numpy.linspace", "line_number": 80, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 80, "usage_type": "name"}, {"api_name": "autograd.numpy.random.multivariate_normal", "line_number": 81, "usage_type": "call"}, {"api_name": "autograd.numpy.zeros", "line_number": 81, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 81, "usage_type": "name"}, {"api_name": "autograd.numpy.eye", "line_number": 81, "usage_type": "call"}, {"api_name": "autograd.numpy.linspace", "line_number": 85, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 85, "usage_type": "name"}, {"api_name": "autograd.numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "autograd.numpy.log", "line_number": 91, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "318137463", "text": "\"\"\"This module implements a two-stage HMAX-like model.\n\nThis module implements a multi-scale analysis by applying single-scale Gabors to\na scale 
pyramid of the input image. This is similar to the configuration used by\nMutch & Lowe (2008).\n\n\"\"\"\n\n# Copyright (c) 2011 Mick Thomure\n# All rights reserved.\n#\n# Please see the file COPYING in this distribution for usage terms.\n\nfrom scipy.ndimage.interpolation import zoom\n\nfrom glimpse.models.misc import BaseState, Whiten\nfrom glimpse.models.viz2.model import Model as Viz2Model\nfrom glimpse.models.viz2.model import Layer\nfrom glimpse.util import kernel\nfrom .params import Params\n\nclass State(BaseState):\n \"\"\"A container for the :class:`Model` state.\"\"\"\n pass\n\nclass Model(Viz2Model):\n \"\"\"Create a 2-part, HMAX-like hierarchy of S+C layers.\"\"\"\n\n #: The datatype associated with layer descriptors for this model.\n LayerClass = Layer\n\n #: The parameters type associated with this model.\n ParamClass = Params\n\n #: The datatype associated with network states for this model.\n StateClass = State\n\n @property\n def s1_kernel_shape(self):\n \"\"\"The expected shape of the S1 kernels array, including band structure.\n\n :rtype: tuple of int\n\n \"\"\"\n p = self.params\n return p.s1_num_orientations, p.s1_num_phases, p.s1_kwidth, p.s1_kwidth\n\n @property\n def s1_kernels(self):\n \"\"\"The set of S1 kernels, which is generated if not set.\n\n :returns: S1 kernels indexed by orientation, and phase.\n :rtype: 4D ndarray of float\n\n \"\"\"\n # if kernels array is empty, then generate it using current model parameters\n if self._s1_kernels == None:\n p = self.params\n self._s1_kernels = kernel.MakeGaborKernels(\n kwidth = p.s1_kwidth,\n num_orientations = p.s1_num_orientations,\n num_phases = p.s1_num_phases, shift_orientations = True,\n scale_norm = self.s1_kernels_are_normed)\n return self._s1_kernels\n\n def BuildS1FromRetina(self, retina):\n \"\"\"Apply S1 processing to some existing retinal layer data.\n\n .. 
note::\n\n This method pools over phase, so the output has only scale and\n orientation bands.\n\n :param retina: Result of retinal layer processing.\n :type retina: 2D ndarray of float\n :return: S1 maps indexed by scale and orientation.\n :rtype: list of 3D ndarray of float\n\n \"\"\"\n # Create scale pyramid of retinal map\n p = self.params\n retina_scales = [ zoom(retina, 1 / p.scale_factor ** scale)\n for scale in range(p.num_scales) ]\n # Reshape kernel array to be 3-D: index, 1, y, x\n s1_kernels = self.s1_kernels.reshape((-1, 1, p.s1_kwidth, p.s1_kwidth))\n s1s = []\n backend_op = getattr(self.backend, p.s1_operation)\n for scale in range(p.num_scales):\n # Reshape retina to be 3D array\n retina = retina_scales[scale]\n retina_ = retina.reshape((1,) + retina.shape)\n s1_ = backend_op(retina_, s1_kernels, bias = p.s1_bias, beta = p.s1_beta,\n scaling = p.s1_sampling)\n # Reshape S1 to be 4D array\n s1 = s1_.reshape((p.s1_num_orientations, p.s1_num_phases) + \\\n s1_.shape[-2:])\n # Pool over phase.\n s1 = s1.max(1)\n # Append 3D array to list\n s1s.append(s1)\n return s1s\n\n def BuildC1FromS1(self, s1s):\n \"\"\"Compute the C1 layer activity from multi-scale S1 activity.\n\n :param s1s: S1 maps indexed by scale.\n :type s1s: list of 3D ndarray of float, or 4D ndarray of float\n :returns: C1 maps indexed by scale and orientation.\n :rtype: list of 3D ndarray of float\n\n \"\"\"\n p = self.params\n c1s = [ self.backend.LocalMax(s1, kwidth = p.c1_kwidth,\n scaling = p.c1_sampling) for s1 in s1s ]\n if p.c1_whiten:\n # Whiten each scale independently, modifying values in-place.\n map(Whiten, c1s)\n return c1s\n\n def BuildS2FromC1(self, c1s):\n \"\"\"Compute the S2 layer activity from multi-scale C1 activity.\n\n :param c1s: C1 maps indexed by scale and orientation.\n :type c1s: 4D ndarray of float, or list of 3D ndarray of float\n :returns: S2 maps indexed by scale and prototype.\n :rtype: list of 3D ndarray of float\n\n \"\"\"\n if self.s2_kernels == None or len(self.s2_kernels[0]) == 0:\n raise Exception(\"Need S2 kernels to compute S2 layer activity, but none \"\n \"were specified.\")\n kernels = self.s2_kernels[0]\n if len(c1s) == 0:\n return []\n p = self.params\n s2s = []\n backend_op = getattr(self.backend, p.s2_operation)\n for scale in range(p.num_scales):\n c1 = c1s[scale]\n s2 = backend_op(c1, kernels, bias = p.s2_bias, beta = p.s2_beta,\n scaling = p.s2_sampling)\n # Append 3D array to list.\n s2s.append(s2)\n return s2s\n\n# Add (circular) Model reference to State class.\nState.ModelClass = Model\n", "sub_path": "glimpse/models/ml/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 4757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "glimpse.models.misc.BaseState", "line_number": 22, "usage_type": "name"}, {"api_name": "glimpse.models.viz2.model.Model", "line_number": 26, "usage_type": "name"}, {"api_name": "glimpse.models.viz2.model.Layer", "line_number": 30, "usage_type": "name"}, {"api_name": "params.Params", "line_number": 33, "usage_type": "name"}, {"api_name": "glimpse.util.kernel.MakeGaborKernels", "line_number": 59, "usage_type": "call"}, {"api_name": "glimpse.util.kernel", "line_number": 59, "usage_type": "name"}, {"api_name": "scipy.ndimage.interpolation.zoom", "line_number": 82, "usage_type": "call"}, {"api_name": "glimpse.models.misc.Whiten", "line_number": 117, "usage_type": "argument"}]} +{"seq_id": "330718015", "text": "import glob\nimport requests\nimport 
json\nimport ExperimentBoiler\nimport geoDonorMinimiser\nimport geoBiosampleMinimiser\nimport urlparse\nimport sys\nfrom time import sleep\n\n\nHEADERS = {'accept': 'application/json'}\nGET_HEADERS = {'accept': 'application/json'}\nPOST_HEADERS = {'accept': 'application/json',\n 'Content-Type': 'application/json'}\n#SERVER = \"https://test.encodedcc.org/\"\nSERVER = \"https://www.encodeproject.org/\"\n\ndef encoded_get(url, keypair=None, frame='object', return_response=False):\n url_obj = urlparse.urlsplit(url)\n new_url_list = list(url_obj)\n query = urlparse.parse_qs(url_obj.query)\n if 'format' not in query:\n new_url_list[3] += \"&format=json\"\n if 'frame' not in query:\n new_url_list[3] += \"&frame=%s\" % (frame)\n if 'limit' not in query:\n new_url_list[3] += \"&limit=all\"\n if new_url_list[3].startswith('&'):\n new_url_list[3] = new_url_list[3].replace('&', '', 1)\n get_url = urlparse.urlunsplit(new_url_list)\n max_retries = 10\n max_sleep = 10\n while max_retries:\n try:\n if keypair:\n response = requests.get(get_url,\n auth=keypair,\n headers=GET_HEADERS)\n else:\n response = requests.get(get_url, headers=GET_HEADERS)\n except (requests.exceptions.ConnectionError,\n requests.exceptions.SSLError) as e:\n print >> sys.stderr, e\n sleep(max_sleep - max_retries)\n max_retries -= 1\n continue\n else:\n if return_response:\n return response\n else:\n return response.json()\n\n\ndef getKeyPair(path_to_key_pair_file, server_name):\n keysf = open(path_to_key_pair_file, 'r')\n keys_json_string = keysf.read()\n keysf.close()\n keys = json.loads(keys_json_string)\n key_dict = keys[server_name]\n AUTHID = key_dict['key']\n AUTHPW = key_dict['secret']\n return (AUTHID, AUTHPW)\n\n\ndef extract_biosamples(exp):\n samples = []\n if exp['status'] == 'released' and \\\n 'replicates' in exp and \\\n len(exp['replicates']) > 0:\n for replicate in exp['replicates']:\n if replicate['status'] == 'released' and \\\n replicate['library']['status'] == 'released' and \\\n replicate['library']['biosample']['status'] == 'released':\n samples.append(replicate['library']['biosample']['accession'])\n return list(set(samples))\n\n\ndef extract_controls(exp):\n if \"possible_controls\" in exp and \\\n len(exp['possible_controls']) > 0:\n controls_list = []\n for e in exp['possible_controls']:\n controls_list.append(e['accession'])\n\n return list(set(controls_list))\n else:\n return []\n\n\ndef extract_donors(biosamples_list):\n donors = []\n for biosample in biosamples_list:\n if biosample['status'] == 'released' and \\\n 'donor' in biosample and \\\n biosample['donor']['status'] == 'released':\n donors.append(biosample['donor']['accession'])\n return list(set(donors))\n\n\nkeypair = getKeyPair('keypairs.json', 'test')\n\nAUTHID = keypair[0]\nAUTHPW = keypair[1]\n\n# phase 1 - collect all experiments submitted so far.\n\nsubmittedExperiments = set()\nfor filename in glob.glob('../experiments/*.json'):\n submittedExperiments.add(filename.split('/')[2].split('_')[0])\n\ne3 =0\nother =0\nm = 0\nf_e3 = open('e3_submitted_to_geo.tsv', \"w\")\nx = open('not_e3_submitted_to_geo.tsv', \"w\")\n\nfor experiment in submittedExperiments:\n URL = SERVER + experiment + \"/?frame=embedded&format=json\"\n response = requests.get(URL, auth=(AUTHID, AUTHPW), headers=HEADERS)\n experiment_o = response.json()\n if experiment_o['award']['rfa']=='ENCODE3':\n e3 += 1\n f_e3.write(experiment + \"\\t\" + str(experiment_o['dbxrefs']) + '\\t' +experiment_o['award']['rfa'] + '\\n')\n else:\n other += 1\n x.write(experiment + \"\\t\" + 
str(experiment_o['dbxrefs']) + '\\t' + experiment_o['award']['rfa']+ '\\n')\n m += 1\n if m % 10 == 0:\n print ('processed ' + str(m))\n\nprint ('E3 = ' + str(e3) + ' other = ' + str(other))\nf_e3.close()\nx.close()\n", "sub_path": "src/report_script.py", "file_name": "report_script.py", "file_ext": "py", "file_size_in_byte": 4239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urlparse.urlsplit", "line_number": 20, "usage_type": "call"}, {"api_name": "urlparse.parse_qs", "line_number": 22, "usage_type": "call"}, {"api_name": "urlparse.urlunsplit", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 42, "usage_type": "attribute"}, {"api_name": "requests.exceptions", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 44, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 109, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "458758221", "text": "# -*- coding: utf-8 -*-\n# pylint: disable=missing-docstring,too-many-public-methods,invalid-name,protected-access,no-self-use\n\"\"\"\nListView pagination tests.\n\"\"\"\nimport math\n\nfrom common.peewee_model import SystemPlatform\nfrom manager.base import InvalidArgumentException\nfrom manager.list_view import ListView\n\nfrom .vuln_testcase import FlaskTestCase\n\nSORTABLE = {\n 'inventory_id': SystemPlatform.inventory_id,\n 'vmaas_json': SystemPlatform.vmaas_json,\n 'last_evaluation': SystemPlatform.last_evaluation\n}\nFILTERABLE = {}\nQUERY = (SystemPlatform.select(SystemPlatform.inventory_id))\nURI = 'http://localhost:6666/api/v1/vulnerability/systems'\n\nTOTAL_ITEMS = 127\nLIMIT = 5\nLIST_ARGS = {\n 'page': 4,\n 'page_size': 5,\n 'pages': 66,\n 'opt_out': 'foo',\n 'limit': LIMIT,\n 'offset': 15,\n 'total_items': TOTAL_ITEMS\n}\n\nQUERY_ARGS = {\n 'cvss_from': '2001-01-01', 'cvss_to': '2020-01-01',\n 'show_all': True, 'opt_out': True,\n 'status_id': 3,\n 'inventory_id': 'INV-ID-0001'\n}\n\n\nclass NoQueryListView(ListView):\n \"\"\"Pseudo-view used to test the basic math/param-processing of ListView and links\"\"\"\n\n def __init__(self, query, sortable_columns, filterable_columns, list_args, query_args, uri, total):\n self.total_items = total\n super(NoQueryListView, self).__init__(query, sortable_columns, filterable_columns, list_args, query_args, uri)\n\n def _apply_args(self, args):\n # Intercept so we can ignore the query\n self.active_filter = 'foo'\n self.active_sort = 'bar'\n self.page = args[\"page\"]\n self.page_size = args[\"page_size\"]\n self.limit = args[\"limit\"]\n self.offset = args[\"offset\"]\n pages = math.ceil(self.total_items / self.page_size)\n self.pages = pages if pages > 0 else 1\n\n if self.page > self.pages:\n raise InvalidArgumentException(\"Requested page out of range: %s\" % self.page)\n\n if self.offset > self.total_items:\n raise InvalidArgumentException(\"Requested starting offset out of range: %s\" % self.offset)\n\n\nclass TestLinks(FlaskTestCase):\n\n def test_first(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert view._get_first(0, LIMIT, 
TOTAL_ITEMS) == 0\n assert view._get_first(2, LIMIT, TOTAL_ITEMS) == 0\n\n def test_previous(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert view._get_previous(0, LIMIT, TOTAL_ITEMS) == 0\n assert view._get_previous(20, LIMIT, TOTAL_ITEMS) == 15\n assert view._get_previous(120, LIMIT, TOTAL_ITEMS) == 115\n assert view._get_previous(15, LIMIT, TOTAL_ITEMS) == 10\n assert view._get_previous(2, LIMIT, TOTAL_ITEMS) == 0\n\n def test_next(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert view._get_next(0, LIMIT, TOTAL_ITEMS) == 5\n assert view._get_next(20, LIMIT, TOTAL_ITEMS) == 25\n assert view._get_next(120, LIMIT, TOTAL_ITEMS) == 125\n assert view._get_next(16, LIMIT, TOTAL_ITEMS) == 20\n assert view._get_next(2, LIMIT, TOTAL_ITEMS) == 5\n\n def test_last(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert view._get_last(5, 5, TOTAL_ITEMS)\n assert view._get_last(0, 3, 1) == 0\n assert view._get_last(0, 3, 3) == 0\n assert view._get_last(0, 3, 5) == 3\n assert view._get_last(0, 3, 6) == 3\n assert view._get_last(0, 3, 7) == 6\n\n def test_first_link(self):\n LOCAL_LIST_ARGS = LIST_ARGS.copy()\n LOCAL_LIST_ARGS['offset'] = 0\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'limit=%s' % (LIMIT) in view._get_first_link()\n assert 'offset=0' in view._get_first_link()\n\n assert view._get_previous(0, LIMIT, TOTAL_ITEMS) == 0\n assert view._get_previous(20, LIMIT, TOTAL_ITEMS) == 15\n assert view._get_previous(120, LIMIT, TOTAL_ITEMS) == 115\n assert view._get_previous(15, LIMIT, TOTAL_ITEMS) == 10\n assert view._get_previous(2, LIMIT, TOTAL_ITEMS) == 0\n\n def test_prev_link(self):\n LOCAL_LIST_ARGS = LIST_ARGS.copy()\n LOCAL_LIST_ARGS['offset'] = 0\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert view._get_previous_link() is None\n\n LOCAL_LIST_ARGS['offset'] = 20\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=15' in view._get_previous_link()\n\n LOCAL_LIST_ARGS['offset'] = 120\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=115' in view._get_previous_link()\n\n LOCAL_LIST_ARGS['offset'] = 15\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=10' in view._get_previous_link()\n\n LOCAL_LIST_ARGS['offset'] = 2\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=0' in view._get_previous_link()\n\n def test_next_link(self):\n LOCAL_LIST_ARGS = LIST_ARGS.copy()\n LOCAL_LIST_ARGS['offset'] = 0\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=5' in view._get_next_link()\n\n LOCAL_LIST_ARGS['offset'] = 20\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=25' in view._get_next_link()\n\n LOCAL_LIST_ARGS['offset'] = 120\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=125' in view._get_next_link()\n\n LOCAL_LIST_ARGS['offset'] = 16\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n 
assert 'offset=20' in view._get_next_link()\n\n LOCAL_LIST_ARGS['offset'] = 2\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=5' in view._get_next_link()\n\n def test_last_link(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n assert 'offset=125' in view._get_last_link()\n\n args = LIST_ARGS.copy()\n args['page'] = 0\n args['page_size'] = 3\n args['offset'] = 0\n args['limit'] = 3\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 1)\n assert 'offset=0' in view._get_last_link()\n\n args['total_items'] = 3\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 3)\n assert 'offset=0' in view._get_last_link()\n\n args['total_items'] = 5\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 5)\n assert 'offset=3' in view._get_last_link()\n\n args['total_items'] = 6\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 6)\n assert 'offset=3' in view._get_last_link()\n\n args['total_items'] = 7\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 7)\n assert 'offset=6' in view._get_last_link()\n\n def test_links_stanza(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n links = view.get_pagination_links()\n assert links['first'] == view._get_first_link()\n assert links['next'] == view._get_next_link()\n assert links['previous'] == view._get_previous_link()\n assert links['last'] == view._get_last_link()\n\n def test_links_filters(self):\n view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)\n last_link = view._get_last_link()\n assert 'cvss_from=2001-01-01' in last_link\n assert 'cvss_to=2020-01-01' in last_link\n assert 'show_all=True' in last_link\n assert 'opt_out=True' in last_link\n assert 'status_id=3' in last_link\n assert 'inventory_id=INV-ID-0001' in last_link\n\n args = QUERY_ARGS.copy()\n del args['show_all']\n view = NoQueryListView(QUERY, SORTABLE, args, LIST_ARGS, args, URI, TOTAL_ITEMS)\n last_link = view._get_last_link()\n assert 'show_all=True' not in last_link\n", "sub_path": "tests/manager_tests/test_links.py", "file_name": "test_links.py", "file_ext": "py", "file_size_in_byte": 8667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "common.peewee_model.SystemPlatform.inventory_id", "line_number": 15, "usage_type": "attribute"}, {"api_name": "common.peewee_model.SystemPlatform", "line_number": 15, "usage_type": "name"}, {"api_name": "common.peewee_model.SystemPlatform.vmaas_json", "line_number": 16, "usage_type": "attribute"}, {"api_name": "common.peewee_model.SystemPlatform", "line_number": 16, "usage_type": "name"}, {"api_name": "common.peewee_model.SystemPlatform.last_evaluation", "line_number": 17, "usage_type": "attribute"}, {"api_name": "common.peewee_model.SystemPlatform", "line_number": 17, "usage_type": "name"}, {"api_name": "common.peewee_model.SystemPlatform.select", "line_number": 20, "usage_type": "call"}, {"api_name": "common.peewee_model.SystemPlatform", "line_number": 20, "usage_type": "name"}, {"api_name": "common.peewee_model.SystemPlatform.inventory_id", "line_number": 20, "usage_type": "attribute"}, {"api_name": "manager.list_view.ListView", "line_number": 43, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 58, "usage_type": "call"}, 
{"api_name": "manager.base.InvalidArgumentException", "line_number": 62, "usage_type": "call"}, {"api_name": "manager.base.InvalidArgumentException", "line_number": 65, "usage_type": "call"}, {"api_name": "vuln_testcase.FlaskTestCase", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "277593907", "text": "#Code to run a quantum random number generator on a real quantum device.\nfrom qiskit import QuantumCircuit, IBMQ, execute\n\n#Authenticate an account and add for use during this session.\nIBMQ.enable_account(\"YOUR_API_TOKEN\")\nprovider = IBMQ.get_provider(hub='ibm-q')\n\n#Initialize the number of qubits and classical registers\nnumber =3\ncircuit = QuantumCircuit(number, number)\n\n#Apply an hadamard gate to every qubits\ncircuit.h(range(number))\n\n#Measure every qubits\ncircuit.measure(range(number), range(number))\n\n# Set the quantum device and execute the quantum circuit\nbackend = provider.get_backend('ibmq_belem')\njob = execute(circuit, backend, shots=1)\n\n#Get and print results\nresult = job.result()\nprint(result.get_counts())\n", "sub_path": "quantum_coins.py", "file_name": "quantum_coins.py", "file_ext": "py", "file_size_in_byte": 726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "qiskit.IBMQ.enable_account", "line_number": 5, "usage_type": "call"}, {"api_name": "qiskit.IBMQ", "line_number": 5, "usage_type": "name"}, {"api_name": "qiskit.IBMQ.get_provider", "line_number": 6, "usage_type": "call"}, {"api_name": "qiskit.IBMQ", "line_number": 6, "usage_type": "name"}, {"api_name": "qiskit.QuantumCircuit", "line_number": 10, "usage_type": "call"}, {"api_name": "qiskit.execute", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "86064595", "text": "# coding=UTF-8\n#%matplotlib inline\nimport visa\nimport time\nimport datetime\nimport numpy as np\n\nN=50\nppsvalue=np.array([5.50, 5.00, 4.5, 3.60, 3.30, 3.00, 2.70, 2.20])\n\nrm = visa.ResourceManager()\npps=rm.open_resource('GPIB0::6::INSTR')\ncnter= rm.open_resource('GPIB0::3::INSTR')\nprint(pps.query('*MODEL?'))\nprint(cnter.query('*IDN?'))\npps.write ('OVSET1 9.00; OVP 1; OCP 1; ISET1 1.00')\npps.write('VSET1 3.00;OUT1 1')\ndel cnter.timeout\ntime.sleep(5)\n\nfor ppsv in ppsvalue:\n\tfilename ='file'+time.strftime(\"%m%d%H%M%S\", time.localtime())+'.txt'\n\tpps.write('VSET1 '+str(ppsv)+';OUT1 1')\n\ttime.sleep(1)\n\n\tfiletemp = open (filename, mode='a')\n\tfiletemp.write(\"VDD=\"+str(ppsv)+\"\\n\")\n\tfiletemp.close\t\n\t#print ('VDD='+str(ppsv))\n\n\tfcnt=0\n\t\n\tfor fcnt in range(N):\n\t\tmeafre = cnter.query(\"FETCH:FREQ?\")\n\t\tfiletemp = open (filename, mode='a')\n\t\tfiletemp.write(str (float(meafre))+\"\\n\")\n\t\tfiletemp.close\t\n\t\tprint ('VDD=' + str(ppsv) + str((float(meafre)-1)))\n\t\t#print (str ((float(meafre)-1)))\n\t\t\t\nprint ('finished')\t\n\t\n", "sub_path": "array_test.py", "file_name": "array_test.py", "file_ext": "py", "file_size_in_byte": 1009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "visa.ResourceManager", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 22, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 22, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "571468285", "text": 
"from starlette.routing import Router, Route\nfrom starlette.requests import Request\nfrom starlette.authentication import requires\nfrom omo.views import template_env, template\nfrom omo.db import database\nfrom omo.middlewares import COOKIES_SESSION_TOKEN_KEY\n\n\n@requires('authenticated', redirect='login')\nasync def my_account(request: Request):\n \"\"\"\n This returns the member's account details\n \"\"\"\n page = template_env.get_template('my-account.html')\n context = {'request': request}\n\n token = request.cookies[COOKIES_SESSION_TOKEN_KEY]\n query = 'SELECT id, first_name, last_name, email FROM member WHERE token = :token'\n fetch = await database.fetch_one(query=query, values={'token': token})\n \n if fetch:\n member_id = fetch['id']\n first_name = fetch['first_name']\n last_name = fetch['last_name']\n email = fetch['email']\n context['member_details'] = {'id': member_id,\n 'first_name': f'{first_name} {last_name}',\n 'email': email\n }\n return template.TemplateResponse(page, context=context)\n\n\naccounts_router = Router(routes=[\n Route('/my_account/', endpoint=my_account, methods=['GET'])\n])\n", "sub_path": "omo/routes/accounts.py", "file_name": "accounts.py", "file_ext": "py", "file_size_in_byte": 1263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "starlette.requests.Request", "line_number": 10, "usage_type": "name"}, {"api_name": "omo.views.template_env.get_template", "line_number": 14, "usage_type": "call"}, {"api_name": "omo.views.template_env", "line_number": 14, "usage_type": "name"}, {"api_name": "omo.middlewares.COOKIES_SESSION_TOKEN_KEY", "line_number": 17, "usage_type": "name"}, {"api_name": "omo.db.database.fetch_one", "line_number": 19, "usage_type": "call"}, {"api_name": "omo.db.database", "line_number": 19, "usage_type": "name"}, {"api_name": "omo.views.template.TemplateResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "omo.views.template", "line_number": 30, "usage_type": "name"}, {"api_name": "starlette.authentication.requires", "line_number": 9, "usage_type": "call"}, {"api_name": "starlette.routing.Router", "line_number": 33, "usage_type": "call"}, {"api_name": "starlette.routing.Route", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "529240850", "text": "import sys\nsys.path.append('C:\\E\\mysoft\\python-workSpace\\pythons\\test-dash2')\nimport pandas as pd\nimport pymysql\nfrom sshtunnel import SSHTunnelForwarder\nfrom sqlalchemy import create_engine\nfrom pyecharts.charts import Bar\nfrom example.commons import Faker\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Page, Pie, Gauge, Line\nfrom pyecharts.globals import ThemeType\nimport pyecharts.commons.utils as results\n\n\n\n# 连接线上db_itouzi主库\ndef db_itz_conn():\n db_itz_conn = pymysql.connect(host='172.16.3.127', port=3306, user='chenlianqing',\n passwd='zEtwv4qaxs4mMox', db='db_itouzi', charset='utf8')\n return db_itz_conn\n\n\n# 连接ecshop主库 rm-2zes9s7zvt5z2il509o.mysql.rds.aliyuncs.com,修改为从库\ndef db_ecshop_conn():\n db_ecshop_conn = pymysql.connect(\n host='huanhuan103', port=3306,\n user='yanan', passwd='qosH3$)!.s', db='ec_shop',\n charset='utf8')\n return db_ecshop_conn\n\n\n#链接db_clq数据库\ndef db_clq_conn():\n db_clq_conn = pymysql.connect(host='172.16.3.127', port=3306, user='chenlianqing',passwd='zEtwv4qaxs4mMox', db='db_clq', charset='utf8')\n return db_clq_conn\n\n\n#链接线下统计库的ecshop\ndef ol_new_shop_conn():\n conn = 
pymysql.connect(host='39.107.136.209',port=3306,user='root',passwd='df@#88%nQWE',db='ecshop',charset='utf8')\n print('ol_ecshop_conn connected via SSH')\n return conn\n\n\n# 获取数据库中的数据\n# 目标值\ntarget = 80000000\n\n\n# 本月累计销售额\ndef month_gmv():\n # conn = localconn.db_ecshop_conn()\n conn = db_ecshop_conn()\n gmv_sql = \"\"\"SELECT sum(money_paid+surplus) AS 'goods_amount' FROM itz_order_info\n WHERE ((pay_status=2 AND order_type in (0,2,3,4)) OR (pay_status = 1\n AND order_type = 1\n AND order_id IN (SELECT DISTINCT order_id\n FROM itz_order_instalment\n WHERE pay_status = 2)))\n AND add_time>=unix_timestamp(concat(date_format(LAST_DAY(now()),'%Y-%m-'),'01'))\n AND add_time<=unix_timestamp(LAST_DAY(now()));\"\"\"\n gmv = pd.read_sql(gmv_sql, conn, index_col=None)\n values = [round(gmv['goods_amount'][0] / target, 2), round(1 - gmv['goods_amount'][0] / target, 2)]\n\n # conn = localconn.db_ecshop_conn()\n conn = db_ecshop_conn()\n user_sql = \"\"\"SELECT DISTINCT user_id FROM itz_order_info\nWHERE add_time>=unix_timestamp(concat(date_format(LAST_DAY(now()),'%Y-%m-'),'01'))\nAND add_time<=unix_timestamp(LAST_DAY(now()))\nAND ((pay_status=2 AND order_type in (0,2,3,4)) OR (pay_status = 1\n AND order_type = 1\n AND order_id IN (SELECT DISTINCT order_id\n FROM itz_order_instalment\n WHERE pay_status = 2)));\"\"\"\n user = pd.read_sql(user_sql,conn,index_col=None)\n user_list = user['user_id'].tolist()\n\n # conn = localconn.ol_new_shop_conn()\n conn = ol_new_shop_conn()\n user_tag_sql = \"\"\"SELECT\n b2c_userid,\n xingbie,\n age,\n province,\n capital,\n all_debt_money\n FROM itz_hh_user_spark\n WHERE b2c_userid in {};\"\"\".format(tuple(user_list))\n user_tag = pd.read_sql(user_tag_sql,conn,index_col=None)\n user_tag['b2c_userid'] = user_tag['b2c_userid'].astype('int')\n user_tag['age'] = user_tag['age'].fillna(0)\n user_tag['age'] = user_tag['age'].astype('int')\n bins = [0,20,30,40,50,60,user_tag.age.max()]\n # labels = [\"0-20\",\"20-30\",\"30-40\",\"40-50\",\"50-60\",\"60-100\"]\n user_tag['age_region'] = pd.cut(user_tag['age'],bins=bins,right=True)\n user_tag['age_region'] = user_tag['age_region'].astype(\"str\")\n aa = user_tag.groupby('xingbie',as_index=False).agg({'b2c_userid':'count'})\n bb = user_tag.groupby('age_region',as_index=False).agg({'b2c_userid':'count'})\n # print(\"na $$$$$$: \",user_tag.loc[user_tag[\"age\"].isna()]['age'])\n # print(\"null ***** \",user_tag.loc[user_tag[\"age\"].isnull()]['age'])\n\n # print(\"user_tag$$$$$$$$$: \",user_tag['b2c_userid'].count())\n # print(\"bbbbbbbbbbbb: \",bb['b2c_userid'].sum())\n cc = user_tag.groupby('province',as_index=False).agg({'b2c_userid':'count'})\n\n bins = [0,100,1000,10000,50000,100000,500000,1000000,user_tag.capital.max()+1]\n user_tag['capital_region'] = pd.cut(user_tag['capital'],bins=bins,right=False)\n user_tag['capital_region'] = user_tag['capital_region'].astype(\"str\")\n dd = user_tag.groupby('capital_region',as_index=False).agg({'b2c_userid':'count'})\n return values,user_tag,aa,bb,cc,dd\n\n# 打开数据库连接\ndef getConnect():\n db = pymysql.connect(\"39.107.136.209:3306\",\n \"root\", \"df@#88%nQWE@\", \"ecshop\",charset=\"utf8mb4\")\n return db\n\n# 将结果保存到huanhuan101:ecshop\ndef getHuanhuanEcshop():\n # 创建对应的执行引擎\n result = create_engine(\n \"mysql+pymysql://root:df@#88%nQWE@39.107.136.209:3306/ecshop?charset=utf8mb4\",\n echo=False, pool_pre_ping=True)\n\n return result\n\n\n# 设置颜色bar\n\ncolor_function_bar = \"\"\"\n function (params) {\n return '#07CDFF';\n }\n \"\"\"\n\nbar = 
Bar(init_opts=opts.InitOpts(width=\"630px\", height=\"450px\",theme=ThemeType.CHALK)) # width=\"850px\", height=\"650px\"\nbar.add_xaxis(month_gmv()[3][\"age_region\"].tolist())\nbar.add_yaxis(\"不同年龄段购买人数\", month_gmv()[3][\"b2c_userid\"].tolist(),\n itemstyle_opts=opts.ItemStyleOpts(color=results.JsCode(color_function_bar)))\n\nbar.set_global_opts(\n xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-30)),\n title_opts=opts.TitleOpts(title=\"不同年龄段购买人数\")\n # title_opts=opts.TitleOpts(title=\"Bar-旋转X轴标签\", subtitle=\"解决标签名字过长的问题\"),\n )\n\n\n\n# 仪表盘\ndef gauge_base() -> Gauge:\n c = (\n # Gauge(init_opts=opts.InitOpts(width=\"850px\", height=\"650px\"))\n Gauge(init_opts=opts.InitOpts(width=\"630px\", height=\"450px\",theme=ThemeType.CHALK))\n .add(\"\", [(\"完成率\", int(month_gmv()[0][0] * 100))])\n .set_global_opts(title_opts=opts.TitleOpts(title=\"当月目标完成率\"))\n )\n return c\n\n# 饼图\ndef pie_base() -> Pie:\n color_function = \"\"\"\n function (params) {\n return '#07CDFF';\n }\n \"\"\"\n c = (\n # Pie(init_opts=opts.InitOpts(width=\"850px\", height=\"650px\"))\n Pie(init_opts=opts.InitOpts(width=\"630px\", height=\"450px\",theme=ThemeType.CHALK))\n # .add(\"\", [list(z) for z in zip(Faker.choose(), Faker.values())])\n .add(\"\", [list(z) for z in zip(['男','女'], month_gmv()[2][\"b2c_userid\"])],) #\n .set_colors([\"#6055FC\",\"#01FFEA\"]) # 设置饼状图的颜色\n .set_global_opts(title_opts=opts.TitleOpts(title=\"近一个月男女购买比例\"))\n # .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {c} {d}%\"))\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {c} {d}%\"))\n )\n return c\n\n\n###########################################\n\ndef pie_rich_label22() -> Pie:\n c = (\n Pie(init_opts=opts.InitOpts(width=\"630px\", height=\"450px\",theme=ThemeType.CHALK))\n .add(\n \"\",\n # [list(z) for z in zip(Faker.choose(), Faker.values())],\n [list(z) for z in zip(['男','女'], month_gmv()[2][\"b2c_userid\"])],\n # radius=[\"40%\", \"55%\"],\n radius=[\"50%\", \"65%\"],\n label_opts=opts.LabelOpts(\n # position=\"outside\",\n # formatter=\"{a|{a}}{abg|}\\n{hr|}\\n {b|{b}: }{c} {per|{d}%} \",\n formatter=\"{b}: {c} {d}%\", # b 名称, c 数量, d 百分比\n # background_color=\"#eee\",\n # border_color=\"#aaa\",\n # border_width=1,\n # border_radius=4,\n # rich={\n # \"a\": {\"color\": \"#999\", \"lineHeight\": 22, \"align\": \"center\"},\n # \"abg\": {\n # \"backgroundColor\": \"#e3e3e3\",\n # \"width\": \"100%\",\n # \"align\": \"right\",\n # \"height\": 22,\n # \"borderRadius\": [4, 4, 0, 0],\n # },\n # \"hr\": {\n # \"borderColor\": \"#aaa\",\n # \"width\": \"100%\",\n # \"borderWidth\": 0.5,\n # \"height\": 0,\n # },\n # \"b\": {\"fontSize\": 16, \"lineHeight\": 33},\n # \"per\": {\n # \"color\": \"#eee\",\n # \"backgroundColor\": \"#334455\",\n # \"padding\": [2, 4],\n # \"borderRadius\": 2,\n # },\n # },\n ),\n )\n .set_colors([\"#6055FC\", \"#01FFEA\"]) # 设置饼状图的颜色\n .set_global_opts(title_opts=opts.TitleOpts(title=\"近一个月男女购买比例\"))\n )\n return c\n\n\n#############################################\n\n\n# 折线图\ndef line_markpoint() -> Line:\n c = (\n Line(init_opts=opts.InitOpts(width=\"630px\", height=\"450px\",theme=ThemeType.CHALK)) # width=\"850px\", height=\"650px\"\n # .add_xaxis(Faker.choose())\n .add_xaxis(month_gmv()[5][\"capital_region\"].tolist())\n .add_yaxis(\n \"近一个月不同待还金额区间购买人数分布\",\n # Faker.values(),\n month_gmv()[5][\"b2c_userid\"].tolist(),\n markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_=\"min\")]),\n )\n # .add_yaxis(\n # \"商家B\",\n # Faker.values(),\n # 
markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_=\"max\")]),\n # )\n # .set_global_opts(title_opts=opts.TitleOpts(title=\"Line-MarkPoint\"))\n # 设置旋转的x坐标轴\n .set_global_opts(\n xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15)),\n title_opts=opts.TitleOpts(title=\"待还金额区间购买人数\"),\n # title_opts=opts.TitleOpts(title=\"Bar-旋转X轴标签\", subtitle=\"解决标签名字过长的问题\"),\n )\n .set_colors([\"#07CDFF\"])\n\n )\n return c\n\n# bar.render()\n\n# 柱状图\n# style=\"width:1100px; height:700px\ndef bar_base() -> Bar:\n color_function = \"\"\"\n function (params) {\n return '#07CDFF';\n }\n \"\"\"\n c = ( # 1300px 1260px\n Bar(init_opts=opts.InitOpts(width=\"1260px\", height=\"650px\",theme=ThemeType.CHALK)) # ,theme=ThemeType.DARK\n # .add_xaxis(Faker.choose())\n .add_xaxis(month_gmv()[4][\"province\"].tolist())\n # .add_yaxis(\"商家A\", Faker.values())\n .add_yaxis(\"近一个月内不同城市购买人数分布\", month_gmv()[4][\"b2c_userid\"].tolist(),\n itemstyle_opts=opts.ItemStyleOpts(color=results.JsCode(color_function)))\n # .add_yaxis(\"商家B\", Faker.values())\n # .set_global_opts(title_opts=opts.TitleOpts(title=\"Bar-基本示例\", subtitle=\"我是副标题\"))\n .set_global_opts(\n xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-30)),\n title_opts=opts.TitleOpts(title=\"不同城市购买人数分布\"),\n # title_opts=opts.TitleOpts(title=\"Bar-旋转X轴标签\", subtitle=\"解决标签名字过长的问题\"),\n )\n # .set_series_opts(\n # label_opts=opts.LabelOpts(is_show=False),\n # markline_opts=opts.MarkLineOpts(\n # data=[\n # opts.MarkLineItem(type_=\"min\", name=\"最小值\"),\n # opts.MarkLineItem(type_=\"max\", name=\"最大值\"),\n # opts.MarkLineItem(type_=\"average\", name=\"平均值\"),\n # # opts.MarkLineItem(value_index = [200,400,600,800,1000]),\n # ],\n #\n # ),\n # # 设置线的类型: 实体线\n # linestyle_opts=opts.LineStyleOpts(type_=\"solid\")\n # )\n )\n return c\n\npage = Page(layout=Page.SimplePageLayout)\n# 需要自行调整每个 chart 的 height/width,显示效果在不同的显示器上可能不同\npage.add(gauge_base(),pie_rich_label22(),bar, line_markpoint(), bar_base()) # ,pie_base()\n# page.add(bar_base())\n# page.render()\n\n\n\nif __name__ == '__main__':\n\n # app.run_server(8080,debug=True)\n # page.render() C:\\E\\mysoft\\python-workSpace\\pythons\\djang1\\templates\n page.render(\"C:/E/mysoft/python-workSpace/pythons/djang1/templates/result.html\")\n # print(\"$$$$$$$: \",)\n\n\n\n\n # tuples = month_gmv()\n # print(\"values00000: \",tuples[0])\n # print(\"user_tag11111: \",tuples[1])\n # print(\"aa22222: \",tuples[2])\n # print(\"bb33333: \",tuples[3])\n # print(\"cc44444: \",tuples[4])\n # print(\"dd55555: \",tuples[5])\n #\n # print(\"type$$$$$$$$$$: \",type(month_gmv()[0][0] * 100))\n\n print(\"==============start===========\")\n\n # print(month_gmv()[4][\"province\"])\n # print(month_gmv()[4][\"b2c_userid\"])\n # print(month_gmv()[3][\"age_region\"].tolist())\n # print(month_gmv()[5][\"capital_region\"].tolist())\n", "sub_path": "manager/pyecharts_results.py", "file_name": "pyecharts_results.py", "file_ext": "py", "file_size_in_byte": 12969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 18, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 40, "usage_type": "call"}, 
{"api_name": "pandas.read_sql", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 107, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 114, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 121, "usage_type": "call"}, {"api_name": "pyecharts.charts.Bar", "line_number": 136, "usage_type": "call"}, {"api_name": "pyecharts.options.InitOpts", "line_number": 136, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 136, "usage_type": "name"}, {"api_name": "pyecharts.globals.ThemeType.CHALK", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pyecharts.globals.ThemeType", "line_number": 136, "usage_type": "name"}, {"api_name": "pyecharts.options.ItemStyleOpts", "line_number": 139, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 139, "usage_type": "name"}, {"api_name": "pyecharts.commons.utils.JsCode", "line_number": 139, "usage_type": "call"}, {"api_name": "pyecharts.commons.utils", "line_number": 139, "usage_type": "name"}, {"api_name": "pyecharts.options.AxisOpts", "line_number": 142, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 142, "usage_type": "name"}, {"api_name": "pyecharts.options.LabelOpts", "line_number": 142, "usage_type": "call"}, {"api_name": "pyecharts.options.TitleOpts", "line_number": 143, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 143, "usage_type": "name"}, {"api_name": "pyecharts.charts.Gauge", "line_number": 153, "usage_type": "call"}, {"api_name": "pyecharts.options.InitOpts", "line_number": 153, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 153, "usage_type": "name"}, {"api_name": "pyecharts.globals.ThemeType.CHALK", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pyecharts.globals.ThemeType", "line_number": 153, "usage_type": "name"}, {"api_name": "pyecharts.options.TitleOpts", "line_number": 155, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 155, "usage_type": "name"}, {"api_name": "pyecharts.charts.Gauge", "line_number": 150, "usage_type": "name"}, {"api_name": "pyecharts.charts.Pie", "line_number": 168, "usage_type": "call"}, {"api_name": "pyecharts.options.InitOpts", "line_number": 168, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 168, "usage_type": "name"}, {"api_name": "pyecharts.globals.ThemeType.CHALK", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pyecharts.globals.ThemeType", "line_number": 168, "usage_type": "name"}, {"api_name": "pyecharts.options.TitleOpts", "line_number": 172, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 172, "usage_type": "name"}, {"api_name": "pyecharts.options.LabelOpts", "line_number": 174, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 174, "usage_type": "name"}, {"api_name": "pyecharts.charts.Pie", "line_number": 160, "usage_type": "name"}, {"api_name": "pyecharts.charts.Pie", "line_number": 183, "usage_type": "call"}, {"api_name": "pyecharts.options.InitOpts", "line_number": 183, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 183, "usage_type": "name"}, {"api_name": "pyecharts.globals.ThemeType.CHALK", "line_number": 183, "usage_type": 
"attribute"}, {"api_name": "pyecharts.globals.ThemeType", "line_number": 183, "usage_type": "name"}, {"api_name": "pyecharts.options.LabelOpts", "line_number": 190, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 190, "usage_type": "name"}, {"api_name": "pyecharts.options.TitleOpts", "line_number": 224, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 224, "usage_type": "name"}, {"api_name": "pyecharts.charts.Pie", "line_number": 181, "usage_type": "name"}, {"api_name": "pyecharts.charts.Line", "line_number": 235, "usage_type": "call"}, {"api_name": "pyecharts.options.InitOpts", "line_number": 235, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 235, "usage_type": "name"}, {"api_name": "pyecharts.globals.ThemeType.CHALK", "line_number": 235, "usage_type": "attribute"}, {"api_name": "pyecharts.globals.ThemeType", "line_number": 235, "usage_type": "name"}, {"api_name": "pyecharts.options.MarkPointOpts", "line_number": 242, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 242, "usage_type": "name"}, {"api_name": "pyecharts.options.MarkPointItem", "line_number": 242, "usage_type": "call"}, {"api_name": "pyecharts.options.AxisOpts", "line_number": 252, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 252, "usage_type": "name"}, {"api_name": "pyecharts.options.LabelOpts", "line_number": 252, "usage_type": "call"}, {"api_name": "pyecharts.options.TitleOpts", "line_number": 253, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 253, "usage_type": "name"}, {"api_name": "pyecharts.charts.Line", "line_number": 233, "usage_type": "name"}, {"api_name": "pyecharts.charts.Bar", "line_number": 272, "usage_type": "call"}, {"api_name": "pyecharts.options.InitOpts", "line_number": 272, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 272, "usage_type": "name"}, {"api_name": "pyecharts.globals.ThemeType.CHALK", "line_number": 272, "usage_type": "attribute"}, {"api_name": "pyecharts.globals.ThemeType", "line_number": 272, "usage_type": "name"}, {"api_name": "pyecharts.options.ItemStyleOpts", "line_number": 277, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 277, "usage_type": "name"}, {"api_name": "pyecharts.commons.utils.JsCode", "line_number": 277, "usage_type": "call"}, {"api_name": "pyecharts.commons.utils", "line_number": 277, "usage_type": "name"}, {"api_name": "pyecharts.options.AxisOpts", "line_number": 281, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 281, "usage_type": "name"}, {"api_name": "pyecharts.options.LabelOpts", "line_number": 281, "usage_type": "call"}, {"api_name": "pyecharts.options.TitleOpts", "line_number": 282, "usage_type": "call"}, {"api_name": "pyecharts.options", "line_number": 282, "usage_type": "name"}, {"api_name": "pyecharts.charts.Bar", "line_number": 265, "usage_type": "name"}, {"api_name": "pyecharts.charts.Page", "line_number": 302, "usage_type": "call"}, {"api_name": "pyecharts.charts.Page.SimplePageLayout", "line_number": 302, "usage_type": "attribute"}]} +{"seq_id": "511044401", "text": "import datetime\nimport matplotlib.pyplot as plt\ndata = []\nx = []\ny = []\nwith open('forplot') as file:\n for i in file.readlines():\n splitted = i.split()\n datestr = splitted[0]+' '+splitted[1]\n date = datetime.datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S.%f') # 2020-02-25 12:29:46.040\n data.append((date,int(splitted[2])))\ndata.sort(key=lambda x: x[0])\nx = [i[0] for i 
in data]\na = 0\nfor i in data:\n a += i[1]\n y.append(a/128)\n(fig, ax) = plt.subplots(1, 1)\nax.plot(x, y)\nfor n, label in enumerate(ax.xaxis.get_ticklabels()):\n if n % 2 != 0:\n label.set_visible(False)\nax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))\nplt.ylabel('Kbit')\nplt.savefig('plot.png')\n", "sub_path": "lab2/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.FormatStrFormatter", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "290709786", "text": "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nThis module provides classes to interface with the Crystallography Open\nDatabase. If you use data from the COD, please cite the following works (as\nstipulated by the COD developers)::\n\n Merkys, A., Vaitkus, A., Butkus, J., Okulič-Kazarinas, M., Kairys, V. &\n Gražulis, S. (2016) \"COD::CIF::Parser: an error-correcting CIF parser for\n the Perl language\". Journal of Applied Crystallography 49.\n\n Gražulis, S., Merkys, A., Vaitkus, A. & Okulič-Kazarinas, M. (2015)\n \"Computing stoichiometric molecular composition from crystal structures\".\n Journal of Applied Crystallography 48, 85-91.\n\n Gražulis, S., Daškevič, A., Merkys, A., Chateigner, D., Lutterotti, L.,\n Quirós, M., Serebryanaya, N. R., Moeck, P., Downs, R. T. & LeBail, A.\n (2012) \"Crystallography Open Database (COD): an open-access collection of\n crystal structures and platform for world-wide collaboration\". Nucleic\n Acids Research 40, D420-D427.\n\n Grazulis, S., Chateigner, D., Downs, R. T., Yokochi, A. T., Quiros, M.,\n Lutterotti, L., Manakova, E., Butkus, J., Moeck, P. & Le Bail, A. (2009)\n \"Crystallography Open Database – an open-access collection of crystal\n structures\". J. Appl. Cryst. 42, 726-729.\n\n Downs, R. T. & Hall-Wallace, M. (2003) \"The American Mineralogist Crystal\n Structure Database\". 
American Mineralogist 88, 247-250.\n\"\"\"\n\nimport requests\nimport subprocess\nfrom monty.dev import requires\nfrom monty.os.path import which\n\nimport re\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.util.string import formula_double_format\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"1.0\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"shyuep@gmail.com\"\n\n\nclass COD:\n \"\"\"\n An interface to the Crystallography Open Database.\n \"\"\"\n\n def __init__(self):\n pass\n\n def query(self, sql):\n r = subprocess.check_output([\"mysql\", \"-u\", \"cod_reader\", \"-h\",\n \"www.crystallography.net\", \"-e\",\n sql, \"cod\"])\n return r.decode(\"utf-8\")\n\n @requires(which(\"mysql\"), \"mysql must be installed to use this query.\")\n def get_cod_ids(self, formula):\n \"\"\"\n Queries the COD for all cod ids associated with a formula. Requires\n mysql executable to be in the path.\n\n Args:\n formula (str): Formula.\n\n Returns:\n List of cod ids.\n \"\"\"\n # TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!\n\n # Standardize formula to the version used by COD.\n\n sql = 'select file from data where formula=\"- %s -\"' % \\\n Composition(formula).hill_formula\n text = self.query(sql).split(\"\\n\")\n cod_ids = []\n for l in text:\n m = re.search(r\"(\\d+)\", l)\n if m:\n cod_ids.append(int(m.group(1)))\n return cod_ids\n\n def get_structure_by_id(self, cod_id, **kwargs):\n \"\"\"\n Queries the COD for a structure by id.\n\n Args:\n cod_id (int): COD id.\n kwargs: All kwargs supported by\n :func:`pymatgen.core.structure.Structure.from_str`.\n\n Returns:\n A Structure.\n \"\"\"\n r = requests.get(\"http://www.crystallography.net/cod/%s.cif\" % cod_id)\n return Structure.from_str(r.text, fmt=\"cif\", **kwargs)\n\n @requires(which(\"mysql\"), \"mysql must be installed to use this query.\")\n def get_structure_by_formula(self, formula, **kwargs):\n \"\"\"\n Queries the COD for structures by formula. 
Requires mysql executable to\n be in the path.\n\n Args:\n cod_id (int): COD id.\n kwargs: All kwargs supported by\n :func:`pymatgen.core.structure.Structure.from_str`.\n\n Returns:\n A list of dict of the format\n [{\"structure\": Structure, \"cod_id\": cod_id, \"sg\": \"P n m a\"}]\n \"\"\"\n structures = []\n sql = 'select file, sg from data where formula=\"- %s -\"' % \\\n Composition(formula).hill_formula\n text = self.query(sql).split(\"\\n\")\n text.pop(0)\n for l in text:\n if l.strip():\n cod_id, sg = l.split(\"\\t\")\n r = requests.get(\"http://www.crystallography.net/cod/%s.cif\"\n % cod_id.strip())\n try:\n s = Structure.from_str(r.text, fmt=\"cif\", **kwargs)\n structures.append({\"structure\": s, \"cod_id\": int(cod_id),\n \"sg\": sg})\n except Exception:\n import warnings\n warnings.warn(\"\\nStructure.from_str failed while parsing CIF file:\\n%s\" % r.text)\n raise\n\n return structures\n", "sub_path": "pymatgen/ext/cod.py", "file_name": "cod.py", "file_ext": "py", "file_size_in_byte": 5029, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "subprocess.check_output", "line_number": 59, "usage_type": "call"}, {"api_name": "pymatgen.core.composition.Composition", "line_number": 81, "usage_type": "call"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "monty.dev.requires", "line_number": 64, "usage_type": "call"}, {"api_name": "monty.os.path.which", "line_number": 64, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 102, "usage_type": "call"}, {"api_name": "pymatgen.core.structure.Structure.from_str", "line_number": 103, "usage_type": "call"}, {"api_name": "pymatgen.core.structure.Structure", "line_number": 103, "usage_type": "name"}, {"api_name": "pymatgen.core.composition.Composition", "line_number": 122, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 128, "usage_type": "call"}, {"api_name": "pymatgen.core.structure.Structure.from_str", "line_number": 131, "usage_type": "call"}, {"api_name": "pymatgen.core.structure.Structure", "line_number": 131, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 136, "usage_type": "call"}, {"api_name": "monty.dev.requires", "line_number": 105, "usage_type": "call"}, {"api_name": "monty.os.path.which", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "564388264", "text": "from pytorch_metric_learning import losses, miners, trainers\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom torchvision import datasets, models, transforms\r\nimport torch.nn as nn\r\nimport torch.optim\r\nimport logging\r\nfrom torch.utils.data import Dataset\r\nfrom PIL import Image\r\nfrom cub2011 import Cub2011\r\nfrom mobilenet import mobilenet_v2 \r\n\r\n\r\nlogging.getLogger().setLevel(logging.INFO)\r\n\r\n\r\n\r\n\r\n# This is a basic multilayer perceptron\r\n# This code is from https://github.com/KevinMusgrave/powerful_benchmarker\r\nclass MLP(nn.Module):\r\n # layer_sizes[0] is the dimension of the input\r\n # layer_sizes[-1] is the dimension of the output\r\n def __init__(self, layer_sizes, final_relu=False):\r\n super().__init__()\r\n layer_list = []\r\n layer_sizes = [int(x) for x in layer_sizes]\r\n num_layers = len(layer_sizes) - 1\r\n final_relu_layer = num_layers if final_relu else num_layers - 1\r\n for i in range(len(layer_sizes) - 1):\r\n input_size = layer_sizes[i]\r\n curr_size = layer_sizes[i + 1]\r\n if i < final_relu_layer:\r\n layer_list.append(nn.ReLU(inplace=True))\r\n 
layer_list.append(nn.Linear(input_size, curr_size))\r\n self.net = nn.Sequential(*layer_list)\r\n self.last_linear = self.net[-1]\r\n\r\n def forward(self, x):\r\n return self.net(x)\r\n\r\n\r\n# This is for replacing the last layer of a pretrained network.\r\n# This code is from https://github.com/KevinMusgrave/powerful_benchmarker\r\n\r\n\r\n\r\nclass Identity(nn.Module):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, x):\r\n return x\r\n\r\n\r\n\r\n\r\nclass Normalize(nn.Module):\r\n def __init__(self,num_feat):\r\n super().__init__()\r\n self.bn1 = nn.BatchNorm1d(num_feat)\r\n def forward(self,x):\r\n #orm = nn.BatchNorm1d(self.num_feat)\r\n return self.bn1(x)\r\n\r\n\r\n\r\n\r\n#####################\r\n### tambahan ########\r\n#####################\r\n\r\n\r\nclass StandfordProducts(Dataset) :\r\n def __init__(self,root,image_path,transform,train=True):\r\n if train:\r\n info_path = '/Info_Files/Ebay_train.txt'\r\n else:\r\n info_path = '/Info_Files/Ebay_test.txt'\r\n files = pd.read_csv(root+info_path, header=0, delimiter=' ',usecols=['path','class_id'])[['path','class_id']]\r\n #print(files.to_dict(orient='records'))\r\n self.data = files.to_dict(orient='record')\r\n self.image_path = image_path\r\n self.transform = transform\r\n #print(type(self.data[1]['class_id']))\r\n #def\r\n def __getitem__(self,index):\r\n image = Image.open(root + '/'+ self.image_path + '/' + self.data[index]['path'])\r\n #print ('{0}=>{1},{2}'.format(self.data[index]['path'],image.size,image.mode))\r\n #print ('{0}=>{1}'.format(self.data[index]['path'],image.size))\r\n if (image.mode != 'RGB'):\r\n #print ('{0}=>{1}'.format(self.data[index]['path'],image.mode))\r\n image = image.convert('RGB')\r\n\r\n trans = self.transform(image)\r\n #image = trans(image)\r\n #print (trans.size())\r\n #print('from get: \\n') \r\n #print(type(self.data[index]) )\r\n return trans, self.data[index]['class_id']\r\n #{'image':im, 'target':self.data[index]['class_id']}\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n\r\nclass CustomerToShop(Dataset) :\r\n\r\n def __init__(self,root,transform,train=True):\r\n\r\n files = pd.read_csv(root+'/Eval/list_eval_partition_new.txt', header=0, delimiter='\\t',skiprows=1)[['image_path','item_id','evaluation_status']]\r\n\r\n ##image_name\titem_id\tevaluation_status\r\n\r\n if train:\r\n str_query = \"evaluation_status == 'train'\"\r\n else:\r\n str_query = \"evaluation_status == 'test' \" #or evaluation_status == 'val' \"\r\n\r\n\r\n #print(files.to_dict(orient='records'))\r\n #print (files.to_dict(orient='record'))\r\n\r\n self.data = files.query(str_query).to_dict(orient='record')\r\n #self.image_path = image_path\r\n for dt in self.data :\r\n dt['item_id'] = int(dt['item_id'][3:].strip('0'))\r\n\r\n self.transform = transform\r\n #print(type(self.data['item_id']))\r\n #print(len(self.data))\r\n\r\n \r\n #def\r\n def __getitem__(self,index):\r\n image = Image.open(root + '/'+ self.data[index]['image_path'])\r\n #image.show()\r\n #print (self.data[index])\r\n if (image.mode != 'RGB'):\r\n image = image.convert('RGB')\r\n trans = self.transform(image)\r\n #image = trans(image)\r\n \r\n #print('from get: \\n') \r\n #print(type(itemid))\r\n \r\n \r\n return trans, self.data[index]['item_id']\r\n\r\n #return self.transform(image), self.data[index]['class_id']\r\n #{'image':im, 'target':self.data[index]['class_id']}\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n\r\n\r\n\r\n\r\n#class DatasetConfig:\r\n# source_path=''\r\n# 
image_path=''\r\n#\r\n#\r\n#def getOnlineProducts(conf, train=True) :\r\n# #read text flie\r\n# if train :\r\n# #files = pd.read_table(conf.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ',usecols=['path','class_id'])\r\n# files = pd.read_csv(conf.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ',usecols=['path','class_id'])[['path','class_id']]\r\n# \r\n# else: \r\n# files = pd.read_table(conf.source_path+'/Info_Files/Ebay_test.txt', header=0, delimiter=' ', usecols=['path','class_id'])\r\n# #print(\"training files :\\n {0}\".format(training_set['path'][0]))\r\n# #print(\"test files :\\n {0}\".format(test_files))\r\n## with open(conf.source_path+'/Info_Files/Ebay_train.txt',newline='') as csvfile:\r\n## training_set = csv.DictReader(csvfile)\r\n## for row in training_set:\r\n## print(\"training dict :\\n {0}\".format(row))\r\n## \r\n# #training_set = training_files['path']['class_id'][:]\r\n# return files.values.tolist()\r\n\r\n\r\n\r\n\r\n# record_keeper is a useful package for logging data during training and testing\r\n# You can use the trainers and testers without record_keeper.\r\n# But if you'd like to install it, then do pip install record_keeper\r\n# See more info about it here https://github.com/KevinMusgrave/record_keeper\r\ntry:\r\n import os\r\n import errno\r\n import record_keeper as record_keeper_package\r\n from torch.utils.tensorboard import SummaryWriter\r\n\r\n def makedir_if_not_there(dir_name):\r\n try:\r\n os.makedirs(dir_name)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n\r\n pkl_folder = \"dml_dist_margin_logs\"\r\n tensorboard_folder = \"dml_dist_margin_tensorboard\"\r\n makedir_if_not_there(pkl_folder)\r\n makedir_if_not_there(tensorboard_folder)\r\n pickler_and_csver = record_keeper_package.PicklerAndCSVer(pkl_folder)\r\n tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)\r\n record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer, pickler_and_csver, [\"record_these\", \"learnable_param_names\"])\r\n\r\nexcept ModuleNotFoundError:\r\n record_keeper = None\r\n\r\n\r\n##############################\r\n########## Training ##########\r\n##############################\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n#print(type(device))\r\n\r\n# Set trunk model and replace the softmax layer with an identity function\r\n#trunk = models.resnet50(pretrained=True)\r\n#trunk = torch.hub.load('pytorch/vision:v0.5.0', 'mobilenet_v2', pretrained=True)\r\ntrunk = mobilenet_v2(pretrained=True)\r\n#print(trunk.last_channel)\r\n#trunk = torch.load('online_product_trunk.pth')\r\ntrunk_output_size = trunk.last_channel\r\n#trunk.fc = Identity()\r\n\r\n\r\n#trunk = torch.hub.load('pytorch/vision:v0.5.0', 'mobilenet_v2', pretrained=True)\r\n#trunk_output_size = trunk.fc.in_features\r\n#trunk.fc = Identity()\r\n#trunk.fc = Normalize(trunk_output_size)\r\n#trunk = torch.nn.DataParallel(trunk.to(device))\r\ntrunk = trunk.to(device)\r\n\r\n\r\n\r\n# Set embedder model. 
This takes in the output of the trunk and outputs 64 dimensional embeddings\r\n#embedder = torch.nn.DataParallel(MLP([trunk_output_size, 64]).to(device))\r\nembedder = MLP([trunk_output_size, 512]).to(device)\r\n#embedder = torch.nn.Linear(trunk_output_size,512).to(device)\r\n#embedder = torch.load('online_product_embedder.pth')\r\n\r\n\r\n# Set optimizers\r\ntrunk_optimizer = torch.optim.Adam(trunk.parameters(), lr=0.00001, weight_decay=0.00005)\r\nembedder_optimizer = torch.optim.Adam(embedder.parameters(), lr=0.00001, weight_decay=0.00005)\r\n\r\n# Set the image transform\r\n'''\r\nimg_transform = transforms.Compose([transforms.Resize(256),\r\n transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=227),\r\n transforms.RandomHorizontalFlip(0.5),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\n'''\r\n\r\n\r\nimg_transform_train = transforms.Compose([transforms.RandomResizedCrop(size=227),\r\n transforms.RandomHorizontalFlip(0.5),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\n\r\n\r\nimg_transform_test = transforms.Compose([transforms.Resize(256),\r\n transforms.CenterCrop(227),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\n\r\n\r\n\r\n# Set the datasets\r\n#train_dataset = datasets.CIFAR100(root=\"CIFAR100_Dataset\", train=True, transform=img_transform, download=True)\r\n#val_dataset = datasets.CIFAR100(root=\"CIFAR100_Dataset\", train=False, transform=img_transform, download=True)\r\n\r\n#print(train_dataset)\r\n#print(type(train_dataset))\r\n\r\n \r\n#train_dataset = getOnlineProducts(conf, train=True)\r\n#val_dataset = getOnlineProducts(conf,train=False)\r\n#\r\n#\r\n\r\n\r\nroot = '/home/m405305/Deep-Metric-Learning-Baselines/Datasets/online_products'\r\nimage_path = 'images'\r\ntrain_dataset = StandfordProducts(root,image_path,transform=img_transform_train,train=True)\r\nval_dataset = StandfordProducts(root,image_path,transform=img_transform_test,train=False)\r\n\r\n\r\n#root = '/home/m405305/dataset'\r\n#train_dataset = Cub2011(root,transform=img_transform_train,train=True,download=False)\r\n#val_dataset = Cub2011(root,transform=img_transform_test,train=False,download=False)\r\n\r\n'''\r\nroot = '/home/m405305/Deep-Metric-Learning-Baselines/Datasets/cust-shop'\r\nimage_path = 'images'\r\ntrain_dataset = CustomerToShop(root,transform=img_transform_train,train=True)\r\nval_dataset = CustomerToShop(root,transform=img_transform_test,train=False)\r\n'''\r\n\r\n#print (type(val_dataset.__getitem__(10)))\r\n\r\n\r\n\r\n\r\n# Set the loss function\r\nloss = losses.TripletMarginLoss(margin=0.01)\r\n#loss = losses.MarginLoss(margin=0.01,nu=1.2,beta=0)\r\n#loss = losses.ContrastiveLoss()\r\n\r\n# Set the mining function\r\n#miner = miners.MultiSimilarityMiner(epsilon=0.1)\r\n#miner = miners.DistanceWeightedMiner(cutoff=0, nonzero_loss_cutoff=0.5)\r\nminer = miners.TripletMarginMiner(margin=0.01,type_of_triplets='semihard')\r\n\r\n\r\n\r\n\r\n\r\n# Set other training parameters\r\nbatch_size = 40\r\nnum_epochs = 1\r\niterations_per_epoch = 10\r\n\r\n# Package the above stuff into dictionaries.\r\nmodels = {\"trunk\": trunk, \"embedder\": embedder}\r\noptimizers = {\"trunk_optimizer\": trunk_optimizer, \"embedder_optimizer\": embedder_optimizer}\r\nloss_funcs = {\"metric_loss\": loss}\r\nmining_funcs = {\"post_gradient_miner\": miner}\r\n\r\ntrainer = trainers.MetricLossOnly(models,\r\n 
optimizers,\r\n batch_size,\r\n loss_funcs,\r\n mining_funcs,\r\n iterations_per_epoch,\r\n train_dataset,\r\n record_keeper=record_keeper)\r\n\r\ntrainer.train(num_epochs=num_epochs)\r\n\r\n#torch.save(trainer.models['trunk'],'online_product_trunk.pth')\r\n#torch.save(trainer.models['embedder'],'online_product_embedder.pth')\r\n\r\n\r\n#############################\r\n########## Testing ##########\r\n############################# \r\n\r\n# The testing module requires faiss and scikit-learn\r\n# So if you don't have these, then this import will break\r\nfrom pytorch_metric_learning import testers\r\n\r\n#tester = testers.GlobalEmbeddingSpaceTester(reference_set=\"compared_to_sets_combined\", record_keeper=record_keeper)\r\ntester = testers.GlobalEmbeddingSpaceTester(record_keeper=record_keeper)\r\ndataset_dict = {\"train\": train_dataset, \"val\": val_dataset}\r\nepoch = 1\r\n\r\ntester.test(dataset_dict, epoch, trunk, embedder)\r\n\r\nif record_keeper is not None:\r\n record_keeper.pickler_and_csver.save_records()\r\n ", "sub_path": "example_MetricLossOnly.py", "file_name": "example_MetricLossOnly.py", "file_ext": "py", "file_size_in_byte": 12910, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 75, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 89, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 108, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 112, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 137, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 137, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 199, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 201, "usage_type": "attribute"}, {"api_name": "record_keeper.PicklerAndCSVer", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 209, "usage_type": "call"}, {"api_name": "record_keeper.RecordKeeper", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.nn.device", "line_number": 220, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 220, "usage_type": "name"}, {"api_name": "torch.nn.cuda.is_available", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.nn.cuda", "line_number": 220, "usage_type": "attribute"}, {"api_name": "mobilenet.mobilenet_v2", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn.optim.Adam", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.nn.optim", "line_number": 250, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 250, "usage_type": "name"}, {"api_name": "torch.nn.optim.Adam", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.nn.optim", "line_number": 251, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 251, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 263, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 263, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 263, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 264, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 264, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 265, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 265, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 266, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 266, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 269, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 269, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 269, "usage_type": "call"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 270, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 270, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 271, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 271, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 272, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 272, "usage_type": "name"}, {"api_name": "pytorch_metric_learning.losses.TripletMarginLoss", "line_number": 313, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.losses", "line_number": 313, "usage_type": "name"}, {"api_name": "pytorch_metric_learning.miners.TripletMarginMiner", "line_number": 320, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.miners", "line_number": 320, "usage_type": "name"}, {"api_name": "torchvision.models", "line_number": 332, "usage_type": "name"}, {"api_name": "pytorch_metric_learning.trainers.MetricLossOnly", "line_number": 337, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 337, "usage_type": "argument"}, {"api_name": "pytorch_metric_learning.trainers", "line_number": 337, "usage_type": "name"}, {"api_name": "pytorch_metric_learning.testers.GlobalEmbeddingSpaceTester", "line_number": 361, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.testers", "line_number": 361, "usage_type": "name"}, {"api_name": "record_keeper.pickler_and_csver.save_records", "line_number": 368, "usage_type": "call"}, {"api_name": "record_keeper.pickler_and_csver", "line_number": 368, "usage_type": "attribute"}]} +{"seq_id": "204443424", "text": "import 
re\nimport json\nimport glob\n\n\nfor filpath in glob.glob('LTETrace************'):\n with open(filpath, 'r') as ltefile:\n ignore = {'Zeit', '>>>>>>>>>>>>>>>>>>>>>>>>', '!GSTATUS: ', '!LTEINFO:', '--', '2017'}\n onestring = {'EMM', 'RRC', 'IMS', 'SINR', 'InterFreq', 'LTE CA state', 'GSM', 'WCDMA', 'CDMA 1x'}\n data = {}\n myLTE = {}\n cont = []\n a = 0\n for line in ltefile:\n if line.strip():\n if any(item in line for item in ignore):\n continue\n elif any(item in line for item in onestring):\n line = line.strip().split(':')\n item = [i.strip().replace(' \\t', '-') for i in line]\n data[item[0]] = item[1]\n elif line.startswith('LTE Pegel'):\n # cont.append[0]\n time = next(ltefile)\n try:\n time = next(ltefile).replace(' ', 'T').replace('\\n', '')\n data['time utc'] = time\n except (ValueError, StopIteration) as e:\n continue\n\n\n\n elif line.startswith('Serving'):\n\n fields = line[8:].replace('\\n', '').split(' ')\n fields = [item for item in fields if item]\n\n values = next(ltefile)\n values = values.replace('\\n', '').split(' ')\n values = [item for item in values if item]\n\n d = dict(zip(fields, values))\n\n data['Serving'] = d\n # print(values)\n elif line.startswith('IntraFreq'):\n fields = line[10:].split()\n values = []\n temp = []\n for i in range(0, 100):\n myline = next(ltefile)\n if not myline.isspace():\n myvalues = myline.replace('\\n', '').split(' ')\n myvalues = [item for item in myvalues if item]\n temp.append(myvalues)\n else:\n break\n\n values = list(zip(*temp))\n values = [list(item) for item in values]\n d = dict(zip(fields, values))\n data['IntraFreq'] = d\n\n\n\n\n elif line.startswith('CDMA HRPD:'):\n data['CMDA HRPD'] = line[10:].strip()\n myLTE[a] = data\n a = a + 1\n data = {}\n\n elif 'PCC' in line:\n field = line[:12]\n item = re.findall(r'[-+]?\\d+(?:\\.\\d+)?', line)\n data[field] = {}\n data[field]['value'] = item[0]\n data[field]['RSRP (dBm)'] = item[1]\n\n else:\n\n if line.startswith('System mode'):\n line = line.strip('\\n').strip().split('\\t')\n else:\n line = line.strip().split('\t\t')\n for item in line:\n item = item.split(':')\n item = [i.strip() for i in item]\n field = item[0]\n value = item[1]\n data[field] = value\n\n\n # keylist = myLTE.keys()\n # keylist.sort()\n # for key in keylist:\n # print \"%s: %s\" % (key, myLTE[key])\n\n jsonData = json.dumps(myLTE) # Save Python dictionary as JSON File\n with open('JSONLTEData.json', 'a') as f:\n f.write(jsonData + '\\n')\n\n\n\nprint (\"Text file containing LTE Measurements parsed into JSON File.\")", "sub_path": "LTE_converter.py", "file_name": "LTE_converter.py", "file_ext": "py", "file_size_in_byte": 3809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "glob.glob", "line_number": 6, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 75, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "262289428", "text": "# import libraries\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport json\nfrom pymongo import MongoClient\nimport sys\nimport time\nsys.stdout = open('file', 'w', encoding=\"utf-8\")\n\n\nurl = \"https://www.nike.com/w/new-shoes-3n82yzy7ok\"\n# run firefox webdriver from executable path of your choice\ndriver = webdriver.Firefox()\n\n# get web page\ndriver.get(url)\n# execute script to scroll down the page\ndriver.maximize_window()\ntime.sleep(5)\ndriver.execute_script(\"window.scrollTo(0, 
document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n\n#connect to database\nclient = MongoClient(\"mongodb+srv://rjain9:Ilikepie16%21@cluster0-wgm3y.mongodb.net/test?retryWrites=true&w=majority\")\ndb = client[\"Shoes\"]\nmycol = db[\"nike\"]\naTagsInLi = driver.find_elements_by_xpath(\"//div[@class='product-card css-1ikfoht css-z5nr6i css-11ziap1 css-zk7jxt css-dpr2cn product-grid__card ']\")\n \nline_items=[]\nfor a in aTagsInLi:\n \n print(\"here\")\n #get div container for image details\n img = a.find_element_by_tag_name('img')\n #get div for site line\n siteDiv = a.find_element_by_tag_name('a')\n\n #get name of shoe\n name = img.get_attribute('alt')\n #get image url\n image_url = img.get_attribute('src')\n #get site link\n site = siteDiv.get_attribute('href')\n #get category of shoe\n category = a.find_element_by_class_name('product-card__subtitle').text\n\n #determine gender\n if \"Men\" in category:\n gender = \"Male\"\n elif \"Women\" in category:\n gender = \"Female\"\n elif \"Kid\" in category or \"Baby\" in category or \"Toddler\" in category:\n gender = \"Kid\"\n else:\n gender = \"Unisex\"\n\n #create json object for database\n myjson3 = {\n 'name': name,\n 'image_url': image_url,\n 'site': site,\n 'category': category,\n 'gender': gender,\n 'brand' : 'Nike'\n }\n print(myjson3)\n line_items.append(myjson3)\n\n#clear existing db\nmycol.delete_many({})\n#insert new elements into db\nmycol.insert_many(line_items)\n\n \n\n\n\n\n", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 2187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stdout", "line_number": 9, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 14, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "312064231", "text": "from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras import backend as keras\nimport numpy as np \nimport os\nimport glob\nimport cv2\n\ndef merge_and_save():\n\timgtype = \"jpg\"\n\ttrain = glob.glob(\"results/*.\"+imgtype)\n\tfor i in range(len(train)):\n\t\tif i is not 54 and i is not 72:\n\t\t\timg_t = load_img(\"test/\"+str(i)+\".\"+imgtype)\n\t\t\timg_l = load_img(\"test/\"+str(i)+\"_l.\"+imgtype)\n\t\t\timg_p = load_img(\"results/\"+str(i)+\".\"+imgtype)\n\t\t\t\n\t\t\tx_t = img_to_array(img_t)\n\t\t\tx_l = img_to_array(img_l)\n\t\t\tx_t[:,:,2] = x_l[:,:,0]\n\t\t\timg_tmp = array_to_img(x_t)\n\t\t\timg_tmp.save(\"merged/\"+str(i)+\".\"+imgtype)\n\t\t\tx_tp = img_to_array(img_t)\n\t\t\tx_p = img_to_array(img_p)\n\t\t\tx_tp[:,:,2] = x_p[:,:,0]\n\t\t\timg_tmp = array_to_img(x_tp)\n\t\t\timg_tmp.save(\"merged/\"+str(i)+\"_p.\"+imgtype)\n\t\t\t'''\n\t\t\tx_l = img_to_array(img_l)\n\t\t\tx_p = img_to_array(img_p)\n\t\t\ttmp = np.asarray(x_p).astype(np.bool)\n\t\t\timg_tmp = array_to_img(tmp)\n\t\t\timg_tmp.save(\"bool/\"+str(i)+\".\"+imgtype)\n\t\t\ttmp = np.asarray(x_l).astype(np.bool)\n\t\t\timg_tmp = array_to_img(tmp)\n\t\t\timg_tmp.save(\"bool/\"+str(i)+\"_t.\"+imgtype)\n\t\t\t'''\n\ndef dice_coef(gt, seg):\n\tgt = np.asarray(gt).astype(np.bool)\n\tseg = np.asarray(seg).astype(np.bool)\n\tintersection = np.logical_and(gt, seg)\n\treturn 
intersection.sum()*2.0 / (np.sum(seg) + np.sum(gt))\n\n\ndef calculate_dice():\n\timgtype = \"jpg\"\n\ttrain = glob.glob(\"results/*.\"+imgtype)\n\tdice_sum = 0\n\tfor i in range(len(train)):\n\t\tif i is not 54 and i is not 72:\n\t\t\timg_l = load_img(\"test/\"+str(i)+\"_l.\"+imgtype)\n\t\t\timg_p = load_img(\"results/\"+str(i)+\".\"+imgtype)\n\t\t\tx_l = img_to_array(img_l)\n\t\t\tx_p = img_to_array(img_p)\n\t\t\tdice = dice_coef(x_l, x_p)\n\t\t\tdice_sum += dice\n\t\t\tprint(i)\n\t\t\tprint(dice)\n\tprint(dice_sum / len(train))\n\n\n\nif __name__ == \"__main__\":\n\tmerge_and_save()\n\tcalculate_dice()", "sub_path": "first/merge_imgs.py", "file_name": "merge_imgs.py", "file_ext": "py", "file_size_in_byte": 1806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "glob.glob", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.array_to_img", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.array_to_img", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "55379322", "text": "# coding: utf-8\nimport requests\nimport polling\nimport asyncio\nimport logging\n\nfrom aiohttp import ClientSession\n\nfrom time import sleep\n\n\nclass ShutterManager:\n\n def __init__(self, address):\n self.address = address\n self.logger = logging.getLogger('blebox.ShutterManager')\n\n def __repr__(self):\n return self.address\n\n def up(self, *args):\n url = 'http://{}/s/u'.format(self.address)\n return self._send_command(url)\n\n def down(self, *args):\n url = 'http://{}/s/d'.format(self.address)\n return self._send_command(url)\n\n def stop(self, *args):\n url = 'http://{}/s/s'.format(self.address)\n return self._send_command(url)\n\n def position(self, position):\n url = 'http://{}/s/p/{}'.format(self.address, position)\n return self._send_command(url)\n\n def current_position(self, do_async=False):\n url = 
'http://{}/api/shutter/state'.format(self.address)\n if do_async == True:\n return self._send_command(url)\n else:\n result = requests.get(url)\n return result.json()['currentPos']['position']\n\n def is_in_position(self, position):\n try:\n return polling.poll(lambda: self.current_position() == position, step=1, timeout=600)\n except polling.TimeoutException:\n return False\n \n async def tilt(self, *args):\n if not args:\n time = 0.8\n else:\n time = args[0]\n await self.down()\n self.is_in_position(100)\n await self.up()\n await asyncio.sleep(time)\n await self.stop()\n\n async def _send_command(self, url):\n async with ClientSession() as session:\n async with session.get(url) as response:\n try:\n json = await response.json()\n except Exception as e:\n self.logger.exception(e)\n return json, response.status\n", "sub_path": "blebox/shutter.py", "file_name": "shutter.py", "file_ext": "py", "file_size_in_byte": 1955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "polling.poll", "line_number": 47, "usage_type": "call"}, {"api_name": "polling.TimeoutException", "line_number": 48, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "382494602", "text": "from typing import Any, Dict, Iterable, cast\n\nfrom openslides_backend.action.actions.meeting.shared_meeting import (\n meeting_projector_default_replacements,\n)\nfrom tests.system.action.base import BaseActionTestCase\n\n\nclass MeetingCreateActionTest(BaseActionTestCase):\n def basic_test(self, datapart: Dict[str, Any]) -> Dict[str, Any]:\n self.create_model(\"committee/1\", {\"name\": \"test_committee\", \"member_ids\": [2]})\n self.create_model(\"group/1\")\n self.create_model(\"user/2\")\n\n response = self.request(\n \"meeting.create\",\n {\n \"name\": \"test_name\",\n \"committee_id\": 1,\n \"welcome_title\": \"test_wel_title\",\n **datapart,\n },\n )\n self.assert_status_code(response, 200)\n return self.get_model(\"meeting/1\")\n\n def test_create_simple(self) -> None:\n meeting = self.basic_test(dict())\n self.assertCountEqual(\n cast(Iterable[Any], meeting.get(\"default_projector_$_id\")),\n meeting_projector_default_replacements,\n )\n self.assert_model_exists(\n \"meeting/1\",\n {\n \"name\": \"test_name\",\n \"committee_id\": 1,\n \"group_ids\": [2, 3, 4, 5, 6],\n \"default_group_id\": 2,\n \"admin_group_id\": 3,\n \"motion_workflow_ids\": [1],\n \"motions_default_workflow_id\": 1,\n \"motions_default_amendment_workflow_id\": 1,\n \"motions_default_statute_amendment_workflow_id\": 1,\n \"motion_state_ids\": [1, 2, 3, 4],\n \"user_ids\": [1],\n \"list_of_speakers_countdown_id\": 1,\n \"poll_countdown_id\": 2,\n },\n )\n self.assert_model_exists(\"group/2\", {\"name\": \"Default\"})\n self.assert_model_exists(\"group/3\", {\"name\": \"Admin\", \"user_ids\": [1]})\n self.assert_model_exists(\"group/4\", {\"name\": \"Delegates\"})\n self.assert_model_exists(\"group/5\", {\"name\": \"Staff\"})\n self.assert_model_exists(\"group/6\", {\"name\": \"Committees\"})\n self.assert_model_exists(\n \"motion_workflow/1\",\n {\n \"name\": \"Simple Workflow\",\n \"meeting_id\": 1,\n \"default_workflow_meeting_id\": 1,\n \"default_amendment_workflow_meeting_id\": 1,\n 
\"default_statute_amendment_workflow_meeting_id\": 1,\n \"state_ids\": [1, 2, 3, 4],\n \"first_state_id\": 1,\n },\n )\n self.assert_model_exists(\n \"motion_state/1\", {\"name\": \"submitted\", \"next_state_ids\": [2, 3, 4]}\n )\n self.assert_model_exists(\n \"motion_state/2\",\n {\n \"name\": \"accepted\",\n \"previous_state_ids\": [1],\n \"meeting_id\": 1,\n \"workflow_id\": 1,\n },\n )\n self.assert_model_exists(\n \"motion_state/3\", {\"name\": \"rejected\", \"previous_state_ids\": [1]}\n )\n self.assert_model_exists(\n \"motion_state/4\", {\"name\": \"not_decided\", \"previous_state_ids\": [1]}\n )\n projector1 = self.get_model(\"projector/1\")\n self.assertCountEqual(\n cast(Iterable[Any], projector1.get(\"used_as_default_$_in_meeting_id\")),\n meeting_projector_default_replacements,\n )\n self.assert_model_exists(\n \"projector/1\",\n {\n \"name\": \"Default projector\",\n \"meeting_id\": 1,\n \"used_as_reference_projector_meeting_id\": 1,\n **{\n f\"used_as_default_${name}_in_meeting_id\": 1\n for name in meeting_projector_default_replacements\n },\n },\n )\n self.assert_model_exists(\n \"user/1\",\n {\n \"group_$1_ids\": [3], # meeting/1 and group 3\n \"group_$_ids\": [\"1\"], # only meeting/1 values\n },\n )\n self.assert_model_exists(\n \"projector_countdown/1\",\n {\n \"title\": \"List of speakers countdown\",\n \"meeting_id\": 1,\n \"used_as_list_of_speaker_countdown_meeting_id\": 1,\n \"default_time\": 60,\n \"countdown_time\": 60,\n },\n )\n self.assert_model_exists(\n \"projector_countdown/2\",\n {\n \"title\": \"Voting countdown\",\n \"meeting_id\": 1,\n \"used_as_poll_countdown_meeting_id\": 1,\n \"default_time\": 60,\n \"countdown_time\": 60,\n },\n )\n\n def test_check_action_data_fields(self) -> None:\n meeting = self.basic_test(\n {\n \"welcome_text\": \"htXiSgbj\",\n \"description\": \"RRfnzxHA\",\n \"location\": \"LSFHPTgE\",\n \"start_time\": 1608120653,\n \"end_time\": 1608121653,\n \"url_name\": \"JWdYZqDX\",\n \"enable_anonymous\": False,\n \"guest_ids\": [2],\n }\n )\n assert meeting.get(\"welcome_text\") == \"htXiSgbj\"\n assert meeting.get(\"description\") == \"RRfnzxHA\"\n assert meeting.get(\"location\") == \"LSFHPTgE\"\n assert meeting.get(\"start_time\") == 1608120653\n assert meeting.get(\"end_time\") == 1608121653\n assert meeting.get(\"url_name\") == \"JWdYZqDX\"\n assert meeting.get(\"enable_anonymous\") is False\n assert meeting.get(\"guest_ids\") == [2]\n assert meeting.get(\"user_ids\") == [1, 2]\n user_2 = self.get_model(\"user/2\")\n assert user_2.get(\"guest_meeting_ids\") == [1]\n\n def test_guest_ids_error(self) -> None:\n self.create_model(\"committee/1\", {\"name\": \"test_committee\", \"member_ids\": [2]})\n self.create_model(\"user/2\")\n self.create_model(\"user/3\")\n\n response = self.request(\n \"meeting.create\",\n {\n \"name\": \"test_name\",\n \"committee_id\": 1,\n \"welcome_title\": \"test_wel_title\",\n \"guest_ids\": [2, 3],\n },\n )\n\n self.assert_status_code(response, 400)\n self.assertIn(\n \"Guest-ids {3} are not part of committee-member or manager_ids.\",\n response.json[\"message\"],\n )\n", "sub_path": "tests/system/action/meeting/test_create.py", "file_name": "test_create.py", "file_ext": "py", "file_size_in_byte": 6420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tests.system.action.base.BaseActionTestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 10, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 10, "usage_type": "name"}, {"api_name": "openslides_backend.action.actions.meeting.shared_meeting.meeting_projector_default_replacements", "line_number": 31, "usage_type": "argument"}, {"api_name": "typing.cast", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 30, "usage_type": "name"}, {"api_name": "openslides_backend.action.actions.meeting.shared_meeting.meeting_projector_default_replacements", "line_number": 89, "usage_type": "argument"}, {"api_name": "typing.cast", "line_number": 88, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 88, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 88, "usage_type": "name"}, {"api_name": "openslides_backend.action.actions.meeting.shared_meeting.meeting_projector_default_replacements", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "430322468", "text": "import SimpleITK as sitk\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom dltk.io.preprocessing import whitening\n\n\"\"\"\nimg: simpleitk input image\nangle: radian angle to rotate around the z axis\nsize: voxel size for resampled data\n\"\"\"\n\n\ndef rotate_image(img, angle, size=[64, 64, 64], is_label=False):\n rotation_center = (0, 0, 0)\n rotation = sitk.VersorTransform(R.from_euler('Z', angle).as_quat(), rotation_center)\n\n rigid_versor = sitk.VersorRigid3DTransform()\n rigid_versor.SetRotation(rotation.GetVersor())\n rigid_versor.SetCenter(rotation_center)\n\n out_origin, out_size, out_spacing = get_output_parameters(img, rigid_versor, size)\n\n resample_filter = sitk.ResampleImageFilter()\n resample_filter.SetTransform(rigid_versor)\n\n if is_label:\n resample_filter.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample_filter.SetInterpolator(sitk.sitkBSpline)\n\n resample_filter.SetSize(size)\n resample_filter.SetOutputOrigin(out_origin)\n resample_filter.SetOutputSpacing(out_spacing)\n\n resample_filter.SetOutputDirection(img.GetDirection())\n if is_label:\n resample_filter.SetOutputPixelType(sitk.sitkUInt8)\n else:\n resample_filter.SetOutputPixelType(sitk.sitkFloat32)\n resample_filter.SetDefaultPixelValue(0.0)\n\n output_img = resample_filter.Execute(img)\n\n if is_label:\n return sitk.GetArrayFromImage(output_img)\n else:\n return whitening(sitk.GetArrayFromImage(output_img))\n\n\n\"\"\"\nimg: simpleitk input image\naxes: 1 for no flip, -1 for a flip of array of (int, 3) \nsize: voxel size for resampled data\n\"\"\"\n\n\ndef flip_image(img, axes=[1, -1, 1], size=[64, 64, 64], is_label=False):\n out_origin, out_size, out_spacing = get_output_parameters(img, sitk.Transform(3, sitk.sitkIdentity), size)\n\n rotation_center = (0, 0, 0)\n rotation = sitk.VersorTransform(np.array([0., 0., 0., 1.]), rotation_center)\n\n rigid_versor = sitk.VersorRigid3DTransform()\n rigid_versor.SetRotation(rotation.GetVersor())\n rigid_versor.SetCenter(rotation_center)\n\n rigid_versor.SetMatrix([axes[0], 0, 0, 0, axes[1], 0, 0, 0, axes[2]])\n\n resample_filter = sitk.ResampleImageFilter()\n resample_filter.SetTransform(rigid_versor)\n if is_label:\n resample_filter.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample_filter.SetInterpolator(sitk.sitkBSpline)\n\n resample_filter.SetSize(size)\n resample_filter.SetOutputOrigin(img.GetOrigin())\n resample_filter.SetOutputSpacing(out_spacing)\n\n resample_filter.SetOutputDirection(img.GetDirection())\n if is_label:\n 
resample_filter.SetOutputPixelType(sitk.sitkUInt8)\n else:\n resample_filter.SetOutputPixelType(sitk.sitkFloat32)\n resample_filter.SetDefaultPixelValue(0.0)\n\n output_img = resample_filter.Execute(img)\n\n if is_label:\n return sitk.GetArrayFromImage(output_img)\n else:\n return whitening(sitk.GetArrayFromImage(output_img))\n\n\n\"\"\"\ngiven an image and a transform, provide the transformed bounds \n\nreturns: output_origin, size and spacing based on a given transform \n\noutput_origin : the origin of the image given a transform\noutput_spacing: the spacing given the size input set at 64 voxels as a default.\noutput_size : the size given the input image spacing\n\n\"\"\"\n\n\ndef get_output_parameters(image, transform, size=[64, 64, 64]):\n # origin and maximum of the transformed image.\n x0, y0, z0 = image.GetOrigin()\n x1, y1, z1 = image.TransformIndexToPhysicalPoint(image.GetSize())\n\n trans_pts = []\n for x in (x0, x1):\n for y in (y0, y1):\n for z in (z0, z1):\n trans_pt = transform.GetInverse().TransformPoint((x, y, z))\n trans_pts.append(trans_pt)\n\n min_arr = np.array(trans_pts).min(axis=0)\n max_arr = np.array(trans_pts).max(axis=0)\n\n output_origin = min_arr\n output_size = np.round(((max_arr - min_arr) / image.GetSpacing())).astype(int)\n output_spacing = ((max_arr - min_arr) / size).astype(float)\n # print(output_size)\n return output_origin, output_size.tolist(), output_spacing.tolist()\n\n\n\"\"\"\nPre-process and augment data (if defined)\nReturns a list of all pre-processed/augmented data volumes as tuples\n\"\"\"\n\n\ndef preprocess(volume_list, augment_data=False):\n preprocessed_volumes = []\n for volume_tuple in volume_list:\n bmode, pd, label = load_volumes(volume_tuple)\n preprocessed_volumes.append((bmode, pd, label))\n if augment_data:\n preprocessed_volumes += augment(volume_tuple)\n return preprocessed_volumes\n\n\n\"\"\"\nAugments tuple of volumes (BMode, PD, Label) and returns a list of all augmented volumes (as tuples)\nTO-DO: Currently just returns an array of the same volumes as a tuple array but should insert logic here\nNOTE: It should not return the input volumes in the return array since its already added to the full volume list\n ONLY append the augmentations\n\"\"\"\n\n\ndef augment(volume_tuple):\n bmode, pd, label = sitk.ReadImage(volume_tuple[0], sitk.sitkFloat32), sitk.ReadImage(volume_tuple[1], sitk.sitkFloat32), sitk.ReadImage(volume_tuple[2], sitk.sitkUInt8)\n augmented_tuples = []\n size = [64, 64, 64]\n\n # initial go - -20 to +20 degrees (5 deg increment) no zero\n # angles = array([-0.34906585, -0.26179939, -0.17453293, -0.08726646, 0.08726646, 0.17453293, 0.26179939, 0.34906585])\n # now with more angles (-40 + 40) in 4 degree increments... 
to get to ~27 we augmented with prior\n angles = np.linspace(-np.pi / 18, np.pi / 18, 11)\n angles = angles[angles != 0]\n\n for rad in angles:\n augmented_tuples.append((rotate_image(bmode, rad, size), rotate_image(pd, rad, size),\n rotate_image(label, rad, size, True)))\n\n axes_flip = [-1, 1, 1], [1, -1, 1], [-1, -1, 1]\n for a in axes_flip:\n augmented_tuples.append(\n (flip_image(bmode, a, size), flip_image(pd, a, size), flip_image(label, a, size, True)))\n\n return augmented_tuples\n\n\ndef load_volumes(volume_tuple):\n bmode, pd, label = sitk.ReadImage(volume_tuple[0], sitk.sitkFloat32), sitk.ReadImage(volume_tuple[1], sitk.sitkFloat32), sitk.ReadImage(volume_tuple[2], sitk.sitkUInt8)\n bmode_vol = sitk.GetArrayFromImage(bmode)\n pd_vol = sitk.GetArrayFromImage(pd)\n label_vol = sitk.GetArrayFromImage(label)\n return whitening(bmode_vol), whitening(pd_vol), label_vol\n", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 6419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "50", "api": [{"api_name": "SimpleITK.VersorTransform", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_euler", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 15, "usage_type": "name"}, {"api_name": "SimpleITK.VersorRigid3DTransform", "line_number": 17, "usage_type": "call"}, {"api_name": "SimpleITK.ResampleImageFilter", "line_number": 23, "usage_type": "call"}, {"api_name": "SimpleITK.sitkNearestNeighbor", "line_number": 27, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkBSpline", "line_number": 29, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkUInt8", "line_number": 37, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkFloat32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 45, "usage_type": "call"}, {"api_name": "dltk.io.preprocessing.whitening", "line_number": 47, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 47, "usage_type": "call"}, {"api_name": "SimpleITK.Transform", "line_number": 58, "usage_type": "call"}, {"api_name": "SimpleITK.sitkIdentity", "line_number": 58, "usage_type": "attribute"}, {"api_name": "SimpleITK.VersorTransform", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "SimpleITK.VersorRigid3DTransform", "line_number": 63, "usage_type": "call"}, {"api_name": "SimpleITK.ResampleImageFilter", "line_number": 69, "usage_type": "call"}, {"api_name": "SimpleITK.sitkNearestNeighbor", "line_number": 72, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkBSpline", "line_number": 74, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkUInt8", "line_number": 82, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkFloat32", "line_number": 84, "usage_type": "attribute"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 90, "usage_type": "call"}, {"api_name": "dltk.io.preprocessing.whitening", "line_number": 92, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 123, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 154, 
"usage_type": "call"}, {"api_name": "SimpleITK.sitkFloat32", "line_number": 154, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkUInt8", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 161, "usage_type": "attribute"}, {"api_name": "SimpleITK.ReadImage", "line_number": 177, "usage_type": "call"}, {"api_name": "SimpleITK.sitkFloat32", "line_number": 177, "usage_type": "attribute"}, {"api_name": "SimpleITK.sitkUInt8", "line_number": 177, "usage_type": "attribute"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 178, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 179, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 180, "usage_type": "call"}, {"api_name": "dltk.io.preprocessing.whitening", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "253042468", "text": "from flask import Flask, request, abort\n\nfrom linebot import (LineBotApi, WebhookHandler)\nfrom linebot.exceptions import (InvalidSignatureError)\nfrom linebot.models import *\n\nfrom engine.currencySearch import currencySearch\nfrom engine.AQI import AQImonitor\nfrom engine.gamma import gammamonitor\nfrom engine.OWM import OWMLonLatsearch\nfrom engine.SpotifyScrap import scrapSpotify\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscope=['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\ncreds = ServiceAccountCredentials.from_json_keyfile_name('好幫手.json',scope)\n\nclient = gspread.authorize(creds)\nLineBotSheet = client.open('好幫手')\nuserStatusSheet = LineBotSheet.worksheet('userStatus')\nuserInfoSheet = LineBotSheet.worksheet('userInfo')\n\napp = Flask(__name__)\n\n# 設定你的Channel Access Token\nline_bot_api = LineBotApi('zT/x0Dp81QA2Wp781ummtpycl3OxZk0M65BPz8SoCF1H6N93cSR50LMu8beeZ5jj9iM3C2hRBBk/4meraFGsJawJa3foM4c7tTf7tDTtudwlcDIFVyfHVhJIM67FyrOrVMgoe5J1X8dFf2m2X9P6fwdB04t89/1O/w1cDnyilFU=')\n# 設定你的Channel Secret\nhandler = WebhookHandler('e4fdbb0acac692e6c47353219f9657ea')\n\n# 監聽所有來自 /callback 的 Post Request\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n\t# get X-Line-Signature header value\n\tsignature = request.headers['X-Line-Signature']\n\t# get request body as text\n\tbody = request.get_data(as_text=True)\n\tapp.logger.info(\"Request body: \" + body)\n\t# handle webhook body\n\ttry:\n\t\thandler.handle(body, signature)\n\texcept InvalidSignatureError:\n\t\tabort(400)\n\treturn 'OK'\n\n@app.route(\"/web\")\ndef showWeb():\n\treturn '
Hello Every one'\n\n#處理訊息\n#當訊息種類為TextMessage時,從event中取出訊息內容,藉由TextSendMessage()包裝成符合格式的物件,並貼上message的標籤方便之後取用。\n#接著透過LineBotApi物件中reply_message()方法,回傳相同的訊息內容\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n\n\tuserSend = event.message.text\n\tuserID = event.source.user_id\n\ttry:\n\t\tcell = userStatusSheet.find(userID)\n\t\tuserRow = cell.row\n\t\tuserCol = cell.col\n\t\tstatus = userStatusSheet.cell(cell.row,2).value\n\texcept:\n\t\tuserStatusSheet.append_row([userID])\n\t\tcell = userStatusSheet.find(userID)\n\t\tuserRow = cell.row\n\t\tuserCol = cell.col\n\t\tstatus = ''\n\tif status == '':\n\t\t#文字提示\n\t\tmessage = TextSendMessage(text='你尚未註冊,請填資料,\\n請複製以下的註册碼來填寫資料')\n\t\tline_bot_api.push_message(userID,message)\n\t\t#傳送使用者ID\n\t\tmessage = TextSendMessage(text=userID)\n\t\tline_bot_api.push_message(userID,message)\n\t\t#傳送確認表單\n\t\tmessage = TemplateSendMessage(\n\t\t\talt_text='註冊表單',\n\t\t\ttemplate=ConfirmTemplate(\n\t\t\t\ttext='請選擇【填寫表單】來註冊,完成後請點擊【完成】按鈕',\n\t\t\t\tactions=[\n\t\t\t\t\tURIAction(\n\t\t\t\t\t\tlabel='填寫表單',\n\t\t\t\t\t\turi='line://app/1609239460-ZEJqMXl0'\n\t\t\t\t\t),\n\t\t\t\t\tMessageAction(\n\t\t\t\t\tlabel='完成',\n\t\t\t\t\ttext='完成'\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\t)\n\t\tuserStatusSheet.update_cell(userRow, 2, '註冊中')\n\telif status == '註冊中':\n\t\ttry:\n\t\t\tinfoCell = userInfoSheet.find(userID)\n\t\t\tuserStatusSheet.update_cell(userRow, 2, '已註冊')\n\t\t\tmessage = TextSendMessage(text='Hi,{}您好,已註冊成功'.format(userInfoSheet.cell(infoCell.row,3).value))\n\t\texcept:\n\t\t\t#文字提示\n\t\t\tmessage = TextSendMessage(text='你尚未註冊,請填資料,\\n請複製以下的註册碼來填寫資料')\n\t\t\tline_bot_api.push_message(userID,message)\n\t\t\t#傳送使用者ID\n\t\t\tmessage = TextSendMessage(text=userID)\n\t\t\tline_bot_api.push_message(userID,message)\n\t\t\t#傳送確認表單\n\t\t\tmessage = TemplateSendMessage(\n\t\t\t\talt_text='註冊表單',\n\t\t\t\ttemplate=ConfirmTemplate(\n\t\t\t\t\ttext='請選擇【填寫表單】來註冊,完成後請點擊【完成】按鈕',\n\t\t\t\t\tactions=[\n\t\t\t\t\t\tURIAction(\n\t\t\t\t\t\t\tlabel='填寫表單',\n\t\t\t\t\t\t\turi='line://app/1609239460-ZEJqMXl0'\n\t\t\t\t\t\t),\n\t\t\t\t\t\tMessageAction(\n\t\t\t\t\t\tlabel='完成',\n\t\t\t\t\t\ttext='完成'\n\t\t\t\t\t\t)\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t\t)\n\t\t\tuserStatusSheet.update_cell(userRow, 2, '註冊中')\n\telif status == '已註冊':\n\t\tif userSend == '你好':\n\t\t\tinfoCell = userInfoSheet.find(userID)\n\t\t\tuserName = userInfoSheet.cell(infoCell.row,3).value\n\t\t\tmessage = TextSendMessage(text='Hello, ' + userName)\n\t\telif userSend == '天氣':\n\t\t\tuserStatusSheet.update_cell(userRow, 2, '天氣查詢')\n\t\t\tmessage = TextSendMessage(text='請傳送你的座標,請按下列的+號選項')\n\t\telif userSend in ['CNY', 'THB', 'SEK', 'USD', 'IDR', 'AUD', 'NZD', 'PHP', 'MYR', 'GBP', 'ZAR', 'CHF', 'VND', 'EUR', 'KRW', 'SGD', 'JPY', 'CAD', 'HKD']:\n\t\t\tmessage = TextSendMessage(text=currencySearch(userSend))\n\t\telif userSend == 'SOS':\n\t\t\tmessage = 
TemplateSendMessage(\n\t\t\t\talt_text='這是個按鈕選單',\n\t\t\t\ttemplate=ButtonsTemplate(\n\t\t\t\t\tthumbnail_image_url='https://i.imgur.com/Fpusd5M.png',\n\t\t\t\t\ttitle='這是您的選單按鈕',\n\t\t\t\t\ttext='請選擇以下的項目,另有貨幣查詢功能,需輸入貨幣代碼3位大寫英文',\n\t\t\t\t\tactions=[\n\t\t\t\t\t\tMessageAction(\n\t\t\t\t\t\t\tlabel='醫生',\n\t\t\t\t\t\t\ttext='醫生'\n\t\t\t\t\t\t),\n\t\t\t\t\t\tMessageAction(\n\t\t\t\t\t\t\tlabel='家人',\n\t\t\t\t\t\t\ttext='家人'\n\t\t\t\t\t\t),\n\t\t\t\t\t\tMessageAction(\n\t\t\t\t\t\t\tlabel='報警',\n\t\t\t\t\t\t\ttext='112'\n\t\t\t\t\t\t),\n\t\t\t\t\t\tURIAction(\n\t\t\t\t\t\t\tlabel='修改連絡資料',\n\t\t\t\t\t\t\turi='https://forms.gle/J8UL7uPCJabMuWvV6'\n\t\t\t\t\t\t)\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t\t)\n\t\telif userSend == '氣候':\n\t\t\tmessage = TemplateSendMessage(\n\t\t\t\talt_text='這是個按鈕選單',\n\t\t\t\ttemplate=ButtonsTemplate(\n\t\t\t\t\tthumbnail_image_url='https://i.imgur.com/iKYedf6.png',\n\t\t\t\t\ttitle='天氣查詢',\n\t\t\t\t\ttext='請選擇地點',\n\t\t\t\t\tactions=[\n\t\t\t\t\t\tMessageAction(\n\t\t\t\t\t\t\tlabel='查詢其他地方',\n\t\t\t\t\t\t\ttext='天氣'\n\t\t\t\t\t\t),\n\t\t\t\t\t\tURIAction(\n\t\t\t\t\t\t\tlabel='你所在位置',\n\t\t\t\t\t\t\turi='https://watch.ncdr.nat.gov.tw/townwarn/'\n\t\t\t\t\t\t)\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t\t)\n\n\t\telif userSend in ['spotify','音樂','music']:\n\t\t\tcolumnReply,textReply = scrapSpotify()\n\t\t\tmessage = TemplateSendMessage(\n\t\t\t\talt_text=textReply,\n\t\t\t\ttemplate=ImageCarouselTemplate(\n\t\t\t\tcolumns=columnReply\n\t\t\t)\n\t\t)\n\t\telif userSend == '便當店':\n\t\t\tinfoCell = userInfoSheet.find(userID)\n\t\t\tmessage = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,4).value))\n\t\telif userSend == '醫生':\n\t\t\tinfoCell = userInfoSheet.find(userID)\n\t\t\tmessage = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,6).value))\n\t\telif userSend == '家人':\n\t\t\tinfoCell = userInfoSheet.find(userID)\n\t\t\tmessage = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,7).value))\n\t\telif userSend == '水電行':\n\t\t\tinfoCell = userInfoSheet.find(userID)\n\t\t\tmessage = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,5).value))\t\t\t\n\t\telse:\n\t\t\tmessage = TextSendMessage(text=userSend)\n\telif status == '天氣查詢':\n\t\tmessage = TemplateSendMessage(\n\t\t\talt_text='是否取消查詢',\n\t\t\ttemplate=ConfirmTemplate(\n\t\t\t\ttext='是否取消查詢?',\n\t\t\t\tactions=[\n\t\t\t\t\tURIAction(\n\t\t\t\t\t\t\tlabel='傳送位置資訊',\n\t\t\t\t\t\t\turi='line://nv/location'\n\t\t\t\t\t),\n\t\t\t\t\tMessageAction(\n\t\t\t\t\t\tlabel='取消查詢',\n\t\t\t\t\t\ttext='取消'\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\t)\n\t\tuserStatusSheet.update_cell(userRow, 2, '已註冊')\n\tline_bot_api.reply_message(event.reply_token, message)\n\n@handler.add(MessageEvent, message=LocationMessage)\ndef handle_message(event):\n\tuserID = event.source.user_id\n\ttry:\n\t\tcell = userStatusSheet.find(userID)\n\t\tuserRow = cell.row\n\t\tuserCol = cell.col\n\t\tstatus = userStatusSheet.cell(cell.row,2).value\n\texcept:\n\t\tuserStatusSheet.append_row([userID])\n\t\tcell = userStatusSheet.find(userID)\n\t\tuserRow = cell.row\n\t\tuserCol = cell.col\n\t\tstatus = ''\n\tif status == '天氣查詢':\n\t\tuserAddress = event.message.address\n\t\tuserLat = event.message.latitude\n\t\tuserLon = event.message.longitude\n\n\t\tweatherResult = OWMLonLatsearch(userLon,userLat)\n\t\tAQIResult = AQImonitor(userLon,userLat)\n\t\tgammaResult = gammamonitor(userLon,userLat)\n\t\tuserStatusSheet.update_cell(userRow, 2, '已註冊')\n\t\tmessage = 
TextSendMessage(text='🌤天氣狀況:\\n{}\\n🚩空氣品質:\\n{}\\n\\n🌌輻射值:\\n{}'.format(weatherResult,AQIResult,gammaResult))\n\telif status == '':\n\t\t#文字提示\n\t\tmessage = TextSendMessage(text='你尚未註冊,請填基本資料!\\n請複製以下註冊碼來填寫表單')\n\t\tline_bot_api.push_message(userID,message)\n\t\t#傳送使用者ID\n\t\tmessage = TextSendMessage(text=userID)\n\t\tline_bot_api.push_message(userID,message)\n\t\t#傳送確認表單\n\t\tmessage = TemplateSendMessage(\n\t\t\talt_text='註冊表單',\n\t\t\ttemplate=ConfirmTemplate(\n\t\t\t\ttext='請選擇[填寫表單]來註冊, 完成後請點擊[完成]按鈕',\n\t\t\t\tactions=[\n\t\t\t\t\tURIAction(\n\t\t\t\t\t\t\tlabel='填寫表單',\n\t\t\t\t\t\t\turi='line://app/1609239460-ZEJqMXl0'\n\t\t\t\t\t),\n\t\t\t\t\tMessageAction(\n\t\t\t\t\t\tlabel='填寫完成',\n\t\t\t\t\t\ttext='完成'\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\t)\t\t\t\t\n\t\tuserStatusSheet.update_cell(userRow, 2, '註冊中')\t\t\n\telse:\n\t\tmessage = TextSendMessage(text='傳地址幹嘛?')\n\tline_bot_api.reply_message(event.reply_token, message)\n\n@handler.add(MessageEvent, message=StickerMessage)\ndef handle_message(event):\n\tmessage = TextSendMessage(text='我看不懂貼圖')\n\tline_bot_api.reply_message(event.reply_token, message)\n\nimport os\nif __name__ == \"__main__\":\n\tport = int(os.environ.get('PORT', 5000))\n\tapp.run(host='0.0.0.0', port=port)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 9282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 16, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 16, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 23, "usage_type": "call"}, {"api_name": "linebot.LineBotApi", "line_number": 26, "usage_type": "call"}, {"api_name": "linebot.WebhookHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "linebot.exceptions.InvalidSignatureError", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 42, "usage_type": "call"}, {"api_name": "engine.currencySearch.currencySearch", "line_number": 132, "usage_type": "call"}, {"api_name": "engine.SpotifyScrap.scrapSpotify", "line_number": 181, "usage_type": "call"}, {"api_name": "engine.OWM.OWMLonLatsearch", "line_number": 241, "usage_type": "call"}, {"api_name": "engine.AQI.AQImonitor", "line_number": 242, "usage_type": "call"}, {"api_name": "engine.gamma.gammamonitor", "line_number": 243, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 282, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 282, "usage_type": "attribute"}]} +{"seq_id": "282268603", "text": "#!coding:utf-8\nimport execjs\nimport re\nimport requests\nfrom urllib3 import disable_warnings\nfrom requests import Session\nimport wx\nimport io\nimport time\nimport uuid\nimport json\nimport base64\nimport sys\nimport Crypto\nimport traceback\nfrom Crypto.Cipher import AES\n\n# SECRET_KEY = 'B123JDVgT8WDGOWBgQv6EIhvxl4vDYvUnVdg-Vjdt11='\nSECRET_KEY = '4vDYvUnVdg-Vjdt11='\nHOST = 
'http://vpn.yangmingcheng.online:8001'\n\n'''\n采用AES对称加密算法\n'''\nscore_switch = True\ntimeout = 50\nversionName = u'多设备业务办理0226'\n\nif score_switch:\n versionName = versionName + u'(积分版)'\n\n\n# str不是16的倍数那就补足为16的倍数\ndef add_to_16(value):\n while len(value) % 16 != 0:\n value += '\\0'\n return str.encode(value) # 返回bytes\n\n\n# 加密方法\ndef encrypt_text(text):\n if type(text) is str:\n to_encrypt_data = text\n else:\n to_encrypt_data = json.dumps(text)\n # 秘钥\n # key = '123456'\n # 待加密文本\n # text = 'abc123def4561111111111111111111111111111111111111111111111111111111111111111111111'\n # 初始化加密器\n aes = AES.new(add_to_16(SECRET_KEY), AES.MODE_ECB)\n # 先进行aes加密\n encrypt_aes = aes.encrypt(add_to_16(to_encrypt_data))\n # 用base64转成字符串形式\n encrypted_text = str(base64.encodebytes(encrypt_aes), encoding='utf-8') # 执行加密并转码返回bytes\n # print(encrypted_text)\n return encrypted_text\n\n\n# 解密方法\ndef decrypt_text(text):\n if not text:\n return None\n if type(text) is str:\n to_decrypt_data = text\n else:\n to_decrypt_data = json.dumps(text)\n # 秘钥\n # key = '123456'\n # 密文\n # text = 'qR/TQk4INsWeXdMSbCDDdA=='\n # 初始化加密器\n aes = AES.new(add_to_16(SECRET_KEY), AES.MODE_ECB)\n # 优先逆向解密base64成bytes\n base64_decrypted = base64.decodebytes(to_decrypt_data.encode(encoding='utf-8'))\n # 执行解密密并转码返回str\n decrypted_text = str(aes.decrypt(base64_decrypted), encoding='utf-8').replace('\\0', '')\n try:\n decrypted_text = json.loads(decrypted_text)\n except:\n pass\n # print(decrypted_text)\n return decrypted_text\n\n\n# d = encrypt_text({'data':'123'})\n# decrypt_text(d)\n# exit()\n\n#\n# def encrypt_text(data):\n# \"\"\"\n# 加密字符串\n# :param text:需要加密的内容\n# :return: 加密后的内容\n# \"\"\"\n# if type(data) is str:\n# to_encrypt_data = data.encode('utf-8')\n# else:\n# to_encrypt_data = json.dumps(data).encode('utf-8')\n# cipher = Fernet(SECRET_KEY)\n# return cipher.encrypt(to_encrypt_data).decode('utf-8')\n#\n#\n# def decrypt_text(data):\n# \"\"\"\n# 解密字符串\n# :param text:需要解密的内容\n# :return: 解密后的内容\n# \"\"\"\n# if not data:\n# return None\n# if type(data) is str:\n# to_decrypt_data = data.encode('utf-8')\n# else:\n# to_decrypt_data = json.dumps(data).encode('utf-8')\n# cipher = Fernet(SECRET_KEY)\n# decrypt_data = cipher.decrypt(to_decrypt_data).decode('utf-8')\n# try:\n# decrypt_data = json.loads(decrypt_data)\n# except:\n# pass\n# return decrypt_data\n\n\nprint('请勿关闭此窗口!')\n\n\ndef GetMondrianData():\n return b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00<\\x00\\x00\\x006\\x08\\x06\\x00\\x00\\x00' \\\n b'\\x9bg\\xfa\\x14\\x00\\x00\\x00\\x01sRGB\\x00\\xae\\xce\\x1c\\xe9\\x00\\x00\\x00\\x04gAMA\\x00\\x00\\xb1' \\\n b'\\x8f\\x0b\\xfca\\x05\\x00\\x00\\x00\\tpHYs\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\x01\\xc7o\\xa8d\\x00' \\\n b'\\x00\\x05%IDAThC\\xed\\x99=o\\x1dE\\x14\\x86\\xf3\\x0f\\xe0\\x1f\\x90?\\x90\\x8f\\x1e$hC\\x93tI\\x07\\r' \\\n b'\\xb4 A\\x9f\\x0f\\x89.\\xc8)\\xa24`@\\xc2\\x9d\\x91@\\xa62\\x14\\xb1\\x92\\xc2\\xf9\\x90LbdK\\x8el\\x05' \\\n b'\\xc7X\\x18\\x83\\x85\\xa3 \\x02\\x02i\\xf0\\xb3\\xd9\\xd79wtfw\\xf6\\xde\\xd9(r\\xee+\\x1d\\xd9w<\\xbb{\\x9e9' \\\n b'\\x1f3{}(\\xbc`\\x1a\\x03\\x1ft\\x8d\\x81\\x0f\\xba\\xc6\\xc0}\\xe9\\xfe\\xee?\\xe1\\xf3\\xbb;\\xe1\\xec\\xb5_' \\\n b'\\xc2\\xeb_\\xaeV\\xf6\\xdew?W\\x9f\\x7f\\xd8\\xfa\\xab\\x9e\\xd5\\xbfz\\x07\\xbe\\xfa\\xd3\\xa3\\n\\xee\\xd0Gw' \\\n b'\\x1a\\xed\\x95\\xcb\\xcb\\xe1\\xeb\\x95\\xdd\\xfa\\xaa\\xfe\\xd4\\x1b0Q\\xcb\\x01\\x8d\\x8dk\\xc8\\x86\\xbe\\xd4' \\\n 
b'\\x0b0\\xa9\\xfb\\xd2\\xc5\\x1f]\\xa0\\xe3\\x9f\\xae\\x84s\\xd7\\xb6\\xc2\\x17\\x8b;an\\xfdQ\\xb8t\\xeb\\xb7\\xf0' \\\n b'\\xfe\\xf7\\x9b\\xd5\\xb8\\xe6pm_\\xd1.\\x0e\\xfc\\xd6\\xcc\\x83\\x01@\\xd9\\xa9\\xe9\\xfb\\xad\\x91#\\xfd\\xdf\\x98z' \\\n b'\\x9a\\x15\\xdc\\xab\\xb4\\x8a\\x01\\xff\\xf1\\xf8?7\\x85_\\xfe\\xb8{\\xb4\\xc8\\x10\\xae\\xe3\\xfac\\x9f\\xacT\\xf7.' \\\n b'\\xa5\"\\xc0\\xd4+\\x8e\\xc5\\xb0\\xa4\\xa9\\xe7,c,\\x02\\x1dZFt\\xad\\xb8\\xa7\\xd2\\xbc$\\xf4\\xc8\\xc08\\xe6\\xd5' \\\n b'\\xeb\\xdb\\xdf>p\\x9d\\x04.U\\xdf\\x8c\\xf3w]\\xc7\\xcf\\xd2\\xd0#\\x01\\x13%\\xcfy\\x9aP,\\x9c\\xf5\\xb2\\xc03\\xe6' \\\n b'\\xd9\\xbd\\x99\\xc5\\xd3\\xf8\\xa8\\xd0C\\x03Sg\\xb1\\xa3\\x18\\xe3\\xb1RY \\xa3\\xa1\\xd1\\xb9\\xf9\\xa9\\xdae\\xbe' \\\n b'\\xbdW)\\xe8\\xa1\\x80\\xbb\\xc0R\\x9b)X\\xe0l$\\x110\\xc0k\\x8e\\x07=J\\xf7\\xee\\x0c\\xec\\xc1\\xe2\\xb8\\x07\\x9bZ' \\\n b'\\x18,\\xd5\\xd0$\\x16B\\xd1\\xe6\\x08*\\tz\\xe2\\xe6v=\\xd2M\\x9d\\x80qPN\\xc8\\xbc(\\xa1&X\\xf6\\xda\\x9c\\xb4d\\x8e\\x9a' \\\n b'\\x96\\xa2j\\xc7\\xe2\\xce\\x9e\\xa3\\xce\\x11\\xe6!r<\\x05K\\xa7\\xd5\\x9c\\xd8\\x88\\x90\\xa7\\xd5\\xdfg\\xc3\\xfc\\xfaDe' \\\n b'\\xfc.5AS*9\\x0bg5t\\r\\x13%\\x0f6u\\xd2\\xc2\\xa8\\xcdX\\x7f\\xff\\xfb0L/\\x9e\\x0e\\x13\\xd7\\x0f\\x0f\\xd8\\xd4\\xc2' \\\n b'\\x89\\xf0\\xf0\\xf1F5\\xc7B+\\x95\\x95\\xf2\\'\\xf7\\x1a]\\x17e\\x01o\\xff\\xb9T9\\xc1\\xcf&5\\xc1z5\\xce\\xfd\\x00' \\\n b'\\x8baeW\\xe6\\x8f\\xec?\\xd3B\\xeb^l\\x8b|\\xeer\\x92\\xcb\\x02&\\nS\\x0bo\\x0e8\\x10+\\x05K\\x14R\\xb0\\xdc\\xcf\\x03' \\\n b'\\xb5\\xc6\\x1c\\xa5\\xb8\\x85\\x16$\\xe5\\xc3\\xabenjg\\xa7\\xf4\\x13\\x07\\x8fV\\x11a\\x01\\xac\\x9a`\\xbd\\xb4_\\xda' \\\n b'\\x9an\\x85\\x9d^<\\x13&o\\xbf\\xb6\\xff\\x99k\\x90\\xa0\\xa9_\\xdd\\x9b\\xf2\\xca\\xed\\xda\\x9dj\\x98\\x87\\xf2\\xf0\\x18' \\\n b'\\xda\\xeb\\xc8M\\xb0\\x82\\xf0\\x8cLR\\xed\\xa2\\x85\\xcd\\xcf\\xf6\\xff\\x16C+\\xb2X\\xee;t\\x160\\x00r~n\\xed|' \\\n b'\\xedX\\x1a:\\x05\\xabkS\\x06l\\x9c=\\xe8IF\\x1c\\xad\\xe6Xh\\x9e\\xc3\\x1bZ\\x17e\\x01\\xb3\\x95\\xd8:\\x99\\xbd\\xf7a' \\\n b'\\xed\\xe0 4uE\\'\\xf6`gW>\\x18\\x80\\x8b\\x8d{6I%\\xc5\\\\A\\xf3\\x1c\\xa0\\xa9\\xe3\\\\e\\x01+\\x85\\xec\\x16\\x90\\x82' \\\n b'\\x8e\\xc5\\xdf\\x86\\x81\\xe5\\x99\\xec\\xf9Zd\\xe4A+\\xb3r\\x0f!\\xd95\\xccjrcm\\xfeH\\xd0\\x93\\xb7^u\\xbb7\\xb0,\\x88' \\\n b'\\x85\\x8bmn\\xedB=\\xfb\\xa9\\x80\\xd4\\x9bU\\xfc\\xb2`\\xa1\\xd5\\xbd9z\\xe6\\xa6v\\x16\\xb0RT\\xabi\\xb7\\x19A\\xd3u7v' \\\n b'\\xe7\\xeb\\xd1P5\\x9e6XE\\xc9\\xca\\xc2\\xcaR\\xd0<\\xd3[\\xe8&e\\x01\\xb3z\\x82d5S\\xd0\\x18\\xa7&\\xaci\\xdb\\xc1Y\\x0f' \\\n b'\\x96\\x85M\\xbdY\\xd9\\x17\\x08$h\\xb2\\xab\\xa9\\xa4be\\x01\\xeb\\x15O\\x91\\xe6\\xbd\\x15\\',\\xf4\\xfc\\xfa%\\x17.6\\x9c' \\\n b'\\xf4\\xa2\\xd2\\x04K\\xff\\xf0DJs\\xcf\\x99\\xe5w\\xea\\x91ve\\xd70\\x90J-L\\'\\x1e\\x0bM\\xd4b@kl;\\x1e\\xac\\x164\\x06' \\\n b'\\xc5\\xda\\xde\\xac\\xf4L\\xbbw7)\\x1b\\x98\\x87\\xb2\\x05\\xa8S\\xa7\\xa0\\xa9c5\\x15k\\x9c\\x9c\\xbc\\xd4\\xe3\\xda\\x18R' \\\n b'\\xe6\\xbdY\\xf1\\\\\\xfb\\xbd\\x17\\xda\\xd8\\xbdQ\\xff\\xd6\\xael`\\xa4\\xc3\\xba\\xf6=\\x1ezxo\\x7ff\\xccB\\x03\\xc6\\t\\t\\xc8' \\\n b'\\x99\\xe5w\\xc3\\xd2\\xaf_\\xd5\\x7f\\x19\\x14\\xc7A\\x0bh\\xcd\\x83\\xe54\\x95\\xea\\xde\\xb9\\xea\\x04\\x8c\\xf4\\x8d\\x83' \\\n b'\\x00\\xa9=\"o\\xc7r\\xd4\\xf4f\\xe5\\xc1z5n\\xb7\\xc8\\\\u\\x06\\xb6\\xa9\\xac\\xcd\\x1eg4\\xd6\\xe6\\x04\\xd7S\\x16\\xd6qk' \\\n b'\\xfc\\xeb%V\\xaa\\xa1y\\x0b\\xd3\\xa6\\xce\\xc0HQ\\xb5\\x9d\\xdb.\\x04\\xdb\\x98\\x97n\\x8c\\xc5{\\xac5/CJ\\xc2\\xa2\\xa1\\x80' \\\n 
b'\\x11\\xd1\\xe5\\xc1)h\\x8chS\\xf7\\xcc\\xa5^=\\xc71\\x16\\xcf{\\x89g\\xcc\\xbb\\xc6\\xfb\\xe6$WC\\x03#uX\\x0b\\x8d' \\\n b'\\xf8\">v2e4={\\xad\\xa4{\\xc7\\xd6\\xa5Ox\\x1a\\t\\x18Y\\xc7\\xac3DG\\x1d\\xe1wa-`\\x9fQ\\xe9\\\n# \\x86\\x01\\x04\\x10\\x00\\\\(Dk\\x1b-\\x04\\xdc\\x1d\\x07\\x14\\x98;\\x0bS\\x7f\\x7f\\xf9\\x13\\\n# \\x04\\x10@\\xf9X\\xbe\\x00\\xc9 \\x14K\\xc1<={\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n\n\ndef GetMondrianBitmap():\n return wx.Bitmap(GetMondrianImage())\n\n\ndef GetMondrianImage():\n stream = io.BytesIO(GetMondrianData())\n return wx.Image(stream)\n\n\ndef GetMondrianIcon():\n icon = wx.Icon()\n icon.CopyFromBitmap(GetMondrianBitmap())\n return icon\n\n\ndef get_mac_address():\n mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n return \":\".join([mac[e:e + 2] for e in range(0, 11, 2)])\n\n\ndef get_score():\n req_data = {\n 'wkNo': workNo,\n 'phone': login_phone,\n 'password': login_password,\n 'time': time.time(),\n }\n try:\n res = requests.post(url=HOST + '/api/Login/', data={'data': encrypt_text(req_data)}, timeout=timeout)\n # print(decrypt_text(json.loads(res.text)['data']))\n data = decrypt_text(json.loads(res.text)['data'])\n return data\n except:\n print(traceback.format_exc())\n return\n\n\ndef cost_score(msg):\n if '成功' in msg:\n order_result = True\n else:\n order_result = False\n req_data = {\n 'wkNo': workNo,\n 'msg': msg,\n 'order_result': order_result,\n 'time': time.time(),\n }\n try:\n res = requests.post(url=HOST + '/api/OrderResult/', data={'data': encrypt_text(req_data)}, timeout=50)\n data = decrypt_text(json.loads(res.text)['data'])\n return data\n except:\n print(traceback.format_exc())\n return\n\n\n# workNo = 'J120249'\n# d = cost_score('成功了')\n# print(d)\n# exit()\n# data = b'My super secret message'\n# d = encrypt_text(data)\n# decrypt_text(d)\n# exit()\ndisable_warnings()\n\n# D920ABDB874BA71A3A7F4BFCDE6891F094693E0E4B617AB0\nsession = Session()\nsession.headers = {\n 'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',\n 'Accept-Language': 'zh-CN',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n}\nencryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew, yzmbox, login_phone, workNo, login_password, clientMac = '', '', '', '', '', '', '', ''\n\n\ndef save_html(txt):\n with open('error{}.html'.format(int(time.time())), 'w+') as f:\n f.write(txt)\n\n\ndef save_code(data):\n with open('code.jpg', 'wb+') as f:\n f.write(data)\n return data\n\n\nclass LOGIN(wx.Frame):\n def __init__(self, parent, id):\n self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)\n wx.Frame.__init__(self, parent, id, u'登录', size=(350, 400))\n # 创建面板\n panel = wx.Panel(self)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n\n # with open('login.js', encoding='utf-8') as f:\n # js = f.read()\n js = get_js_file()\n self.ctx = execjs.compile(js)\n # 添加容器,容器中控件按横向并排排列\n bsizer_phone = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_wkno = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_password = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_mac = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_pic_code = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_msg_code = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_button = wx.BoxSizer(wx.HORIZONTAL)\n\n self.login_tips_phone = wx.StaticText(panel, 0, u\"手机号: \", style=wx.TE_LEFT)\n self.login_tips_wkno = wx.StaticText(panel, 0, u\"工号: \", style=wx.TE_LEFT)\n self.login_tips_password = wx.StaticText(panel, 0, u\"密码: \", style=wx.TE_LEFT | wx.EXPAND)\n 
self.login_tips_mac = wx.StaticText(panel, 0, u\"MAC: \", style=wx.TE_LEFT | wx.EXPAND)\n self.login_tips_pic_code = wx.StaticText(panel, 0, u\"图片验证码 :\", style=wx.TE_LEFT | wx.EXPAND)\n # self.msg_code_pic = wx.Icon(name='code.jpg', type=wx.BITMAP_TYPE_PNG)\n self.img_data = self.get_code()\n self.image = wx.Image(self.img_data, wx.BITMAP_TYPE_JPEG).ConvertToBitmap()\n self.msg_code_pic = wx.BitmapButton(panel, -1, bitmap=self.image)\n\n self.login_tips_msg_code = wx.StaticText(panel, 0, u\"短信验证码 :\", style=wx.TE_LEFT | wx.EXPAND)\n self.bt_send_msg = wx.Button(panel, label='发送验证码')\n self.Bind(wx.EVT_BUTTON, self.get_login_phone_msg, self.bt_send_msg)\n\n self.bt_login = wx.Button(panel, label='登录')\n self.Bind(wx.EVT_BUTTON, self.login, self.bt_login)\n\n self.bt_login_cookie = wx.Button(panel, label='凭证登录')\n bsizer_button.Add(self.bt_login, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n bsizer_button.Add(self.bt_login_cookie, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n self.Bind(wx.EVT_BUTTON, self.login_by_cookie, self.bt_login_cookie)\n\n self.login_phone_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.login_wkno_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.login_password_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.login_mac_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.login_pic_code_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.login_msg_code_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n\n bsizer_phone.Add(self.login_tips_phone, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n bsizer_phone.Add(self.login_phone_box, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n\n bsizer_wkno.Add(self.login_tips_wkno, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n bsizer_wkno.Add(self.login_wkno_box, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n\n bsizer_password.Add(self.login_tips_password, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_password.Add(self.login_password_box, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n\n bsizer_mac.Add(self.login_tips_mac, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n bsizer_mac.Add(self.login_mac_box, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n\n bsizer_pic_code.Add(self.login_tips_pic_code, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_pic_code.Add(self.login_pic_code_box, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_pic_code.Add(self.msg_code_pic, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n self.Bind(wx.EVT_BUTTON, self.get_code, self.msg_code_pic)\n\n bsizer_msg_code.Add(self.login_tips_msg_code, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_msg_code.Add(self.login_msg_code_box, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_msg_code.Add(self.bt_send_msg, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n\n # wx.VERTICAL 横向分割\n bsizer_all = wx.BoxSizer(wx.VERTICAL)\n # 添加顶部sizer,proportion=0 代表bsizer_top大小不可变化\n bsizer_all.Add(bsizer_phone, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n 
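Each form row above follows the same recipe: a horizontal BoxSizer holding a StaticText label and a TextCtrl, with the rows then stacked into one vertical BoxSizer. A self-contained sketch of that pattern (the frame title and label are placeholders, not the original form):

import wx

app = wx.App()
frame = wx.Frame(None, title="row demo", size=(320, 100))
panel = wx.Panel(frame)
row = wx.BoxSizer(wx.HORIZONTAL)
# the label keeps its natural size (proportion=0); the field
# (proportion=1) absorbs any spare horizontal space
row.Add(wx.StaticText(panel, label="Phone:"), 0,
        wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
row.Add(wx.TextCtrl(panel), 1, wx.ALL | wx.EXPAND, 5)
col = wx.BoxSizer(wx.VERTICAL)
col.Add(row, 0, wx.EXPAND | wx.ALL, 5)
panel.SetSizer(col)
frame.Show()
app.MainLoop()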
bsizer_all.Add(bsizer_wkno, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_password, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_mac, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_pic_code, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_msg_code, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_button, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n panel.SetSizer(bsizer_all)\n self.SetIcon(GetMondrianIcon())\n\n # self.bt_send_act_msg = wx.Button(panel, label='发送验证码')\n\n def OnClose(event, evt):\n sys.exit(0)\n\n def Onmsgbox(self, evt=None, msg=''):\n wx.MessageBox(msg, \"Message\", wx.OK | wx.ICON_INFORMATION)\n\n def encryption_data(self, workNo, login_phone, login_password, yzmbox):\n d = self.ctx.call(\"sendRandomUser\", workNo, login_phone, login_password, yzmbox)\n return d[0], d[1], d[2]\n\n def get_code(self, evt=None):\n global session\n code_url = self.ctx.call(u'yzmboxClick')\n # print(u'获取验证码!')\n\n data = session.get(url=code_url, verify=False)\n # img_data = save_code(data.content)\n data_stream = io.BytesIO(data.content)\n if evt:\n self.image = wx.Image(data_stream, wx.BITMAP_TYPE_JPEG).ConvertToBitmap()\n # 更新GridBagSizer()的self.bmp2\n self.msg_code_pic.SetBitmap(wx.Bitmap(self.image))\n # self.msg_code_pic.SetBitmap(wx.BitmapFromBuffer(img_data))\n self.Onmsgbox(evt, '刷新验证码成功!')\n else:\n return data_stream\n\n def get_login_phone_msg(self, evt):\n global session, encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew, yzmbox, login_phone, workNo, login_password, clientMac\n try:\n login_phone = self.login_phone_box.GetValue().strip()\n workNo = self.login_wkno_box.GetValue().strip()\n login_password = self.login_password_box.GetValue().strip()\n yzmbox = self.login_pic_code_box.GetValue().strip()\n clientMac = self.login_mac_box.GetValue().strip()\n\n if not all([login_phone, workNo, login_password, yzmbox, clientMac]):\n # print('某个字段为空')\n self.Onmsgbox(evt, '某个字段为空!')\n return ''\n try:\n encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew = self.encryption_data(\n workNo, login_phone, login_password, yzmbox)\n except:\n self.Onmsgbox(evt, '参数不合法!')\n return ''\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxLoginSendMsgAction.action'\n parms = {\n 'userName': encryped_mobileNoNew,\n 'workNo': encryped_workNoNew,\n 'password': encryped_passwordNew,\n 'yzmbox': yzmbox,\n 'clientMac': clientMac,\n }\n # print('发送短信验证码!')\n data = session.post(url=url, data=parms, timeout=timeout, verify=False)\n\n if data.text == '验证码已经发送,请注意查收!':\n # print('发送成功!')\n self.Onmsgbox(evt, '验证码已经发送,请注意查收!')\n action_frame.text_contents.AppendText(u'验证码已经发送,请注意查收!\\n')\n\n return True\n else:\n self.Onmsgbox(evt, '验证码发送失败!')\n action_frame.text_contents.AppendText(u'验证码发送失败!\\n')\n\n # print(data.text)\n return False\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n\n def first_request(self):\n global session\n try:\n session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=timeout, verify=False)\n session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=timeout, verify=False)\n except:\n print(traceback.format_exc())\n return session\n\n def open_cookie(self):\n myCookie = {}\n with open('cookies.json', 'r') as f:\n listCookies = json.loads(f.read())\n for cookie in listCookies:\n myCookie.update({cookie['name']: 
cookie['value']})\n return myCookie\n\n def login_by_cookie(self, evt):\n global session, workNo, login_phone, login_password\n try:\n # 第一次获取cookie\n self.first_request()\n\n cookies = self.open_cookie()\n session.cookies._cookies['211.138.30.200']['/WSKF']['JSESSIONID'].value = cookies['JSESSIONID']\n session.cookies._cookies['211.138.30.200']['/WSKF/s_channel']['randomStr_HW'].value = cookies[\n 'randomStr_HW']\n\n workNo = cookies['workNo']\n login_phone = cookies['login_phone']\n login_password = cookies['login_password']\n url = u'https://211.138.30.200/WSKF/s_channel/mainAction.action'\n data = session.get(url=url, timeout=timeout, verify=False)\n\n if u'请先输入工号' not in data.text:\n # print(u'登录成功\\n')\n # print('手机号: {}\\n'.format(re.findall('手机号:(.*?)', data.text)[0]))\n # print(u'工号: %s\\n' % re.findall('工号:(.*?)', data.text)[0])\n if score_switch:\n result = get_score()\n if not result:\n self.Onmsgbox(evt, u'登录失败,验证未通过')\n return\n self.Onmsgbox(evt, u'登录成功!')\n self.Destroy()\n action_frame.Show()\n if score_switch:\n action_frame.set_score(result['score'])\n action_frame.text_contents.AppendText(u'登录成功!\\n')\n action_frame.text_contents.AppendText(u'手机号: %s\\n' % re.findall(u'手机号:(.*?)', data.text)[0])\n action_frame.text_contents.AppendText(u'工号: %s\\n' % re.findall(u'工号:(.*?)', data.text)[0])\n # return data.text\n else:\n # print(u'登录失败')\n self.Onmsgbox(evt, u'登录失败,凭证过期')\n self.get_code(evt)\n return ''\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n\n except:\n print(traceback.format_exc())\n return ''\n\n def login(self, evt):\n global session, encryped_mobileNoNew, encryped_workNoNew, encryped_passwordNew, yzmbox, login_phone, workNo, login_password, clientMac\n try:\n # 第一次获取cookie\n login_msg_code = self.login_msg_code_box.GetValue().strip()\n login_phone = self.login_phone_box.GetValue().strip()\n workNo = self.login_wkno_box.GetValue().strip()\n login_password = self.login_password_box.GetValue().strip()\n yzmbox = self.login_pic_code_box.GetValue().strip()\n clientMac = self.login_mac_box.GetValue().strip()\n if not all([clientMac, login_phone, workNo, login_password, login_msg_code]):\n # print(u'某个字段为空')\n self.Onmsgbox(evt, u'某个字段为空!')\n return ''\n self.first_request()\n\n parms = {\n 'info.brower': 'IE7.0',\n 'info.os': 'Win10 32位',\n 'info.mac': clientMac,\n 'info.workno': workNo,\n 'info.mobileno': login_phone}\n self.check_info(parms)\n headers = {\n 'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/login.action',\n 'Accept-Language': 'zh-CN',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache'\n }\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxMainAction.action'\n parms = {\n 'loginCondition.mobileNo': encryped_mobileNoNew,\n 'loginCondition.workNo': encryped_workNoNew,\n 'loginCondition.password': encryped_passwordNew,\n 'loginCondition.yzmbox': yzmbox,\n 'loginCondition.randCode': login_msg_code,\n 'clientIp': '',\n 'clientMac': clientMac\n }\n data = session.post(url=url, data=parms, headers=headers, timeout=timeout, verify=False)\n cookies = [{\"domain\": \"211.138.30.200\", \"name\": \"JSESSIONID\", \"value\": \"\",\n \"path\": \"//WSKF\", \"httpOnly\": True, \"secure\": True},\n {\"domain\": 
\"211.138.30.200\", \"name\": \"randomStr_HW\", \"value\": \"\",\n \"path\": \"//WSKF/s_channel/\", \"httpOnly\": True, \"secure\": True},\n {'name': 'workNo', 'value': workNo},\n {'name': 'login_phone', 'value': login_phone},\n {'name': 'login_password', 'value': login_password}]\n\n if u'请先输入工号' not in data.text:\n for i in cookies:\n if i['name'] == u'JSESSIONID':\n i['value'] = session.cookies._cookies['211.138.30.200']['/WSKF']['JSESSIONID'].value\n if i['name'] == u'randomStr_HW':\n i['value'] = session.cookies._cookies['211.138.30.200']['/WSKF/s_channel']['randomStr_HW'].value\n\n with open('cookies.json', 'w+') as f:\n f.write(json.dumps(cookies))\n # print('登录成功')\n # print('手机号: %s' % re.findall('手机号:(.*?)', data.text)[0])\n # print('工号: %s' % re.findall('工号:(.*?)', data.text)[0])\n if score_switch:\n result = get_score()\n if not result:\n self.Onmsgbox(evt, u'登录失败,验证未通过')\n return\n self.Onmsgbox(evt, u'登录成功!')\n self.Destroy()\n action_frame.Show()\n if score_switch:\n action_frame.set_score(result['score'])\n action_frame.text_contents.AppendText(u'登录成功!\\n')\n action_frame.text_contents.AppendText(u'手机号: %s\\n' % re.findall('手机号:(.*?)', data.text)[0])\n action_frame.text_contents.AppendText(u'工号: %s\\n' % re.findall('工号:(.*?)', data.text)[0])\n return data.text\n else:\n # print(u'登录失败')\n # print(data.text)\n self.Onmsgbox(evt, '登录失败')\n self.get_code(evt)\n return ''\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n\n\n except:\n print(traceback.format_exc())\n return ''\n\n def check_info(self, params):\n global session\n url = 'https://211.138.30.200/WSKF/s_channel/checkComInfo.action'\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Accept': 'text/plain, */*; q=0.01',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/login.action',\n 'Accept-Language': 'zh-cn',\n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache'\n }\n\n try:\n session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)\n return True\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n except:\n # print(traceback.format_exc())\n return\n\n\nclass YD_MAKE_ORDER(wx.Frame):\n def __init__(self, parent, id):\n self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)\n wx.Frame.__init__(self, parent, id, versionName, size=(550, 400))\n self.user_score = 0\n self.no_score = False\n # 创建面板\n panel = wx.Panel(self)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n\n # 添加容器,容器中控件按横向并排排列\n bsizer_top = wx.BoxSizer(wx.HORIZONTAL)\n self.act_type_list = [u'一键迁转', u'产品订购']\n self.act_type_choose = wx.Choice(panel, -1, choices=self.act_type_list)\n self.act_type_choose.Select(0)\n self.Bind(wx.EVT_CHOICE, self.on_choice_act_type, self.act_type_choose)\n\n self.bt_search_act2 = wx.Button(panel, label=u'锁定活动')\n self.bt_login = wx.Button(panel, label=u'刷新')\n self.Bind(wx.EVT_BUTTON, self.refrash, self.bt_login)\n self.Bind(wx.EVT_BUTTON, self.lock_act, self.bt_search_act2)\n\n # self.Bind(wx.EVT_BUTTON, self.one_flow, self.bt_login)\n\n bsizer_top.Add(self.act_type_choose, proportion=4, flag=wx.EXPAND | wx.LEFT, border=4)\n bsizer_top.Add(self.bt_search_act2, proportion=1, flag=wx.EXPAND | wx.LEFT, border=5)\n bsizer_top.Add(self.bt_login, proportion=1, 
flag=wx.EXPAND | wx.LEFT, border=5)\n\n bsizer_cust_phone = wx.BoxSizer(wx.HORIZONTAL)\n self.st_tips3 = wx.StaticText(panel, 0, u\"手机号 :\", style=wx.TE_LEFT)\n self.cust_phone_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n\n # 搜索具体ID\n self.act_id_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.act_id_box.Enable(False)\n\n self.bt_search_act = wx.Button(panel, label=u'查询活动')\n\n self.Bind(wx.EVT_BUTTON, self.make_mv_action, self.bt_search_act)\n bsizer_cust_phone.Add(self.st_tips3, proportion=0, flag=wx.EXPAND | wx.TOP | wx.RIGHT, border=10)\n bsizer_cust_phone.Add(self.cust_phone_box, proportion=0, flag=wx.CENTER, border=15)\n bsizer_cust_phone.Add(self.act_id_box, proportion=0, flag=wx.CENTER, border=15)\n bsizer_cust_phone.Add(self.bt_search_act, proportion=1, flag=wx.EXPAND | wx.LEFT, border=5)\n\n # bsizer_cust_phone.Add(self.bt_send_act_msg, proportion=0, flag=wx.EXPAND | wx.LEFT, border=15)\n # bsizer_cust_phone.Add(self.bt_make_order, proportion=0, flag=wx.EXPAND | wx.LEFT, border=15)\n\n # 业务选择\n bsizer_act = wx.BoxSizer(wx.VERTICAL)\n self.st_tips = wx.StaticText(panel, 0, u\"活动选择 :\", style=wx.TE_LEFT)\n self.act_list = []\n self.act_choose = wx.Choice(panel, -1, choices=self.act_list)\n self.Bind(wx.EVT_CHOICE, self.on_choice, self.act_choose)\n\n self.st_tips2 = wx.StaticText(panel, 0, u\"套餐选择 :\", style=wx.TE_LEFT)\n self.act_son_list = []\n self.act_son_choose = wx.Choice(panel, -1, choices=self.act_son_list)\n self.Bind(wx.EVT_CHOICE, self.son_on_choice, self.act_son_choose)\n\n bsizer_act.Add(self.st_tips, proportion=0, flag=wx.ALIGN_TOP, border=1)\n bsizer_act.Add(self.act_choose, proportion=0, flag=wx.EXPAND | wx.ALIGN_TOP, border=1)\n bsizer_act.Add(self.st_tips2, proportion=0, flag=wx.EXPAND | wx.ALIGN_TOP, border=1)\n bsizer_act.Add(self.act_son_choose, proportion=0, flag=wx.EXPAND | wx.ALIGN_TOP, border=1)\n\n bsizer_cust_phone_code = wx.BoxSizer(wx.HORIZONTAL)\n\n self.cust_phone_code_tips = wx.StaticText(panel, 0, u\"短信验证码 :\", style=wx.TE_LEFT)\n self.cust_phone_code_box = wx.TextCtrl(panel, style=wx.TE_LEFT)\n self.bt_send_act_msg = wx.Button(panel, label=u'发送验证码')\n self.Bind(wx.EVT_BUTTON, self.mv_send_msg, self.bt_send_act_msg)\n self.user_score_tips = wx.StaticText(panel, 0, u\"剩于点数 :\", style=wx.TE_LEFT)\n self.user_score_amount = wx.StaticText(panel, 0, str(self.user_score), style=wx.TE_LEFT)\n\n self.bt_make_order = wx.Button(panel, label=u'办理')\n self.Bind(wx.EVT_BUTTON, self.make_order, self.bt_make_order)\n\n bsizer_cust_phone_code.Add(self.cust_phone_code_tips, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n\n bsizer_cust_phone_code.Add(self.cust_phone_code_box, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n bsizer_cust_phone_code.Add(self.bt_send_act_msg, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT,\n border=5)\n bsizer_cust_phone_code.Add(self.bt_make_order, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_cust_phone_code.Add(self.user_score_tips, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bsizer_cust_phone_code.Add(self.user_score_amount, proportion=0,\n flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n\n # bsizer_act.Add(bsizer_cust_phone, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_act.Add(bsizer_cust_phone_code, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n\n # self.act_choose.Select(0)\n # 创建文本内容框,多行,垂直滚动条\n 
self.text_contents = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.VSCROLL)\n\n # 添加容器,容器中控件按纵向并排排列\n bsizer_center = wx.BoxSizer(wx.HORIZONTAL)\n bsizer_bottom = wx.BoxSizer(wx.HORIZONTAL)\n\n bsizer_bottom.Add(self.text_contents, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n\n # wx.VERTICAL 横向分割\n bsizer_all = wx.BoxSizer(wx.VERTICAL)\n # 添加顶部sizer,proportion=0 代表bsizer_top大小不可变化\n bsizer_all.Add(bsizer_top, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_cust_phone, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_act, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n bsizer_all.Add(bsizer_center, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)\n # 添加顶部sizer,proportion=1 代表bsizer_bottom大小变化\n bsizer_all.Add(bsizer_bottom, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n # self.Bind(wx.EVT_BUTTON, self.onOpen, self.bt_open)\n\n panel.SetSizer(bsizer_all)\n self.SetIcon(GetMondrianIcon())\n\n # with open('login.js', encoding='utf-8') as f:\n # js = f.read()\n js = get_js_file()\n self.ctx = execjs.compile(js)\n self.refrash()\n\n def OnClose(event, evt):\n sys.exit(0)\n\n def set_score(self, score):\n self.user_score_amount.SetLabel(str(score))\n\n def refrash(self, evt=None, phone=None):\n self.rand_number = ''\n self.userInfoUrl = ''\n self.order_son_url = ''\n self.cust_phone = ''\n self.order_son_list = []\n self.act_list = []\n self.smsPassword = ''\n self.order_value = ''\n self.order_id = ''\n self.son_order_data = ''\n self.son_order_value = ''\n self.son_order_id = ''\n self.order_dic = {}\n self.act_list_2 = []\n if evt:\n self.cust_phone_box.SetValue('')\n self.cust_phone_code_box.SetValue('')\n self.act_choose.SetItems([])\n self.act_son_choose.SetItems([])\n self.act_id_box.SetValue('')\n if phone:\n self.cust_phone_box.SetValue(phone)\n self.make_mv_action()\n else:\n if score_switch:\n result = get_score()\n if not result:\n self.Onmsgbox(evt, u'登录失败,验证未通过')\n return\n action_frame.set_score(result['score'])\n self.Onmsgbox(evt, '刷新成功')\n self.text_contents.AppendText(u'刷新成功!\\n')\n\n # def first_request(self):\n # global session\n # session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=10, verify=False)\n # session.get(url='https://211.138.30.200/WSKF/s_channel/login.action', timeout=10, verify=False)\n # return session\n def lock_act(self, evt):\n self.Onmsgbox(evt, '未开放')\n\n def get_frame_code(self, html, key):\n # key = '迁转活动一键办理' '产品订购'\n data = re.findall('menuNodeClick\\((.*?)\\);\">{}'.format(key), html)[0]\n return [i.strip('\\'') for i in data.split(',')]\n\n # 获取用户信息\n def get_userInfo(self):\n global session\n try:\n if self.act_type_choose.GetCurrentSelection() == 0:\n url = ' https://211.138.30.200/WSKF/s_channel/ajaxUserInfoCheck.action'\n params = {\n 'useRequiredId': self.userInfoUrl.split('&')[0],\n 'pay.mobileNoForGuest': self.userInfoUrl.split('&')[1].split('=')[1],\n 't': self.userInfoUrl.split('=')[-1]\n }\n headers = {\n 'Accept': 'text/plain, */*; q=0.01',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(\n self.rand_number),\n 'Accept-Language': 'zh-CN',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep - Alive',\n }\n data = session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)\n 
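Nearly every request in this class repeats the same transport pattern: one shared Session, a form-encoded POST, a spoofed Referer, verify=False for the self-signed host, and a requests.ReadTimeout guard. A sketch condensing it into one hypothetical helper (post_form is not part of the original file):

import requests

def post_form(session, url, params, referer, timeout=15):
    # form-encoded POST through the shared session; certificate checks
    # are skipped because the host presents a self-signed certificate
    headers = {
        'Referer': referer,
        'X-Requested-With': 'XMLHttpRequest',
    }
    try:
        resp = session.post(url, data=params, headers=headers,
                            verify=False, timeout=timeout)
        return resp.text
    except requests.ReadTimeout:
        # callers in this file treat a timeout as "no data"
        return None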
self.text_contents.AppendText('用户信息\\n')\n self.text_contents.AppendText(data.text.replace('', '').replace(' ', '').replace('', ' '))\n self.text_contents.AppendText('\\n')\n return data.text\n else:\n params = {\n 'incrementProductBookCondition.productType': 2,\n 'incrementProductBookCondition.mobileNo': self.cust_phone,\n }\n headers = {\n 'Accept': 'text/plain, */*; q=0.01',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&auth=L&t={}'.format(\n self.rand_number),\n 'Accept-Language': 'zh-CN',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep - Alive',\n }\n data = session.post(url=self.userInfoUrl, headers=headers, data=params, verify=False, timeout=timeout)\n self.text_contents.AppendText('查询中...\\n')\n return data.text\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n\n # 选择办理类型\n def on_choice_act_type(self, evt=None):\n if evt:\n self.refrash(evt, self.cust_phone)\n if self.act_type_choose.GetCurrentSelection() == 1:\n self.act_son_choose.Enable(False)\n self.act_id_box.Enable(True)\n else:\n self.act_son_choose.Enable(True)\n self.act_id_box.Enable(False)\n\n # 验证短信\n def check_msg(self):\n headers = {\n 'X-Requested-With': 'XMLHttpRequest',\n 'X-Prototype-Version': '1.7.2',\n 'Accept': 'text/javascript, text/html, application/xml, text/xml, */*',\n 'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(\n self.rand_number),\n 'Accept-Language': 'zh-cn',\n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache',\n }\n if self.act_type_choose.GetCurrentSelection() == 1:\n headers[\n 'Referer'] = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&t={}'.format(\n self.rand_number)\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxCheckCrmSms.action'\n params = {\n 'mobileNo': self.cust_phone,\n 'checkCode': self.smsPassword\n }\n try:\n data = session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n except:\n # print(traceback.format_exc())\n pass\n # self.check_userInfo()\n\n # 创建一个任务\n def make_mv_action(self, evt=None):\n\n # mv_frame_params = get_frame_code(html, '迁转活动一键办理')\n self.cust_phone = self.cust_phone_box.GetValue().strip()\n self.user_act_id = self.act_id_box.GetValue().strip()\n\n if not self.cust_phone:\n self.Onmsgbox(None, '先输入手机号')\n return\n # self.rand_number = random.randint(10000, 90000)\n self.rand_number = 40713\n\n headers = {\n 'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/mainAction.action',\n 'Accept-Language': 'zh-CN',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep - Alive',\n }\n if self.act_type_choose.GetCurrentSelection() == 0:\n url = 
'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action'\n params = {\n 'CBBSaleFlag': 'sale',\n 'auth': 'TCQZ',\n 't': str(self.rand_number)\n }\n else:\n url = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action'\n params = {\n 'incrementProductBookCondition.productType': 2,\n 'CBBSaleFlag': 'sale',\n 'auth': 'L',\n 't': str(self.rand_number)\n }\n\n data = session.get(url=url, headers=headers, data=params, verify=False, timeout=timeout)\n # print(data.status_code)\n if self.act_type_choose.GetCurrentSelection() == 0:\n self.userInfoUrl = \\\n re.findall('/s_channel/ajaxUserInfoCheck\\.action\\?useRequiredId=(.*?\"&t=.*?)\"', data.text)[\n 0].replace(\n '\"+mobileNoForGuest+\"', str(self.cust_phone))\n self.randomString = re.findall('name=\"randomString\" value=\"(.*?)\"', data.text)[0]\n else:\n self.userInfoUrl = 'https://211.138.30.200/WSKF/s_channel/ajaxIncrementProMarketingAction.action'\n self.randomString = re.findall('name=\"randomString\" value=\"(.*?)\"', data.text)[0]\n\n # 获取用户信息\n # userInfo = self.get_userInfo()\n # print(userInfo)\n\n if self.act_type_choose.GetCurrentSelection() == 0:\n if evt:\n self.order_son_url = re.findall(\n '\"(ajaxGetTransferMeal\\.action\\?ployId=\"\\+document\\.getElementById\\(\"ployId\"\\)\\.value\\+\"&serviceMobileNo=\"\\+mobileNoForGuest\\+\"&t=.*?)\"',\n data.text)[0].replace('\"+document.getElementById(\"ployId\").value+\"', '{}').replace(\n '\"+mobileNoForGuest+\"',\n '{}')\n self.act_list = [i.strip('\\t') for i in re.findall(\n ' .*?>.*?>(.*?)',\n data.text)]\n self.act_choose.SetItems(self.act_list)\n # for i in self.act_list:\n # print(i)\n else:\n if evt:\n if self.user_act_id:\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxShowSearchIncBookNext.action'\n headers[\n 'Referer'] = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&t={}'.format(\n self.rand_number)\n params = {\n 'phoneForGuest': self.cust_phone,\n 'searchKey': self.user_act_id,\n }\n try:\n data = session.post(url=url, headers=headers, data=params, verify=False, timeout=timeout)\n except requests.ReadTimeout:\n self.Onmsgbox(msg='请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n except:\n print(traceback.format_exc())\n return\n rec_data = data.json()\n # self.act_list = {\"message\": {\n # \"600000611567\": [\"600000611567\", \"19元家庭流量包2个月优惠活动(主副卡)\", \"X13201812015001\", \"1\",\n # \"68元及以上套餐客户订购19元家庭流量包,赠送19元*2个月分摊流量费,参与活动60天优惠期内套餐不能降档,家庭流量包不能退订或降档。\",\n # \"2019-06-05 23:59:59\"]}, \"pageIndex\": 1, \"startIndex\": 0}\n # '('600000266297')\">10元语音包免费用6个月(X00592103)', data.text)[0]\n act_list = re.findall('\\'(.*?)\\'\\)\">(.*?)(.*?)', data.text)\n for i in self.order_son_list:\n self.order_dic[i[1]] = i[0]\n self.act_son_choose.SetItems(list(self.order_dic.keys()))\n except:\n self.order_son_list = []\n if not self.order_son_list:\n self.Onmsgbox(None, '没有套餐可以选择')\n self.text_contents.AppendText('没有套餐可以选择\\n')\n except requests.ReadTimeout:\n self.Onmsgbox('请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n return None\n\n # 68418\n def mv_send_msg(self, evt):\n global session\n self.cust_phone = self.cust_phone_box.GetValue().strip()\n if not self.cust_phone:\n self.Onmsgbox(None, '先输入手机号')\n return\n parms = {\n 'mobileNo': self.cust_phone\n }\n headers = {\n 'X-Requested-With': 'XMLHttpRequest',\n 'X-Prototype-Version': '1.7.2',\n 'Accept': 'text/javascript, text/html, application/xml, text/xml, */*',\n 'Content-type': 
'application/x-www-form-urlencoded; charset=UTF-8',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(\n self.rand_number),\n 'Accept-Language': 'zh-cn',\n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache',\n }\n if self.act_type_choose.GetCurrentSelection() == 1:\n headers[\n 'Referer'] = 'https://211.138.30.200/WSKF/s_channel/incrementProBookInitAction.action?incrementProductBookCondition.productType=2&CBBSaleFlag=sale&t={}'.format(\n self.rand_number)\n try:\n data = session.post(url='https://211.138.30.200/WSKF/s_channel/ajaxSendCrmSms.action', headers=headers,\n data=parms,\n verify=False)\n self.Onmsgbox(evt, data.text)\n self.text_contents.AppendText(data.text + '\\n')\n if data.text == u'随机码发送成功!':\n return True\n else:\n return False\n except requests.ReadTimeout:\n self.Onmsgbox('请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n return None\n\n def make_order(self, evt):\n global session\n try:\n if score_switch:\n # 更新分数\n result = get_score()\n if not result:\n self.Onmsgbox(evt, u'验证未通过')\n return\n else:\n self.set_score(result['score'])\n if result['score'] <= 0:\n self.Onmsgbox(evt, u'剩余点数不足')\n self.no_score = True\n return\n\n # 取消刷新数据\n # self.make_mv_action()\n # if self.no_score:\n # return\n if self.act_type_choose.GetCurrentSelection() == 1:\n self.smsPassword = self.cust_phone_code_box.GetValue().strip()\n\n self.check_msg()\n\n # order_value = self.act_choose.StringSelection\n self.proId = self.order_value.split('/')[1]\n self.proName = self.order_value.split('/')[0]\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxIsPrivalNo.action'\n params = {\n 'mobileNo': self.cust_phone,\n }\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Accept': 'text/html, */*; q=0.01',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProBookListAction.action',\n 'Accept-Language': 'zh-cn',\n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache',\n }\n data = session.post(url=url, headers=headers, data=params, verify=False)\n\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxIsCheck.action'\n params = {\n 'mobileNo': self.cust_phone,\n 'proId': self.proId,\n }\n data = session.post(url=url, headers=headers, data=params, verify=False)\n\n url = 'https://211.138.30.200/WSKF/s_channel/ajaxWhetherNeedAction.action'\n params = {\n 'mobileNo': self.cust_phone,\n 'product': self.proId,\n 'productType': 3 if self.proId in self.act_list_2 else 2\n }\n data = session.post(url=url, headers=headers, data=params, verify=False)\n url = 'https://211.138.30.200/WSKF/s_channel/incrementProductBookAction.action'\n params = [('randomString', self.randomString),\n ('incrementProductBookCondition.productName', self.proName),\n ('incrementProductBookCondition.mobileNo', self.cust_phone),\n ('incrementProductBookCondition.productType', '3' if self.proId in self.act_list_2 else '2'),\n ('imeiNo', ''),\n ('searchProduct', ''),\n ('searchProduct', ''),\n ('incrementProductBookCondition.product', self.proId),\n ('incrementProductBookCondition.str2', '',)]\n data = session.post(url=url, headers=headers, data=params, verify=False)\n if 
self.proId not in self.act_list_2:\n self.mainPriceId_N = re.findall(\"name=\\'mainPriceId_N\\' value=\\'(.*?)\\'/>\", data.text)\n\n self.randomString = re.findall('name=\"randomString\" value=\"(.*?)\">', data.text)[0]\n url = 'https://211.138.30.200/WSKF/s_channel/bookIncPro.action'\n mainPriceId_N_params = '|'.join(self.mainPriceId_N) + '|'\n params = [('randomString', self.randomString),\n ('incrementProductBookCondition.productName', ''),\n ('incrementProductBookCondition.product', self.proId),\n ('incrementProductBookCondition.productType', '2'),\n ('mainAndSubPriceId', mainPriceId_N_params),\n ('incProdAtrrs', ''),\n ('incrementProductBookCondition.mobileNo', self.cust_phone),\n ('termIMEI', '',)]\n for i in self.mainPriceId_N:\n params.append(('mainPriceId_N', i))\n headers = {\n 'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/incrementProductBookAction.action',\n 'Accept-Language': 'zh-CN',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache'\n }\n data = session.post(url=url, headers=headers, data=params, verify=False)\n else:\n self.smsPassword = self.cust_phone_code_box.GetValue().strip()\n if not self.smsPassword or not self.cust_phone:\n self.Onmsgbox(None, '信息不完整!')\n return\n self.check_msg()\n url = 'https://211.138.30.200/WSKF/s_channel/mealTransferSubmit.action'\n params = {\n 'randomString': self.randomString,\n 'serviceMobileNo': self.cust_phone,\n 'certType': '200',\n 'password': '',\n 'smsPassword': self.smsPassword.strip(),\n 'ployView': self.order_value,\n 'ployId': self.order_id,\n 'productView': self.son_order_value,\n 'productId': self.son_order_id,\n 'instanceId': '',\n # 'productView': '移动流量王全国版-18元套餐(80分钟+200M)(PIXFXQG1)',\n # 'productId': '100168001088',\n }\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Accept': 'text/html, */*; q=0.01',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://211.138.30.200/WSKF/s_channel/mealTransferInit.action?CBBSaleFlag=sale&auth=TCQZ&t={}'.format(\n self.rand_number),\n 'Accept-Language': 'zh-cn',\n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Host': '211.138.30.200',\n 'Connection': 'Keep-Alive',\n 'Cache-Control': 'no-cache',\n }\n data = session.post(url=url, headers=headers, data=params, verify=False)\n try:\n result = re.findall('([\\w\\W]*?)<', data.text)[0].strip()\n except:\n save_html(data.text)\n result = '办理失败'\n self.refrash(evt=evt, phone=self.cust_phone)\n if score_switch:\n # 根据结果是否扣分\n try:\n rev_data = cost_score(result)\n except:\n rev_data = None\n if rev_data:\n self.set_score(rev_data['score'])\n self.Onmsgbox(evt, rev_data['msg'])\n self.text_contents.AppendText(result + '\\n')\n else:\n self.Onmsgbox(evt, '验证失败')\n # print(result)\n else:\n self.Onmsgbox(evt, result)\n self.text_contents.AppendText(result + '\\n')\n self.refrash(evt=evt, phone=self.cust_phone)\n except requests.ReadTimeout:\n self.Onmsgbox('请求超时!')\n print(traceback.format_exc())\n print('请求超时!')\n except:\n print(traceback.format_exc())\n pass\n\n def Onmsgbox(self, event=None, msg=''):\n wx.MessageBox(msg, \"Message\", wx.OK | wx.ICON_INFORMATION)\n\n\ndef get_js_file():\n file = '''function login(obj)\n{\n\tvar workNo = 
document.getElementById(\"workNo\").value;\n\tvar mac = $(\"#clientMac\").val();\n\tif(!mac){\n\t\tif(workNo == \"A830000\"){\n\t\t\talert(\"login拦截mac为空\");\n\t\t}\n\t\tif(confirm(\"为配合MAC地址信息登记,请您下载并运行控件,以免影响您的登录!\")){\n\t\t\tvar location=window.location.href;\n\t\t\tvar end=location.indexOf(\"WSKF\");\n\t\t\twindow.location.href=location.substr(0,end+4)+\"/download/IE自动设置.zip\";\n\t\t}\n\t\treturn;\n\t}\n\tvar mobileNo = document.getElementById(\"mobileNo\").value;\n\tvar password = document.getElementById(\"password\").value;\n\tvar randCode = document.getElementById(\"randCode\").value;\n\tvar yzmbox = document.getElementById(\"yzmbox\").value;\n\t//var isNumber = /^[-]?\\d+[.]?\\d*$/; //在火狐上此正则表达式不能识别 //||!isNumber.test(mobileNo)\n\tif(mobileNo == 'undefined' || mobileNo.length != 11)\n\t{\n\t\talert(\"请输入11位的电话号码!\");\n\t\treturn ;\n\t}\n\n\tif(workNo == 'undefined' || workNo.length == 0)\n\t{\n\t\talert(\"请输入代理商工号!\");\n\t\treturn ;\n\t}\n\t//var validatePwd = /^(?=.*?[a-z])(?=.*?[A-Z])(?=.*?\\d)(?=.*?[#@*&.])[a-zA-Z\\d#@*&.]*$/;\n\t//var validatePwd = /^(\\w)*([#@$^*&.()]*)/;\n\tif(password == 'undefined' || password.length == 0)\n\t{\n\t\talert(\"请输入密码!\");\n\t\treturn ;\n\t}\n\n\tif(yzmbox =='undefined' || yzmbox.length == 0){\n\t\talert(\"请输入图片验证码!\");\n\t return ;\n\t}\n\tif(randCode == 'undefined' || randCode.length == 0)\n\t{\n\t\talert(\"请输入获取的短信验证码!\");\n\t\treturn ;\n\t}\n\tregistComInfo(workNo);\n\t/*\n\t *针对敏感数据明文传输的漏洞\n\t *对用户密码进行des加密,传到java端后再进行des解密\n\t **/\n\t/*\n\tvar fKey = mobileNo.substring(0);\n\tvar sKey = mobileNo.substring(4);\n\tvar tKey = mobileNo.substring(7);\n\t*/\n\tvar mobileNoNew = strEncode(mobileNo, \"pdcss\", \"css\", \"co\");\n\tvar workNoNew = strEncode(workNo, \"pdcss\", \"css\", \"co\");\n\tvar passwordNew = strEncode(password, \"pdcss\", \"css\", \"co\");\n\tdocument.getElementById(\"mobileNo\").value = mobileNoNew;\n\tdocument.getElementById(\"workNo\").value = workNoNew;\n\tdocument.getElementById(\"password\").value = passwordNew;\n\tobj.click = '';\n\n\n\t\tdocument.forms[0].action=\"ajaxMainAction.action\";\n\t\tdocument.forms[0].submit();\n\n}\nfunction sendRandomUser(workNo, userName, password, yzmbox) {\n try {\n //document.getElementById(\"divSMSButton\").innerHTML=\"\";\n\n\n if (userName == \"\") {\n //document.getElementById(\"divSMSButton\").innerHTML = '点击获取短信验证码';\n //userName.focus();\n return;\n }\n if (workNo == \"\") {\n //document.getElementById(\"divSMSButton\").innerHTML = '点击获取短信验证码';\n //workNo.focus();\n return;\n }\n\n var userNameValue = userName;\n var workNoValue = workNo;\n workNoValue = workNoValue.toUpperCase();\n workNo = workNoValue;\n var pattern = /^[A-Z]\\d{6}$/;\n var regExp = /^((((13[5-9]{1})|(147){1}|(178){1}|(198){1}|(15[0,1,2,7,8,9]{1})|(18[2,3,4,7,8]{1})){1}\\d{1})|((134[0-8]{1}){1})|((3[0-9]{3}))){1}\\d{6,7}$/;\n if (!regExp.exec(userNameValue) || !pattern.test(workNoValue)) {\n return 1;\n }\n\n if (password == \"\") {\n //document.getElementById(\"divSMSButton\").innerHTML = '点击获取短信验证码';\n //password.focus();\n return 2;\n }\n if (yzmbox == \"\") {\n //document.getElementById(\"divSMSButton\").innerHTML = '点击获取短信验证码';\n yzmbox.focus();\n return 3;\n }\n var mobileNoNew = strEncode(userName, \"pdcss\", \"css\", \"co\");\n var workNoNew = strEncode(workNo, \"pdcss\", \"css\", \"co\");\n var passwordNew = strEncode(password, \"pdcss\", \"css\", \"co\");\n return [mobileNoNew, workNoNew, passwordNew]\n } catch (e) {\n return e\n }\n}\n\nfunction onSubmitIt(){\n\t\tenterClickNo = 
1;\n\t\tif(document.forms[0].serviceMobileNo.value==''){\n \t\t\talert('请输入变更号码');\n \t\t\tdocument.forms[0].serviceMobileNo.focus();\n \t\t\tenterClickNo = 0;\n \t\t\treturn false;\n \t\t}\n \t\tvar ployId = document.getElementById('ployId').value;\n\t\tif(ployId==''){alert(\"请选择迁转活动\");return ;}\n \t\tvar productId = document.getElementById('productId').value;\n\t\tif(productId==''){alert(\"请选择迁转套餐\");return ;}\n\t \twindow.top.showEstopDiv();\n\t \tdocument.getElementById(\"submitBtn\").style.display = \"none\";\n\t\tdocument.forms[0].action = \"mealTransferSubmit.action\";\n\t\tdocument.forms[0].submit();\n\t}\n\nfunction userInfoCheck(mobileNoForGuest){\n\tvar url = \"../s_channel/ajaxUserInfoCheck.action?useRequiredId=a6380b46-80ef-4ed8-9c8e-89280fc9aca2&pay.mobileNoForGuest=\"+mobileNoForGuest+\"&t=463466\";\n\n\twindow.top.showEstopDiv();\n\tvar userStauts = false;\n\t$.ajax({\n\t\t type: 'POST',\n\t\t url: url,\n\t\t async: false,//异步 true 同步false\n\t\t success: function(data){\n\t\t \t\ttds = data.split(\"~\");\n\t\t \t\t//2014-12-18 是否4G卡标红\n\t\t \t\tvar htmll=tds[1];\n\t\t\t\tvar if4g;\n\t\t\t\tif(htmll.indexOf('是否4G卡')>0){\n\t\t\t\t\tif4g = htmll.substring(htmll.indexOf('是否4G卡'),htmll.length);\n\t\t\t\t\thtmll= htmll.substring(0,htmll.indexOf('是否4G卡'));\n\t\t\t\t\tif4g = \"\"+if4g+\"\";\n\t\t\t\t\ttds[1] = htmll +if4g;\n\t\t\t\t}\n\t\t\t\ttds[1] +=\"    归属地:\"+tds[8]+\"\";\n\n\t \t\t\tvar html = \"客户姓名:\"+tds[0]+\" \"+tds[1];\n\t \t\t\t$(\"#userInfo\").html(html);\n\t \t\t\t$(\"#userInfo1\").html(\"相关信息:\");\n\n\t \t\t\t$(\"#userInfotr\").show();\n\t \t\t\tdocument.getElementById(\"bossId\").value = tds[6];\n\t \t\t\twindow.top.hideEstopDiv();\n\t \t\t\tif(tds[1]==\"参数错误\"||tds[2]==\"参数错误\")\n\t \t\t\t{\n\t \t\t\t\tuserStauts = false;\n\t \t\t\t}\n\t \t\t\telse\n\t \t\t\t{\n\t \t\t\t\tuserStauts = true;\n\t \t\t\t}\n\t\t\t },\n\t\t dataType: \"text\"\n\t\t});\n}\n\nfunction menuNodeClick(url, openModule, auth, urlType, id) {\n var alistNo = '';\n var my ='';\n // var arr = alistNo.split(\"-\");\n // if ($.inArray(id, arr) != -1) {\n // alert(\"对不起,该菜单已下线!\");\n // $('#' + id + \"no\").remove();\n // return;\n // }\n if (url != 'undefined' && url != '' && url.indexOf('#') < 0) {\n if (url.indexOf('?') == -1) {\n url = url + '?auth=' + auth + \"&t=\" + parseInt(100000 * Math.random());\n } else {\n url = url + '&auth=' + auth + \"&t=\" + parseInt(100000 * Math.random());\n }\n if ('other' != urlType) {\n url = \"../\" + url;\n }\n if (openModule == 'N') {\n my = url;\n } else {\n if (url) {\n // window.open(url);\n return url\n }\n }\n if (id) {\n hide(id);\n }\n }\n\n}\n\n\nfunction yzmboxClick() {\n //location.reload(true);\n var rand = Math.random();\n var url = \"https://211.138.30.200/WSKF/s_channel/verifyCodeGenerator.action?rand=\" + rand;\n return url\n\n}\n\nfunction strEncode(data, firstKey, secondKey, thirdKey) {\n var leng = data.length;\n var encData = \"\";\n var firstKeyBt, secondKeyBt, thirdKeyBt, firstLength, secondLength, thirdLength;\n if (firstKey != null && firstKey != \"\") {\n firstKeyBt = getKeyBytes(firstKey);\n firstLength = firstKeyBt.length;\n }\n if (secondKey != null && secondKey != \"\") {\n secondKeyBt = getKeyBytes(secondKey);\n secondLength = secondKeyBt.length;\n }\n if (thirdKey != null && thirdKey != \"\") {\n thirdKeyBt = getKeyBytes(thirdKey);\n thirdLength = thirdKeyBt.length;\n }\n\n if (leng > 0) {\n if (leng < 4) {\n var bt = strToBt(data);\n var encByte;\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != 
\"\" && thirdKey != null && thirdKey != \"\") {\n var tempBt;\n var x, y, z;\n tempBt = bt;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n for (y = 0; y < secondLength; y++) {\n tempBt = enc(tempBt, secondKeyBt[y]);\n }\n for (z = 0; z < thirdLength; z++) {\n tempBt = enc(tempBt, thirdKeyBt[z]);\n }\n encByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\") {\n var tempBt;\n var x, y;\n tempBt = bt;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n for (y = 0; y < secondLength; y++) {\n tempBt = enc(tempBt, secondKeyBt[y]);\n }\n encByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\") {\n var tempBt;\n var x = 0;\n tempBt = bt;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n encByte = tempBt;\n }\n }\n }\n encData = bt64ToHex(encByte);\n } else {\n var iterator = parseInt(leng / 4);\n var remainder = leng % 4;\n var i = 0;\n for (i = 0; i < iterator; i++) {\n var tempData = data.substring(i * 4 + 0, i * 4 + 4);\n var tempByte = strToBt(tempData);\n var encByte;\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\" && thirdKey != null && thirdKey != \"\") {\n var tempBt;\n var x, y, z;\n tempBt = tempByte;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n for (y = 0; y < secondLength; y++) {\n tempBt = enc(tempBt, secondKeyBt[y]);\n }\n for (z = 0; z < thirdLength; z++) {\n tempBt = enc(tempBt, thirdKeyBt[z]);\n }\n encByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\") {\n var tempBt;\n var x, y;\n tempBt = tempByte;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n for (y = 0; y < secondLength; y++) {\n tempBt = enc(tempBt, secondKeyBt[y]);\n }\n encByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\") {\n var tempBt;\n var x;\n tempBt = tempByte;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n encByte = tempBt;\n }\n }\n }\n encData += bt64ToHex(encByte);\n }\n if (remainder > 0) {\n var remainderData = data.substring(iterator * 4 + 0, leng);\n var tempByte = strToBt(remainderData);\n var encByte;\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\" && thirdKey != null && thirdKey != \"\") {\n var tempBt;\n var x, y, z;\n tempBt = tempByte;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n for (y = 0; y < secondLength; y++) {\n tempBt = enc(tempBt, secondKeyBt[y]);\n }\n for (z = 0; z < thirdLength; z++) {\n tempBt = enc(tempBt, thirdKeyBt[z]);\n }\n encByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\") {\n var tempBt;\n var x, y;\n tempBt = tempByte;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n for (y = 0; y < secondLength; y++) {\n tempBt = enc(tempBt, secondKeyBt[y]);\n }\n encByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\") {\n var tempBt;\n var x;\n tempBt = tempByte;\n for (x = 0; x < firstLength; x++) {\n tempBt = enc(tempBt, firstKeyBt[x]);\n }\n encByte = tempBt;\n }\n }\n }\n encData += bt64ToHex(encByte);\n }\n }\n }\n return encData;\n}\n\n/*\n* decrypt the encrypted string to the original string\n*\n* return the original string\n*/\n\n/*解密的函数,三个密钥(解密密钥同加密密钥)*/\nfunction strDecode(data, firstKey, secondKey, 
thirdKey) {\n var leng = data.length;\n var decStr = \"\";\n var firstKeyBt, secondKeyBt, thirdKeyBt, firstLength, secondLength, thirdLength;\n if (firstKey != null && firstKey != \"\") {\n firstKeyBt = getKeyBytes(firstKey);\n firstLength = firstKeyBt.length;\n }\n if (secondKey != null && secondKey != \"\") {\n secondKeyBt = getKeyBytes(secondKey);\n secondLength = secondKeyBt.length;\n }\n if (thirdKey != null && thirdKey != \"\") {\n thirdKeyBt = getKeyBytes(thirdKey);\n thirdLength = thirdKeyBt.length;\n }\n var iterator = parseInt(leng / 16);\n var i = 0;\n for (i = 0; i < iterator; i++) {\n var tempData = data.substring(i * 16 + 0, i * 16 + 16);\n var strByte = hexToBt64(tempData);\n var intByte = new Array(64);\n var j = 0;\n for (j = 0; j < 64; j++) {\n intByte[j] = parseInt(strByte.substring(j, j + 1));\n }\n var decByte;\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\" && thirdKey != null && thirdKey != \"\") {\n var tempBt;\n var x, y, z;\n tempBt = intByte;\n for (x = thirdLength - 1; x >= 0; x--) {\n tempBt = dec(tempBt, thirdKeyBt[x]);\n }\n for (y = secondLength - 1; y >= 0; y--) {\n tempBt = dec(tempBt, secondKeyBt[y]);\n }\n for (z = firstLength - 1; z >= 0; z--) {\n tempBt = dec(tempBt, firstKeyBt[z]);\n }\n decByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\" && secondKey != null && secondKey != \"\") {\n var tempBt;\n var x, y, z;\n tempBt = intByte;\n for (x = secondLength - 1; x >= 0; x--) {\n tempBt = dec(tempBt, secondKeyBt[x]);\n }\n for (y = firstLength - 1; y >= 0; y--) {\n tempBt = dec(tempBt, firstKeyBt[y]);\n }\n decByte = tempBt;\n } else {\n if (firstKey != null && firstKey != \"\") {\n var tempBt;\n var x, y, z;\n tempBt = intByte;\n for (x = firstLength - 1; x >= 0; x--) {\n tempBt = dec(tempBt, firstKeyBt[x]);\n }\n decByte = tempBt;\n }\n }\n }\n decStr += byteToString(decByte);\n }\n return decStr;\n}\n\n/*\n* chang the string into the bit array\n*\n* return bit array(it's length % 64 = 0)\n*/\nfunction getKeyBytes(key) {\n var keyBytes = new Array();\n var leng = key.length;\n var iterator = parseInt(leng / 4);\n var remainder = leng % 4;\n var i = 0;\n for (i = 0; i < iterator; i++) {\n keyBytes[i] = strToBt(key.substring(i * 4 + 0, i * 4 + 4));\n }\n if (remainder > 0) {\n keyBytes[i] = strToBt(key.substring(i * 4 + 0, leng));\n }\n return keyBytes;\n}\n\n/*\n* chang the string(it's length <= 4) into the bit array\n*\n* return bit array(it's length = 64)\n*/\nfunction strToBt(str) {\n var leng = str.length;\n var bt = new Array(64);\n if (leng < 4) {\n var i = 0, j = 0, p = 0, q = 0;\n for (i = 0; i < leng; i++) {\n var k = str.charCodeAt(i);\n for (j = 0; j < 16; j++) {\n var pow = 1, m = 0;\n for (m = 15; m > j; m--) {\n pow *= 2;\n }\n bt[16 * i + j] = parseInt(k / pow) % 2;\n }\n }\n for (p = leng; p < 4; p++) {\n var k = 0;\n for (q = 0; q < 16; q++) {\n var pow = 1, m = 0;\n for (m = 15; m > q; m--) {\n pow *= 2;\n }\n bt[16 * p + q] = parseInt(k / pow) % 2;\n }\n }\n } else {\n for (i = 0; i < 4; i++) {\n var k = str.charCodeAt(i);\n for (j = 0; j < 16; j++) {\n var pow = 1;\n for (m = 15; m > j; m--) {\n pow *= 2;\n }\n bt[16 * i + j] = parseInt(k / pow) % 2;\n }\n }\n }\n return bt;\n}\n\n/*\n* chang the bit(it's length = 4) into the hex\n*\n* return hex\n*/\nfunction bt4ToHex(binary) {\n var hex;\n switch (binary) {\n case \"0000\" :\n hex = \"0\";\n break;\n case \"0001\" :\n hex = \"1\";\n break;\n case \"0010\" :\n hex = \"2\";\n break;\n case \"0011\" :\n hex = \"3\";\n break;\n 
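        // This switch (and its inverse in hexToBt4 further down) is a
        // 4-bit binary/hex lookup table written out case by case. A
        // compact equivalent (an assumption, not part of the original
        // file; padStart needs ES2017) would be:
        //   function bt4ToHex(b) { return parseInt(b, 2).toString(16).toUpperCase(); }
        //   function hexToBt4(h) { return parseInt(h, 16).toString(2).padStart(4, "0"); }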
case \"0100\" :\n hex = \"4\";\n break;\n case \"0101\" :\n hex = \"5\";\n break;\n case \"0110\" :\n hex = \"6\";\n break;\n case \"0111\" :\n hex = \"7\";\n break;\n case \"1000\" :\n hex = \"8\";\n break;\n case \"1001\" :\n hex = \"9\";\n break;\n case \"1010\" :\n hex = \"A\";\n break;\n case \"1011\" :\n hex = \"B\";\n break;\n case \"1100\" :\n hex = \"C\";\n break;\n case \"1101\" :\n hex = \"D\";\n break;\n case \"1110\" :\n hex = \"E\";\n break;\n case \"1111\" :\n hex = \"F\";\n break;\n }\n return hex;\n}\n\n/*\n* chang the hex into the bit(it's length = 4)\n*\n* return the bit(it's length = 4)\n*/\nfunction hexToBt4(hex) {\n var binary;\n switch (hex) {\n case \"0\" :\n binary = \"0000\";\n break;\n case \"1\" :\n binary = \"0001\";\n break;\n case \"2\" :\n binary = \"0010\";\n break;\n case \"3\" :\n binary = \"0011\";\n break;\n case \"4\" :\n binary = \"0100\";\n break;\n case \"5\" :\n binary = \"0101\";\n break;\n case \"6\" :\n binary = \"0110\";\n break;\n case \"7\" :\n binary = \"0111\";\n break;\n case \"8\" :\n binary = \"1000\";\n break;\n case \"9\" :\n binary = \"1001\";\n break;\n case \"A\" :\n binary = \"1010\";\n break;\n case \"B\" :\n binary = \"1011\";\n break;\n case \"C\" :\n binary = \"1100\";\n break;\n case \"D\" :\n binary = \"1101\";\n break;\n case \"E\" :\n binary = \"1110\";\n break;\n case \"F\" :\n binary = \"1111\";\n break;\n }\n return binary;\n}\n\n/*\n* chang the bit(it's length = 64) into the string\n*\n* return string\n*/\nfunction byteToString(byteData) {\n var str = \"\";\n for (i = 0; i < 4; i++) {\n var count = 0;\n for (j = 0; j < 16; j++) {\n var pow = 1;\n for (m = 15; m > j; m--) {\n pow *= 2;\n }\n count += byteData[16 * i + j] * pow;\n }\n if (count != 0) {\n str += String.fromCharCode(count);\n }\n }\n return str;\n}\n\nfunction bt64ToHex(byteData) {\n var hex = \"\";\n for (i = 0; i < 16; i++) {\n var bt = \"\";\n for (j = 0; j < 4; j++) {\n bt += byteData[i * 4 + j];\n }\n hex += bt4ToHex(bt);\n }\n return hex;\n}\n\nfunction hexToBt64(hex) {\n var binary = \"\";\n for (i = 0; i < 16; i++) {\n binary += hexToBt4(hex.substring(i, i + 1));\n }\n return binary;\n}\n\n/*\n* the 64 bit des core arithmetic\n*/\n\nfunction enc(dataByte, keyByte) {\n var keys = generateKeys(keyByte);\n var ipByte = initPermute(dataByte);\n var ipLeft = new Array(32);\n var ipRight = new Array(32);\n var tempLeft = new Array(32);\n var i = 0, j = 0, k = 0, m = 0, n = 0;\n for (k = 0; k < 32; k++) {\n ipLeft[k] = ipByte[k];\n ipRight[k] = ipByte[32 + k];\n }\n for (i = 0; i < 16; i++) {\n for (j = 0; j < 32; j++) {\n tempLeft[j] = ipLeft[j];\n ipLeft[j] = ipRight[j];\n }\n var key = new Array(48);\n for (m = 0; m < 48; m++) {\n key[m] = keys[i][m];\n }\n var tempRight = xor(pPermute(sBoxPermute(xor(expandPermute(ipRight), key))), tempLeft);\n for (n = 0; n < 32; n++) {\n ipRight[n] = tempRight[n];\n }\n }\n\n\n var finalData = new Array(64);\n for (i = 0; i < 32; i++) {\n finalData[i] = ipRight[i];\n finalData[32 + i] = ipLeft[i];\n }\n return finallyPermute(finalData);\n}\n\nfunction dec(dataByte, keyByte) {\n var keys = generateKeys(keyByte);\n var ipByte = initPermute(dataByte);\n var ipLeft = new Array(32);\n var ipRight = new Array(32);\n var tempLeft = new Array(32);\n var i = 0, j = 0, k = 0, m = 0, n = 0;\n for (k = 0; k < 32; k++) {\n ipLeft[k] = ipByte[k];\n ipRight[k] = ipByte[32 + k];\n }\n for (i = 15; i >= 0; i--) {\n for (j = 0; j < 32; j++) {\n tempLeft[j] = ipLeft[j];\n ipLeft[j] = ipRight[j];\n }\n var key = new Array(48);\n for (m 
= 0; m < 48; m++) {\n key[m] = keys[i][m];\n }\n\n var tempRight = xor(pPermute(sBoxPermute(xor(expandPermute(ipRight), key))), tempLeft);\n for (n = 0; n < 32; n++) {\n ipRight[n] = tempRight[n];\n }\n }\n\n var finalData = new Array(64);\n for (i = 0; i < 32; i++) {\n finalData[i] = ipRight[i];\n finalData[32 + i] = ipLeft[i];\n }\n return finallyPermute(finalData);\n}\n\nfunction initPermute(originalData) {\n var ipByte = new Array(64);\n for (i = 0, m = 1, n = 0; i < 4; i++, m += 2, n += 2) {\n for (j = 7, k = 0; j >= 0; j--, k++) {\n ipByte[i * 8 + k] = originalData[j * 8 + m];\n ipByte[i * 8 + k + 32] = originalData[j * 8 + n];\n }\n }\n return ipByte;\n}\n\nfunction expandPermute(rightData) {\n var epByte = new Array(48);\n for (i = 0; i < 8; i++) {\n if (i == 0) {\n epByte[i * 6 + 0] = rightData[31];\n } else {\n epByte[i * 6 + 0] = rightData[i * 4 - 1];\n }\n epByte[i * 6 + 1] = rightData[i * 4 + 0];\n epByte[i * 6 + 2] = rightData[i * 4 + 1];\n epByte[i * 6 + 3] = rightData[i * 4 + 2];\n epByte[i * 6 + 4] = rightData[i * 4 + 3];\n if (i == 7) {\n epByte[i * 6 + 5] = rightData[0];\n } else {\n epByte[i * 6 + 5] = rightData[i * 4 + 4];\n }\n }\n return epByte;\n}\n\nfunction xor(byteOne, byteTwo) {\n var xorByte = new Array(byteOne.length);\n for (i = 0; i < byteOne.length; i++) {\n xorByte[i] = byteOne[i] ^ byteTwo[i];\n }\n return xorByte;\n}\n\nfunction sBoxPermute(expandByte) {\n\n var sBoxByte = new Array(32);\n var binary = \"\";\n var s1 = [\n [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],\n [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],\n [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],\n [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]];\n /* Table - s2 */\n var s2 = [\n [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],\n [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],\n [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],\n [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]];\n /* Table - s3 */\n var s3 = [\n [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],\n [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],\n [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],\n [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]];\n /* Table - s4 */\n var s4 = [\n [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],\n [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],\n [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],\n [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]];\n /* Table - s5 */\n var s5 = [\n [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],\n [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],\n [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],\n [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]];\n /* Table - s6 */\n var s6 = [\n [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],\n [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],\n [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],\n [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]];\n /* Table - s7 */\n var s7 = [\n [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],\n [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],\n [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],\n [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]];\n /* Table - s8 */\n var s8 = [\n [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],\n [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],\n [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],\n [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 
3, 5, 6, 11]];\n\n for (m = 0; m < 8; m++) {\n var i = 0, j = 0;\n i = expandByte[m * 6 + 0] * 2 + expandByte[m * 6 + 5];\n j = expandByte[m * 6 + 1] * 2 * 2 * 2\n + expandByte[m * 6 + 2] * 2 * 2\n + expandByte[m * 6 + 3] * 2\n + expandByte[m * 6 + 4];\n switch (m) {\n case 0 :\n binary = getBoxBinary(s1[i][j]);\n break;\n case 1 :\n binary = getBoxBinary(s2[i][j]);\n break;\n case 2 :\n binary = getBoxBinary(s3[i][j]);\n break;\n case 3 :\n binary = getBoxBinary(s4[i][j]);\n break;\n case 4 :\n binary = getBoxBinary(s5[i][j]);\n break;\n case 5 :\n binary = getBoxBinary(s6[i][j]);\n break;\n case 6 :\n binary = getBoxBinary(s7[i][j]);\n break;\n case 7 :\n binary = getBoxBinary(s8[i][j]);\n break;\n }\n sBoxByte[m * 4 + 0] = parseInt(binary.substring(0, 1));\n sBoxByte[m * 4 + 1] = parseInt(binary.substring(1, 2));\n sBoxByte[m * 4 + 2] = parseInt(binary.substring(2, 3));\n sBoxByte[m * 4 + 3] = parseInt(binary.substring(3, 4));\n }\n return sBoxByte;\n}\n\nfunction pPermute(sBoxByte) {\n var pBoxPermute = new Array(32);\n pBoxPermute[0] = sBoxByte[15];\n pBoxPermute[1] = sBoxByte[6];\n pBoxPermute[2] = sBoxByte[19];\n pBoxPermute[3] = sBoxByte[20];\n pBoxPermute[4] = sBoxByte[28];\n pBoxPermute[5] = sBoxByte[11];\n pBoxPermute[6] = sBoxByte[27];\n pBoxPermute[7] = sBoxByte[16];\n pBoxPermute[8] = sBoxByte[0];\n pBoxPermute[9] = sBoxByte[14];\n pBoxPermute[10] = sBoxByte[22];\n pBoxPermute[11] = sBoxByte[25];\n pBoxPermute[12] = sBoxByte[4];\n pBoxPermute[13] = sBoxByte[17];\n pBoxPermute[14] = sBoxByte[30];\n pBoxPermute[15] = sBoxByte[9];\n pBoxPermute[16] = sBoxByte[1];\n pBoxPermute[17] = sBoxByte[7];\n pBoxPermute[18] = sBoxByte[23];\n pBoxPermute[19] = sBoxByte[13];\n pBoxPermute[20] = sBoxByte[31];\n pBoxPermute[21] = sBoxByte[26];\n pBoxPermute[22] = sBoxByte[2];\n pBoxPermute[23] = sBoxByte[8];\n pBoxPermute[24] = sBoxByte[18];\n pBoxPermute[25] = sBoxByte[12];\n pBoxPermute[26] = sBoxByte[29];\n pBoxPermute[27] = sBoxByte[5];\n pBoxPermute[28] = sBoxByte[21];\n pBoxPermute[29] = sBoxByte[10];\n pBoxPermute[30] = sBoxByte[3];\n pBoxPermute[31] = sBoxByte[24];\n return pBoxPermute;\n}\n\nfunction finallyPermute(endByte) {\n var fpByte = new Array(64);\n fpByte[0] = endByte[39];\n fpByte[1] = endByte[7];\n fpByte[2] = endByte[47];\n fpByte[3] = endByte[15];\n fpByte[4] = endByte[55];\n fpByte[5] = endByte[23];\n fpByte[6] = endByte[63];\n fpByte[7] = endByte[31];\n fpByte[8] = endByte[38];\n fpByte[9] = endByte[6];\n fpByte[10] = endByte[46];\n fpByte[11] = endByte[14];\n fpByte[12] = endByte[54];\n fpByte[13] = endByte[22];\n fpByte[14] = endByte[62];\n fpByte[15] = endByte[30];\n fpByte[16] = endByte[37];\n fpByte[17] = endByte[5];\n fpByte[18] = endByte[45];\n fpByte[19] = endByte[13];\n fpByte[20] = endByte[53];\n fpByte[21] = endByte[21];\n fpByte[22] = endByte[61];\n fpByte[23] = endByte[29];\n fpByte[24] = endByte[36];\n fpByte[25] = endByte[4];\n fpByte[26] = endByte[44];\n fpByte[27] = endByte[12];\n fpByte[28] = endByte[52];\n fpByte[29] = endByte[20];\n fpByte[30] = endByte[60];\n fpByte[31] = endByte[28];\n fpByte[32] = endByte[35];\n fpByte[33] = endByte[3];\n fpByte[34] = endByte[43];\n fpByte[35] = endByte[11];\n fpByte[36] = endByte[51];\n fpByte[37] = endByte[19];\n fpByte[38] = endByte[59];\n fpByte[39] = endByte[27];\n fpByte[40] = endByte[34];\n fpByte[41] = endByte[2];\n fpByte[42] = endByte[42];\n fpByte[43] = endByte[10];\n fpByte[44] = endByte[50];\n fpByte[45] = endByte[18];\n fpByte[46] = endByte[58];\n fpByte[47] = endByte[26];\n fpByte[48] = 
endByte[33];\n fpByte[49] = endByte[1];\n fpByte[50] = endByte[41];\n fpByte[51] = endByte[9];\n fpByte[52] = endByte[49];\n fpByte[53] = endByte[17];\n fpByte[54] = endByte[57];\n fpByte[55] = endByte[25];\n fpByte[56] = endByte[32];\n fpByte[57] = endByte[0];\n fpByte[58] = endByte[40];\n fpByte[59] = endByte[8];\n fpByte[60] = endByte[48];\n fpByte[61] = endByte[16];\n fpByte[62] = endByte[56];\n fpByte[63] = endByte[24];\n return fpByte;\n}\n\nfunction getBoxBinary(i) {\n var binary = \"\";\n switch (i) {\n case 0 :\n binary = \"0000\";\n break;\n case 1 :\n binary = \"0001\";\n break;\n case 2 :\n binary = \"0010\";\n break;\n case 3 :\n binary = \"0011\";\n break;\n case 4 :\n binary = \"0100\";\n break;\n case 5 :\n binary = \"0101\";\n break;\n case 6 :\n binary = \"0110\";\n break;\n case 7 :\n binary = \"0111\";\n break;\n case 8 :\n binary = \"1000\";\n break;\n case 9 :\n binary = \"1001\";\n break;\n case 10 :\n binary = \"1010\";\n break;\n case 11 :\n binary = \"1011\";\n break;\n case 12 :\n binary = \"1100\";\n break;\n case 13 :\n binary = \"1101\";\n break;\n case 14 :\n binary = \"1110\";\n break;\n case 15 :\n binary = \"1111\";\n break;\n }\n return binary;\n}\n\n/*\n* generate 16 keys for xor\n*\n*/\nfunction generateKeys(keyByte) {\n var key = new Array(56);\n var keys = new Array();\n\n keys[0] = new Array();\n keys[1] = new Array();\n keys[2] = new Array();\n keys[3] = new Array();\n keys[4] = new Array();\n keys[5] = new Array();\n keys[6] = new Array();\n keys[7] = new Array();\n keys[8] = new Array();\n keys[9] = new Array();\n keys[10] = new Array();\n keys[11] = new Array();\n keys[12] = new Array();\n keys[13] = new Array();\n keys[14] = new Array();\n keys[15] = new Array();\n var loop = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1];\n\n for (i = 0; i < 7; i++) {\n for (j = 0, k = 7; j < 8; j++, k--) {\n key[i * 8 + j] = keyByte[8 * k + i];\n }\n }\n\n var i = 0;\n for (i = 0; i < 16; i++) {\n var tempLeft = 0;\n var tempRight = 0;\n for (j = 0; j < loop[i]; j++) {\n tempLeft = key[0];\n tempRight = key[28];\n for (k = 0; k < 27; k++) {\n key[k] = key[k + 1];\n key[28 + k] = key[29 + k];\n }\n key[27] = tempLeft;\n key[55] = tempRight;\n }\n var tempKey = new Array(48);\n tempKey[0] = key[13];\n tempKey[1] = key[16];\n tempKey[2] = key[10];\n tempKey[3] = key[23];\n tempKey[4] = key[0];\n tempKey[5] = key[4];\n tempKey[6] = key[2];\n tempKey[7] = key[27];\n tempKey[8] = key[14];\n tempKey[9] = key[5];\n tempKey[10] = key[20];\n tempKey[11] = key[9];\n tempKey[12] = key[22];\n tempKey[13] = key[18];\n tempKey[14] = key[11];\n tempKey[15] = key[3];\n tempKey[16] = key[25];\n tempKey[17] = key[7];\n tempKey[18] = key[15];\n tempKey[19] = key[6];\n tempKey[20] = key[26];\n tempKey[21] = key[19];\n tempKey[22] = key[12];\n tempKey[23] = key[1];\n tempKey[24] = key[40];\n tempKey[25] = key[51];\n tempKey[26] = key[30];\n tempKey[27] = key[36];\n tempKey[28] = key[46];\n tempKey[29] = key[54];\n tempKey[30] = key[29];\n tempKey[31] = key[39];\n tempKey[32] = key[50];\n tempKey[33] = key[44];\n tempKey[34] = key[32];\n tempKey[35] = key[47];\n tempKey[36] = key[43];\n tempKey[37] = key[48];\n tempKey[38] = key[38];\n tempKey[39] = key[55];\n tempKey[40] = key[33];\n tempKey[41] = key[52];\n tempKey[42] = key[45];\n tempKey[43] = key[41];\n tempKey[44] = key[49];\n tempKey[45] = key[35];\n tempKey[46] = key[28];\n tempKey[47] = key[31];\n switch (i) {\n case 0:\n for (m = 0; m < 48; m++) {\n keys[0][m] = tempKey[m];\n }\n break;\n case 1:\n for (m = 0; m < 48; 
m++) {\n keys[1][m] = tempKey[m];\n }\n break;\n case 2:\n for (m = 0; m < 48; m++) {\n keys[2][m] = tempKey[m];\n }\n break;\n case 3:\n for (m = 0; m < 48; m++) {\n keys[3][m] = tempKey[m];\n }\n break;\n case 4:\n for (m = 0; m < 48; m++) {\n keys[4][m] = tempKey[m];\n }\n break;\n case 5:\n for (m = 0; m < 48; m++) {\n keys[5][m] = tempKey[m];\n }\n break;\n case 6:\n for (m = 0; m < 48; m++) {\n keys[6][m] = tempKey[m];\n }\n break;\n case 7:\n for (m = 0; m < 48; m++) {\n keys[7][m] = tempKey[m];\n }\n break;\n case 8:\n for (m = 0; m < 48; m++) {\n keys[8][m] = tempKey[m];\n }\n break;\n case 9:\n for (m = 0; m < 48; m++) {\n keys[9][m] = tempKey[m];\n }\n break;\n\n case 10:\n for (m = 0; m < 48; m++) {\n keys[10][m] = tempKey[m];\n }\n break;\n case 11:\n for (m = 0; m < 48; m++) {\n keys[11][m] = tempKey[m];\n }\n break;\n case 12:\n for (m = 0; m < 48; m++) {\n keys[12][m] = tempKey[m];\n }\n break;\n case 13:\n for (m = 0; m < 48; m++) {\n keys[13][m] = tempKey[m];\n }\n break;\n case 14:\n for (m = 0; m < 48; m++) {\n keys[14][m] = tempKey[m];\n }\n break;\n case 15:\n for (m = 0; m < 48; m++) {\n keys[15][m] = tempKey[m];\n }\n break;\n }\n }\n return keys;\n}\n\n/*end*/'''\n return file\n\n\nif __name__ == '__main__':\n # clientMac = '00:25:11:EB:27:98'\n # login_phone = '18303995539'\n # workNo = 'J120249'\n # login_password = '100861'\n clientMac = ''\n login_phone = ''\n workNo = ''\n login_password = ''\n app = wx.App()\n login_frame = LOGIN(parent=None, id=-1)\n action_frame = YD_MAKE_ORDER(parent=None, id=-1)\n login_frame.Show()\n login_frame.Center()\n # frame = YD_MAKE_ORDER(parent=None, id=-1)\n # frame.Show()\n # frame.Center()\n app.MainLoop()\n", "sub_path": "uploadFiles/ymc/main_old.py", "file_name": "main_old.py", "file_ext": "py", "file_size_in_byte": 104427, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 51, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 51, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 51, "usage_type": "attribute"}, {"api_name": "base64.encodebytes", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 67, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 73, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 73, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 73, "usage_type": "attribute"}, {"api_name": "base64.decodebytes", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 79, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 185, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 189, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 190, "usage_type": "call"}, {"api_name": "wx.Icon", "line_number": 194, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 200, "usage_type": "call"}, {"api_name": "uuid.getnode", "line_number": 200, "usage_type": "call"}, {"api_name": "time.time", "line_number": 209, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 212, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 214, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 217, "usage_type": "call"}, {"api_name": "time.time", "line_number": 230, "usage_type": 
"call"}, {"api_name": "requests.post", "line_number": 233, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 234, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 237, "usage_type": "call"}, {"api_name": "urllib3.disable_warnings", "line_number": 249, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 252, "usage_type": "call"}, {"api_name": "time.time", "line_number": 265, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 275, "usage_type": "attribute"}, {"api_name": "wx.Locale", "line_number": 277, "usage_type": "call"}, {"api_name": "wx.LANGUAGE_ENGLISH", "line_number": 277, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 278, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 278, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 280, "usage_type": "call"}, {"api_name": "wx.EVT_CLOSE", "line_number": 281, "usage_type": "attribute"}, {"api_name": "execjs.compile", "line_number": 286, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 288, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 288, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 289, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 289, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 290, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 290, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 291, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 291, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 292, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 292, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 293, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 293, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 294, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 294, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 296, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 296, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 297, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 297, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 298, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 298, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 298, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 299, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 299, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 299, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 300, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 300, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 300, "usage_type": "attribute"}, {"api_name": "wx.Image", "line_number": 303, "usage_type": "call"}, {"api_name": "wx.BITMAP_TYPE_JPEG", "line_number": 303, "usage_type": "attribute"}, {"api_name": "wx.BitmapButton", "line_number": 304, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 306, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 306, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 306, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 
307, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 308, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 310, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 311, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 313, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 314, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 314, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 314, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 316, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 316, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 316, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 318, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 320, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 320, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 321, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 321, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 322, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 322, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 323, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 323, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 324, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 324, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 325, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 325, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 327, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 327, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 327, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 330, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 330, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 330, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 332, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 332, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 332, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 334, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 334, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 334, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 338, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 338, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 338, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 340, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 340, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 340, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 342, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 342, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 342, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 344, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 344, 
"usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 344, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 348, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 348, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 348, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 350, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 350, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 350, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 351, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 351, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 351, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 353, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 356, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 356, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 356, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 358, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 358, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 358, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 360, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 360, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 360, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 363, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 363, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 365, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 365, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 366, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 366, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 367, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 367, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 368, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 368, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 369, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 369, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 370, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 370, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 371, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 371, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 378, "usage_type": "call"}, {"api_name": "wx.MessageBox", "line_number": 381, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 381, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 381, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 394, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 396, "usage_type": "call"}, {"api_name": "wx.BITMAP_TYPE_JPEG", "line_number": 396, "usage_type": "attribute"}, {"api_name": "wx.Bitmap", "line_number": 398, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 446, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 448, "usage_type": "call"}, {"api_name": 
"traceback.format_exc", "line_number": 457, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 463, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 500, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 501, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 508, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 510, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 514, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 578, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 593, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 594, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 602, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 604, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 609, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 631, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 633, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 640, "usage_type": "attribute"}, {"api_name": "wx.Locale", "line_number": 642, "usage_type": "call"}, {"api_name": "wx.LANGUAGE_ENGLISH", "line_number": 642, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 643, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 643, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 647, "usage_type": "call"}, {"api_name": "wx.EVT_CLOSE", "line_number": 648, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 651, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 651, "usage_type": "attribute"}, {"api_name": "wx.Choice", "line_number": 653, "usage_type": "call"}, {"api_name": "wx.EVT_CHOICE", "line_number": 655, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 657, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 658, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 659, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 660, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 664, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 664, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 665, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 665, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 666, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 666, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 668, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 668, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 669, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 669, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 670, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 670, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 673, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 673, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 676, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 678, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 679, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 679, 
"usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 679, "usage_type": "attribute"}, {"api_name": "wx.CENTER", "line_number": 680, "usage_type": "attribute"}, {"api_name": "wx.CENTER", "line_number": 681, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 682, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 682, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 688, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 688, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 689, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 689, "usage_type": "attribute"}, {"api_name": "wx.Choice", "line_number": 691, "usage_type": "call"}, {"api_name": "wx.EVT_CHOICE", "line_number": 692, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 694, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 694, "usage_type": "attribute"}, {"api_name": "wx.Choice", "line_number": 696, "usage_type": "call"}, {"api_name": "wx.EVT_CHOICE", "line_number": 697, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_TOP", "line_number": 699, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 700, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_TOP", "line_number": 700, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 701, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_TOP", "line_number": 701, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 702, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_TOP", "line_number": 702, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 704, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 704, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 706, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 706, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 707, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 707, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 708, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 709, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 710, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 710, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 711, "usage_type": "call"}, {"api_name": "wx.TE_LEFT", "line_number": 711, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 713, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 714, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 717, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 717, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 717, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 721, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 721, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 721, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 724, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 724, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 724, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 727, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 727, "usage_type": 
"attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 727, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 729, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 729, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 729, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 731, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 731, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 731, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 734, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 734, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 738, "usage_type": "call"}, {"api_name": "wx.TE_MULTILINE", "line_number": 738, "usage_type": "attribute"}, {"api_name": "wx.VSCROLL", "line_number": 738, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 741, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 741, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 742, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 742, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 744, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 744, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 747, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 747, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 749, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 749, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 750, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 750, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 751, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 751, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 752, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 752, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 754, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 754, "usage_type": "attribute"}, {"api_name": "execjs.compile", "line_number": 763, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 767, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 816, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 865, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 867, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 908, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 910, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 959, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 962, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 965, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 973, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 978, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 997, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 999, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 1002, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 1065, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 1067, "usage_type": 
"call"}, {"api_name": "re.findall", "line_number": 1093, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 1094, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 1096, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 1098, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 1145, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 1154, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 1156, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 1198, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 1200, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 1276, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 1278, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 1339, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 1361, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 1363, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 1366, "usage_type": "call"}, {"api_name": "wx.MessageBox", "line_number": 1370, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 1370, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 1370, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 2617, "usage_type": "call"}]} +{"seq_id": "107860284", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 10 01:15:02 2019\n\n@author: chaztikov\n\"\"\"\n\nimport os;import numpy as np;import pandas as pd\nimport os,sys,re,subprocess\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport scipy.integrate\nfrom scipy.spatial import KDTree\nfrom scipy.interpolate import BSpline\nfrom scipy.interpolate import splrep, splder, sproot,make_interp_spline\nimport scipy.sparse.linalg as spla\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nimport sklearn.decomposition\nfrom sklearn.decomposition import PCA\n\ncwd = os.getcwd()\ndname = '/home/chaztikov/git/aorta_piv_data/data/original/'\nfnames = os.listdir(dname)\nfnames = ['OpenAreaPerimountWaterbpm60.txt']\n\n# for ifname0,fname0 in enumerate(fnames[:-2]):\nfor ifname0,fname0 in enumerate(fnames):\n fname = dname+fname0\n \n try:\n df = pd.read_csv(fname)\n print(df.columns)\n print(df.shape)\n xx = df.values[:,0]\n yy = df.values[:,1]\n except Exception:\n df = np.loadtxt(fname)\n xx = df[:,0]\n yy = df[:,1]\n \n HR=1\n npeaks = 13\n phi0 = 14150-1\n# phi0 = 0\n phi0 = int(phi0)\n ntau = int(5)\n #ntau = 10\n ntau = int(ntau)\n tau= int(60/HR)\n \n \n plt.figure()\n plt.plot(xx,yy,'b')\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Raw Signal')\n plt.title(fname0)\n plt.savefig('raw_'+str(ifname0)+'.png')\n plt.show()\n \n try:\n xx = df.values[phi0:, 0]\n yy = df.values[phi0:, 1]\n except Exception:\n xx = df[phi0:,0]\n yy = df[phi0:,1]\n\n dyy = np.diff(yy)\n \n nbins = np.sqrt(yy.shape[0] * 1 ).astype(int) \n inz = np.where(yy>0)[0]\n idnz = np.where(np.abs(dyy)>0)[0]\n dyynz = dyy[idnz]\n dyynz = dyy\n pdc = np.percentile(np.abs(dyynz),99.9)\n iddc = np.where(dyynz>pdc )\n peaks = np.sort(np.abs(dyy))[::-1][:2*npeaks]\n ipeaks = np.argsort(np.abs(dyy))[::-1][:2*npeaks]\n #ipeaks = np.argsort(np.abs(dyy))[::-1][:npeaks]\n iipeaks = np.where(yy[ipeaks]>1e-6)[0]\n inzpeaks = ipeaks[iipeaks]+1\n inzpeaks = np.sort(inzpeaks)\n #these are endpoints of interval\n #pair 
these with the start points of signal intervals, marked by izpeaks\n iizpeaks = np.where( np.isclose(yy[ipeaks], 0) )[0]\n izpeaks = ipeaks[iizpeaks]\n izpeaks = np.sort(izpeaks)\n \n #cycles and lengths\n icycle = np.array(list(zip(izpeaks,inzpeaks)))\n minclen=np.min(np.diff(icycle,1))\n maxclen=np.max(np.diff(icycle,1))\n padclen = maxclen-np.diff(icycle,1)[:,0]\n padclen = minclen-np.diff(icycle,1)[:,0] \n icycle[:,1]+=padclen\n times = np.vstack([xx[c[0]:c[1]] for c in icycle]).T\n times -= times[0]\n# times = xx[icycle][:,0][:,None] - xx[icycle]\n output = np.stack([yy[c[0]:c[1]] for c in icycle]).T\n\n plt.figure()\n plt.plot(xx,yy,'b')\n plt.plot(xx[icycle],yy[icycle],'r.')\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Truncated Raw Signal')\n plt.title(fname0)\n plt.savefig('truncraw_'+str(ifname0)+'.png')\n plt.show()\n \n p1,p2=0,100\n p1,p2=np.percentile(yy[inz],p1),np.percentile(yy[inz],p2)\n plt.figure()\n plt.hist(yy[inz],bins=nbins,density=True)\n plt.xlim(p1,p2)\n plt.grid()\n plt.ylabel('pmf')\n plt.xlabel('output')\n plt.title('Raw, Nonzero Signal Histogram')\n plt.savefig('histnz_'+str(ifname0)+'.png')\n plt.show()\n \n \n mean = output.mean(axis=1)\n centered = output-mean[:,None]\n plt.figure()\n plt.plot(times,mean,'k-',lw=8,alpha=0.8,label='mean')\n plt.plot(times,output,'b.',ms=2,alpha=0.4)\n plt.grid()\n plt.xlabel('time')\n plt.ylabel('output')\n plt.title('Signal Cycles as Samples')\n plt.savefig('mean_'+str(ifname0)+'.png')\n plt.show()\n \n \n plt.figure()\n #plt.plot(times,mean,'k-',lw=4,label='mean')\n plt.plot(times,centered ,'.',ms=1,alpha=0.4)\n plt.grid()\n plt.xlabel('time')\n plt.ylabel('output')\n plt.title('Signal (Centered by Sample Mean)')\n plt.savefig('centered_'+str(ifname0)+'.png')\n plt.show()\n \n \n \n \n \n \n \n X = output.copy().T\n #X = centered.copy().T\n \n nr = X.shape[0]\n dimreductiontype='pca'\n \n from sklearn.decomposition import PCA,KernelPCA,FactorAnalysis\n \n \n if(dimreductiontype=='pca'):\n pca = PCA(n_components = nr ,whiten=True)#min(df.shape))\n elif(dimreductiontype=='kpca'):\n pca = KernelPCA(n_components=min(df.shape))\n elif(dimreductiontype=='fa'):\n pca = FactorAnalysis(n_components=min(df.shape))\n \n Z = pca.fit_transform(X)\n \n try:\n print(\"pca.n_components \", pca.n_components)\n print(\"pca.n_features_ \", pca.n_features_)\n print(\"pca.n_samples_ \", pca.n_samples_)\n print('pca.noise_variance_ ', pca.noise_variance_)\n except Exception:\n 1;\n \n try:\n fig,ax=plt.subplots(1,1)\n plt.plot(pca.explained_variance_ratio_,'-o',ms=4)\n plt.grid()\n plt.title('Variance Explained (Percent) by Component')\n plt.xlabel('Principal Component')\n plt.ylabel('Variance Explained')\n plt.grid()\n # plt.legend(ilabel)\n plt.savefig(cwd+\"/\"+str(ifname0)+'_'+dimreductiontype+\"_\"+\"explained_variance_ratio_\"+\".png\")\n plt.show()\n except Exception:\n 1;\n \n \n #pca = FactorAnalysis(n_components=min(df.shape))\n #Z = pca.fit_transform(X)\n #plt.plot(times[:,0],favar)\n #plt.title('Variance Explained (Percent) by Component')\n #plt.xlabel('Principal Component')\n #plt.ylabel('Variance Explained')\n #plt.grid()\n #plt.savefig(cwd+\"/\"+str(ifname0)+'_'+dimreductiontype+\"_\"+\"explained_variance_ratio_\"+\".png\")\n #plt.show()\n \n #\n #pca = FactorAnalysis(n_components=min(df.shape))\n #Z = pca.fit_transform(X)\n #favar = pca.noise_variance_\n #favar = np.sqrt(favar)\n #scale_factor = 8\n #plt.figure()\n #\n #plt.plot(times[:,0],X.T ,'b.',ms=1)\n #plt.plot(times[:,0],Xm[0] ,'k-',lw=6,alpha=0.4)\n 
#plt.plot(times[:,0],Xm[0] + scale_factor * favar[:],'g-')\n #plt.plot(times[:,0],Xm[0] - scale_factor * favar[:],'r-')\n #plt.title('Variance Explained (Percent) by Component')\n #plt.xlabel('Principal Component')\n #plt.ylabel('Variance Explained')\n #plt.grid()\n #plt.savefig(cwd+\"/\"+str(ifname0)+'_'+dimreductiontype+\"_\"+\"bands_\"+\".png\")\n #plt.show()\n #\n \n \n try:\n for iy in range(0,nr):\n # ax,fig=plt.subplots(1,1)\n x = times\n y = pca.components_[iy]\n plt.figure()\n plt.plot(x,y,'o',ms=4)\n # for ic, vc in enumerate((iclass)):\n # plt.plot(x[vc],y[vc],icolor[ic]+'o',label=ilabel[ic])\n plt.grid(which='both')\n plt.xlabel('Time')\n plt.ylabel('Principal Mode '+str(iy))\n plt.savefig(cwd+\"/\"+str(ifname0)+'_'+dimreductiontype+\"_\"+\"pm\"+str(iy)+\".png\")\n \n plt.show()\n except Exception:\n 1;\n \n \n \n try:\n plt.figure()\n plt.plot(times,pca.mean_)\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Signal Mean')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n except Exception:\n 1;\n \n def reconstruction_error(pca,Z,X,pnorm=2,ax=0):\n Xr = pca.inverse_transform(Z)\n resid = Xr-X\n if(pnorm=='avg'):\n abserr = resid.mean(axis=0)\n relerr = abserr / pca.mean_\n else:\n abserr = np.linalg.norm(resid,ord=pnorm,axis=ax)\n norm = np.linalg.norm(X,ord=pnorm,axis=ax)\n relerr = abserr/norm\n return Xr.T, abserr, relerr\n #recon,abserr, relerr = reconstruction_error(pca,Z,X, pnorm='avg')\n recon,abserr, relerr = reconstruction_error(pca,Z,X, pnorm=2)\n try:\n plt.figure()\n plt.plot(times[:,0],mean,'k-',lw=8,alpha=0.9,label='mean')\n plt.plot(times[:,0],recon[:,0],'r.',ms=1,alpha=0.8,label='reconstruction')\n plt.plot(times,recon,'r.',ms=1,alpha=0.2)#,label='reconstruction')\n plt.grid()\n plt.legend()\n plt.xlabel('Time')\n plt.ylabel('Approximate Reconstruction of Signal')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n except Exception:\n 1;\n \n \n #Xr = pca.inverse_transform(pca.transform(mean[None,:]))[0];plt.plot(Xr-mean)\n try:\n plt.figure()\n plt.plot(times,relerr,'r.')\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Signal Reconstruction Error')\n plt.title('Relative Signal Reconstruction Error')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n except Exception:\n 1;\n \n try:\n plt.figure()\n # plt.plot(times,pca.mean_,label='Mean')\n plt.plot(times,abserr,'r.',label='Absolute Error')\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Signal Reconstruction Error')\n plt.title('Absolute Signal Reconstruction Error')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n except Exception:\n 1;\n \n \n tt = times[:,0]\n Xm = X.mean(axis=0)\n Xm = Xm[None,:]\n Xc = X-Xm\n \n plt.figure()\n plt.plot(times,Xm[0],'k-',ms=1)\n plt.plot(times,X.T,'.',ms=1)\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Signal')\n plt.title('Signal and Mean')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n \n plt.figure()\n plt.plot(times[:,0], Xc.T)\n plt.grid()\n plt.xlabel('Time')\n plt.ylabel('Fluctuation in Signal about Mean')\n plt.title('Fluctuation in Signal about Mean')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n \n \n plt.figure()\n plt.hist(Xc.flatten(),Xc.shape[0],density=True)\n plt.grid()\n plt.ylabel('PMF')\n plt.xlabel('Fluctuation in Signal about Mean')\n plt.title('Fluctuation in Signal about Mean')\n plt.savefig(cwd+\"/\"+dimreductiontype+'_'+fname0+'.png')\n plt.show()\n \n \n # U,S,V = 
np.linalg.svd(Xc,full_matrices=False)\n # \n # plt.plot(S,'-o')\n # explained_variance = np.cumsum(S)/np.sum(S,axis=0)\n # plt.plot(explained_variance,'-o')\n # plt.show()\n \n \n #tol = 0.3\n #itrunc = np.where(explained_variance>tol)[0].min()\n #\n #for itrunc in range(S.shape[0], S.shape[0]-1,-1):\n # S[itrunc:]*=0\n # Xrc = U.dot(np.diag(S).dot(V))\n # \n # error = Xc-Xrc\n # terror = np.mean(Xrc-Xc,axis=0)\n # serror = np.linalg.norm(Xrc-Xc,axis=1,ord=2)\n # nbins = np.sqrt(2 * serror.shape[0]).astype(int)\n ## print( terror)\n ## print('itrunc', itrunc, '' ,' Signal Variance ', Xc.var() - Xrc.var() , ' Signal Fraction ', 1 - Xrc.var() / Xc.var() )\n # xstdev = np.sqrt(np.var(error,axis=1))\n # xtimevariation = np.sqrt(np.var(error,axis=0))\n # print('itrunc', itrunc, '' ,' Signal Time Variation ', xtimevariation,'Sample StDev ', xstdev )#, ' SNR ', np.sqrt( Xc.var() / Xrc.var() - 1 ) )\n # \n # plt.figure()\n # plt.plot(tt,terror,'.')\n # plt.show()\n # \n # plt.figure()\n # plt.plot(times, error.T,'.',ms=2,alpha=0.2)\n # plt.plot(tt, error[0],'.',ms=2,alpha=0.2)\n # plt.show()\n # \n # plt.figure()\n # plt.hist(serror,bins=nbins)\n # plt.show()\n", "sub_path": "leaflet_flutter_data/ex1/aorta_data.py", "file_name": "aorta_data.py", "file_ext": "py", "file_size_in_byte": 11467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.getcwd", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.diff", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sort", 
"line_number": 80, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 166, "usage_type": "call"}, {"api_name": "sklearn.decomposition.KernelPCA", "line_number": 168, "usage_type": "call"}, {"api_name": 
"sklearn.decomposition.FactorAnalysis", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 254, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 267, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 309, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 323, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 327, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 327, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 329, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 329, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.savefig", "line_number": 335, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 335, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 346, "usage_type": "name"}]} +{"seq_id": "334827427", "text": "import urllib3, json, requests, keyboards\nfrom setting import bot_token, chat_id_service, rest_link_product, rest_link_store, rest_link_stock\nimport telebot\nfrom telebot import types\nimport barcode\nimport time, datetime, schedule\nfrom configparser import ConfigParser\nimport os\nfrom os import path\nfrom mysql.connector import MySQLConnection, Error\nfrom multiprocessing import Process, freeze_support\n#from service import transliterate\n\nurllib3.disable_warnings()\n\nbot = telebot.TeleBot(bot_token)\n\ndirpath = os.path.dirname(__file__)\nconffile = os.path.join(dirpath, 'config.ini')\n\n#Чтение файла конфигурации\ndef read_db_config(filename=conffile, section='mysql'):\n parser = ConfigParser()\n parser.read(filename)\n db = {}\n if parser.has_section(section):\n items = parser.items(section)\n for item in items:\n db[item[0]] = item[1]\n else:\n raise Exception('{0} not found in the {1} file'.format(section, filename))\n return db\n\n#Первый запуск\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n sql = (\"SELECT * FROM users WHERE chat_id= %s\")\n cursor.execute(sql, [(message.from_user.id)])\n user = cursor.fetchone()\n if not user:\n bot.send_message(message.chat.id, 'Вы впервые здесь. 
Для продолжения нажмите кнопку \"Зарегистрироваться\"', reply_markup=keyboards.NewUser)\n else:\n bot.send_message(message.chat.id, 'С возвращением!', reply_markup=keyboards.keyboard1)\n cursor.close()\n conn.close()\n\n#Регистрация пользователя\n@bot.message_handler(content_types=['contact'])\ndef add_user(message):\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n sql = (\"SELECT * FROM users WHERE chat_id= %s\")\n cursor.execute(sql, [(message.contact.user_id)])\n user = cursor.fetchone()\n cursor.close()\n conn.close()\n\n if not user:\n newdata = (message.contact.user_id,\n message.contact.first_name,\n message.contact.last_name,\n message.contact.phone_number,\n datetime.datetime.now()\n )\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n cursor.executemany(\"INSERT INTO users (chat_id, first_name, last_name, phone_number,datetime) VALUES (%s,%s,%s,%s,%s)\",\n (newdata,))\n conn.commit()\n cursor.close()\n conn.close()\n bot.send_message(message.chat.id, 'Приятно познакомиться, можете пользоваться сервисом', reply_markup=keyboards.keyboard1)\n\n#Обработка сообщений\n@bot.message_handler(content_types=['text'])\ndef send_text(message):\n if message.text.lower() == 'поиск':\n products(message.chat.id)\n elif message.text.lower() == 'локация':\n city = get_user_city(message.chat.id)\n if city:\n usercity=city\n else:\n usercity='???'\n\n citykeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=1)\n #citykeyboard.add(types.KeyboardButton(text='Выбрать город ('+usercity+')'),\n citykeyboard.add(types.KeyboardButton(text='Выбрать город ('+usercity+')'),\n types.KeyboardButton(text='Обновить координаты', request_location=True))\n citykeyboard.add(types.KeyboardButton(text='Назад'))\n bot.send_message(message.chat.id, 'Чтобы увидеть товар в ближайших аптеках, выберите город и обновите координаты', reply_markup=citykeyboard)\n elif message.text.lower() == 'назад':\n bot.send_message(message.chat.id, 'Главное меню', reply_markup=keyboards.keyboard1)\n elif message.text.lower().find('выбрать город') == 0:\n try:\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.execute('select city from store s group by city order by city')\n citys = cursor.fetchall()\n markup = types.InlineKeyboardMarkup()\n for city in citys:\n name = city[0]\n switch_button = types.InlineKeyboardButton(text=name, callback_data='mycity:'+name)\n markup.add(switch_button)\n\n cursor.close()\n conn.close()\n\n bot.send_message(message.chat.id, \"Выберите ваш город\", reply_markup=markup)\n #bot.send_message(message.chat.id, 'Главное меню', reply_markup=keyboards.keyboard1)\n\n #bot.send_message(message.chat.id, todos['name'] + chr(10) + chr(10) + 'Цена: ' + todos['price'] + ' тенге')\n except requests.exceptions.ConnectionError:\n bot.send_message(message.chat.id, 'Отсутствует связь с сервисом цен')\n #Оповестить сервис о проблемах\n bot.send_message(chat_id_service, 'Внимание! 
Проблема с доступом к сервису цен')\n\n\n #Регистрация местоположения\n@bot.message_handler(content_types=['location'])\ndef send_location(message):\n print(message)\n newdata = (\n message.location.latitude,\n message.location.longitude,\n message.from_user.id\n )\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.executemany(\"UPDATE users SET latitude = %s, longitude = %s WHERE chat_id = %s\",\n (newdata,))\n conn.commit()\n cursor.close()\n conn.close()\n\n bot.send_message(message.chat.id, 'Ваши координаты обновлены')\n\n#Получение фото товара\n@bot.message_handler(content_types=['photo'])\ndef sent_barcode(message):\n raw = message.photo[2].file_id\n file_info = bot.get_file(raw)\n downloaded_file = 'https://api.telegram.org/file/bot' + bot_token + '/' + file_info.file_path\n bcode = barcode.read_barcode(downloaded_file,message.chat.id)\n print(str(bcode))\n\n if bcode == 'No':\n bot.send_message(message.chat.id, 'Не удалось распознать код. Попробуйте еще раз')\n else:\n print(bcode.decode())\n\n\n\n#Формирование результатов поиска\n@bot.inline_handler(func=lambda query: len(query.query) >= 2)\ndef query_text(query):\n offset = int(query.offset) if query.offset else 0\n try:\n\n SQL = \"\"\"\\\n select t.nommodif, t.name, t.producer, t.photo, t.city, case when %s='' then 0 ELSE t.price end price\n FROM (SELECT p1.nommodif, p1.name, p1.producer, p1.photo, p3.city, p2.price FROM product p1\n inner join stock p2 on p2.company = p1.company and p2.product_id = p1.nommodif\n inner join store p3 on p3.company = p2.company and p3.name = p2.store\n WHERE lower(concat(p1.name,COALESCE(p1.search_key,''))) LIKE lower(%s)\n group by p1.nommodif, p1.name, p1.producer, p1.photo, p3.city, p2.price) t\n WHERE (t.city = %s or %s='') LIMIT 5 OFFSET %s\n \"\"\"\n SQL2 = \"\"\"\\\n SELECT p1.nommodif, p1.name, p1.producer, p1.photo, p3.city,\n case when min(p2.price) <> max(p2.price) then\n CONCAT(min(p2.price),' - ',max(p2.price))\n else\n CONCAT(min(p2.price))\n end \n price FROM product p1\n inner join users u on u.chat_id = %s\n inner join stock p2 on p2.company = p1.company and p2.product_id = p1.nommodif\n inner join store p3 on p3.company = p2.company and p3.name = p2.store and p3.city = u.city\n WHERE lower(concat(p1.name,p1.producer,COALESCE(p1.search_key,''))) LIKE lower(%s)\n group by p1.nommodif, p1.name, p1.producer, p1.photo, p3.city\n LIMIT 5 OFFSET %s\n \"\"\"\n #cursor.execute(SQL, (usercity,'%'+query.query+'%',usercity,usercity,offset,))\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.execute(SQL2, (query.from_user.id, '%' + query.query + '%', offset,))\n\n products = cursor.fetchall()\n\n results = []\n try:\n m_next_offset = str(offset + 5) if len(products) == 5 else None\n if products:\n for product in products:\n try:\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F4CC Добавить в список', callback_data='prlist:' + str(product[0])),\n types.InlineKeyboardButton(text='Мой список', callback_data='mylist:'),)\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F30D Искать по списку в аптеках', callback_data='locallist:'),)\n #types.InlineKeyboardButton(text=u'\\U0001F30D Найти аптеку', callback_data='local:'+str(product[0])),\n #types.InlineKeyboardButton(text=u'\\U0001F30D', callback_data='locallist:'),\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F50D Продолжить поиск', switch_inline_query_current_chat=\"\"),)\n\n 
items = types.InlineQueryResultArticle(\n id=product[0], title=product[1],\n description=\"Производитель: \"+product[2]+\"\\nЦена: \"+str(product[5])+\" тенге\",\n input_message_content=types.InputTextMessageContent(\n message_text='*'+product[1]+'* [.](' + product[3] + ') \\n'+product[2]+'\\nЦена: '+str(product[5])+' тенге',\n parse_mode='markdown',\n disable_web_page_preview=False,\n ),\n reply_markup=markup,\n thumb_url=product[3], thumb_width=100, thumb_height=100\n )\n results.append(items)\n except Exception as e:\n print(e)\n cursor.close()\n conn.close()\n bot.answer_inline_query(query.id, results, next_offset=m_next_offset if m_next_offset else \"\", cache_time=86400)\n #bot.answer_inline_query(query.id, results, next_offset=m_next_offset if m_next_offset else \"\")\n else:\n markup = types.InlineKeyboardMarkup()\n markup.add(\n types.InlineKeyboardButton(text=u'\\U0001F50D Продолжить поиск', switch_inline_query_current_chat=\"\"),\n )\n items = types.InlineQueryResultArticle(\n id='1000', title='Ничего не найдено',\n description=\"Попробуйте изменить запрос...\",\n input_message_content=types.InputTextMessageContent(\n message_text=\"По вашему запросу ничего не найдено. Попробуйте изменить запрос...\",\n parse_mode='markdown',\n disable_web_page_preview=True,\n ),\n reply_markup=markup,\n thumb_url='https://ru.seaicons.com/wp-content/uploads/2017/02/Cute-Ball-Stop-icon.png',\n thumb_width=100, thumb_height=100\n )\n results.append(items)\n bot.answer_inline_query(query.id, results)\n add_logs(query.from_user.id, 'search', query.query)\n except Exception as e:\n print(e)\n\n except Exception as e:\n print(e)\n\n\n\n#Обработка входящих сообщений\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n # Если сообщение из чата с ботом\n if call.message:\n #print(call)\n if call.data.find('mycity:') == 0:\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n cursor.execute('UPDATE users SET city = %s WHERE chat_id = %s', (call.data.replace('mycity:',''),call.from_user.id))\n conn.commit()\n cursor.close()\n conn.close()\n\n #cursor.close()\n #cnx.close()\n usercity = call.data.replace('mycity:','')\n citykeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=1)\n citykeyboard.add(types.KeyboardButton(text='Выбрать город ('+usercity+')'),\n types.KeyboardButton(text='Обновить координаты', request_location=True))\n citykeyboard.add(types.KeyboardButton(text='Назад'))\n\n bot.send_message(call.from_user.id,\n 'Ваш город: '+usercity,\n reply_markup=citykeyboard)\n if call.data.find('mylist:') == 0:\n get_search_list(call.from_user.id)\n if call.data.find('clearlist:') == 0:\n #Очистка списка пользователя\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.execute('DELETE FROM user_product_list WHERE chat_id = %s', [(call.from_user.id)])\n conn.commit()\n cursor.close()\n conn.close()\n markup = types.InlineKeyboardMarkup()\n markup.add(\n types.InlineKeyboardButton(text=u'\\U0001F50D Продолжить поиск', switch_inline_query_current_chat=\"\"), )\n bot.send_message(call.from_user.id,\n 'Ваш список товаров удален.', reply_markup=markup)\n\n if call.data.find('refresh:') == 0:\n #Импорт данных из аптек\n import_product()\n import_store()\n import_stock()\n if call.data.find('locallist:') == 0:\n search_list(call.from_user.id)\n if call.data.find('locallist_one:') == 0:\n search_list_one(call.from_user.id)\n if call.data.find('prlist:') == 0:\n 

add_list(call.from_user.id, call.data.replace('prlist:',''), call.id)\n\n # Если сообщение из инлайн-режима\n elif call.inline_message_id:\n if call.data.find('prlist:') == 0:\n add_list(call.from_user.id, call.data.replace('prlist:',''), call.id)\n elif call.data.find('locallist:') == 0:\n get_search_list(call.from_user.id)\n search_list(call.from_user.id)\n elif call.data.find('mylist:') == 0:\n get_search_list(call.from_user.id)\n\ndef products(user_id):\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F4CC' + ' Мой список', callback_data='mylist:'),)\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F50D' + ' Поиск товаров', switch_inline_query_current_chat=\"\"),)\n\n # Сервисная команда\n if user_id == chat_id_service:\n markup.add(\n types.InlineKeyboardButton(text='Обновить данные', callback_data='refresh:'))\n bot.send_message(user_id, \"КАК ЭТО РАБОТАЕТ:\\n\\n\"\n \"1. В пункте [Локация] выберите город и обновите координаты (если Вы еще этого не сделали)\\n\\n\"\n \"2. Нажмите [\\U0001F50DПоиск], наберите боту часть наименования, например '@goAptoBot анальгин' или просто отправьте боту \\U0001F4CE ФОТО ШТРИХ-КОДА с упаковки товара\\n\\n\"\n \"3. Найдите один или несколько товаров и добавьте их в список \\U0001F4CC \\n\\n\"\n \"4. Нажмите [\\U0001F30D Искать по списку в аптеках] - бот сообщит о цене и найдет ближайшие к вам аптеки, в которых есть товар из списка\",\n parse_mode='HTML', reply_markup=markup)\n\ndef add_logs(user_id, metod, value):\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n now = datetime.datetime.now()\n cursor.executemany(\"INSERT INTO logs (datetime,chat_id,metod,value) VALUES (%s,%s,%s,%s)\",\n [(now,int(user_id), metod,value),])\n conn.commit()\n cursor.close()\n conn.close()\n\ndef add_list(user_id, in_data, call_id):\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.executemany(\"INSERT INTO user_product_list (chat_id, product_id) VALUES (%s,%s)\",\n [(int(user_id), str(in_data)),])\n conn.commit()\n cursor.close()\n conn.close()\n\n add_logs(int(user_id), 'product', str(in_data))\n\n bot.answer_callback_query(call_id, show_alert=True, text=\"Товар добавлен в список\")\n\n#Получение города пользователя\ndef get_user_city(in_user_id):\n # Ищем город пользователя\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n sql = (\"SELECT city FROM users WHERE chat_id = %s\")\n cursor.execute(sql, [(in_user_id)])\n city = cursor.fetchone()\n cursor.close()\n conn.close()\n if city:\n return city[0]\n else:\n return ''\n\n#Вывод списка товаров\ndef get_search_list(user_id):\n try:\n product_list = 'СПИСОК ДЛЯ ПОИСКА:\\n\\n'\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n sql = (\n \"SELECT p2.name, p2.producer FROM user_product_list p1, product p2 WHERE p2.nommodif = p1.product_id AND p1.chat_id = %s group by p2.name, p2.producer order by p2.name\")\n cursor.execute(sql, [(user_id)])\n products = cursor.fetchall()\n\n for product in products:\n product_list = product_list + '*' + product[0] + '*' + '\\n' + product[1] + '\\n' + '\\n'\n\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F5D1 Очистить список', callback_data='clearlist:'),)\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F30D Искать по списку в аптеках', callback_data='locallist:'),)\n 

markup.add(types.InlineKeyboardButton(text=u'\\U0001F50D Продолжить поиск', switch_inline_query_current_chat=\"\"),)\n\n bot.send_message(user_id,\n product_list,\n parse_mode='markdown',\n reply_markup=markup, )\n\n cursor.close()\n conn.close()\n except Exception as e:\n print(e)\n bot.send_message(user_id,\n 'Список пустой...')\n\n#Поиск товаров по списку\ndef search_list(user_id):\n #Назначим кнопки\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F30D Искать каждый товар отдельно', callback_data='locallist_one:'),)\n markup.add(types.InlineKeyboardButton(text=u'\\U0001F50D Продолжить поиск', switch_inline_query_current_chat=\"\"), )\n #Проверим что в списке есть товары\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n SQL = 'select count(distinct(product_id)) from user_product_list where chat_id = %s'\n cursor.execute(SQL, (user_id,))\n products = cursor.fetchone()\n\n if products[0]==0:\n bot.send_message(user_id,\n 'Сначала добавьте товары в список для поиска')\n cursor.close()\n conn.close()\n else:\n #Ищем аптеки с соответствием по списку товара\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n SQL = \"\"\"\\\n SELECT s.name, s.address, s.mode, s.phone, s.latitude ,s.longitude, t.way FROM (\n SELECT count(p2.product_id) kol, p1.name, get_way(p1.latitude ,p1.longitude,u.latitude,u.longitude) way FROM users u\n inner join store p1 on p1.city = u.city \n inner join stock p2 on p2.company = p1.company and p1.name = p2.store \n WHERE u.chat_id = %s and p2.product_id in (select distinct(product_id) from user_product_list where chat_id = %s)\n group by p1.name, p1.latitude ,p1.longitude,u.latitude,u.longitude having count(p2.product_id)=(select count(distinct(product_id)) from user_product_list where chat_id = %s)\n ) t \n inner join store s on s.name = t.name\n order by t.way asc \n LIMIT 3\n \"\"\"\n cursor.execute(SQL, (user_id, user_id, user_id,))\n stores = cursor.fetchall()\n\n for store in stores:\n try:\n bot.send_venue(user_id,\n store[4],\n store[5],\n store[0] + ' (' + str(store[6]) + ' м.)',\n store[1]\n )\n bot.send_message(user_id,\n store[2] + '\\n' + 'Тел: ' + store[3] + '\\nЕсть все по списку',\n parse_mode='markdown', )\n except Exception as e:\n print(e)\n cursor.close()\n conn.close()\n bot.send_message(user_id,\n 'Если вас не устроили эти аптеки, вы можете поискать отдельно каждый товар из списка в ближайших аптеках',\n parse_mode='markdown',\n reply_markup=markup, )\n\ndef search_list_one(user_id):\n #Назначим кнопки\n markup = types.InlineKeyboardMarkup()\n markup.add(\n types.InlineKeyboardButton(text=u'\\U0001F30D Искать каждый товар отдельно', callback_data='locallist_one:'),\n )\n #Проверим что в списке есть товары\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n SQL = 'select count(distinct(product_id)) from user_product_list where chat_id = %s'\n cursor.execute(SQL, (user_id,))\n products = cursor.fetchone()\n\n if products[0]==0:\n bot.send_message(user_id,\n 'Сначала добавьте товары в список для поиска')\n cursor.close()\n conn.close()\n else:\n #Ищем аптеки с соответствием по списку товара\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n SQL = \"\"\"\\\n select r.name, r.producer, p3.name, p3.address, p3.mode, p3.latitude, p3.longitude, p3.phone, t.way, t.price from user_product_list p\n inner join product r on 

r.nommodif = p.product_id \n inner join users u on u.chat_id = p.chat_id \n inner join store p3 on p3.city = u.city and r.company = p3.company\n inner join \n (\n select distinct(pl.product_id) product_id, p2.price, min(get_way(p3.latitude ,p3.longitude,u.latitude,u.longitude)) way from user_product_list pl\n inner join users u on u.chat_id = pl.chat_id \n inner join stock p2 on p2.product_id = pl.product_id\n inner join store p3 on p3.company = p2.company and p3.name = p2.store and p3.city = u.city\n where pl.chat_id = %s\n group by pl.product_id, p2.price\n ) t\n where p.chat_id = %s\n and get_way(p3.latitude ,p3.longitude,u.latitude,u.longitude)=t.way and r.nommodif = t.product_id\n group by r.name, r.producer, p3.name, p3.address, p3.mode, p3.latitude, p3.longitude, p3.phone, t.way, t.price\n \"\"\"\n cursor.execute(SQL, (user_id, user_id, ))\n stores = cursor.fetchall()\n\n for store in stores:\n try:\n bot.send_venue(user_id,\n store[5],\n store[6],\n store[2] + ' (' + str(store[8]) + ' м.)',\n store[3]\n )\n bot.send_message(user_id,\n '*'+store[0]+'*\\n'+store[1]+'\\n'+'Цена: '+str(store[9])+' тенге\\n\\n'+\n store[4] + '\\n' + 'Тел: ' + store[7] ,\n parse_mode='markdown', )\n except Exception as e:\n print(e)\n cursor.close()\n conn.close()\n\ndef import_data():\n import_product()\n import_store()\n import_stock()\n\n\ndef import_product():\n #Импорт справочника товаров\n try:\n response = requests.get(rest_link_product, verify=False)\n if response.status_code == 404:\n bot.send_message(chat_id_service, 'Не доступен сервер ЦВЕТНАЯ')\n else:\n todos = json.loads(response.text)\n indata = []\n\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.execute(\"DELETE FROM product WHERE company='ЦВЕТНАЯ'\")\n\n for row in todos['items']:\n indata.append((\n 'ЦВЕТНАЯ',\n row['nommodif'],\n row['modif_name'],\n row['producer'],\n row['barcode'],\n row['photo'],\n row['skey'],\n ))\n\n\n '''\n try:\n while todos['next']['$ref']:\n newlink = todos['next']['$ref']\n print(newlink)\n response = requests.get(newlink, verify=False)\n todos = json.loads(response.text)\n for row in todos['items']:\n indata.append((\n 'ЦВЕТНАЯ',\n row['nommodif'],\n row['modif_name'],\n row['producer'],\n row['barcode']\n ))\n '''\n cursor.executemany(\"INSERT INTO product (company,nommodif,name,producer,barcode,photo,search_key) VALUES (%s,%s,%s,%s,%s,%s,%s)\",\n indata)\n\n conn.commit()\n cursor.close()\n conn.close()\n\n bot.send_message(chat_id_service, 'Справочник товаров обновлен')\n #cursor.close()\n #cnx.close()\n except requests.exceptions.ConnectionError:\n # Оповестить сервис о проблемах\n bot.send_message(chat_id_service, 'Внимание! 

Проблема с доступом к сервису цен')\n\ndef import_store():\n #Импорт справочника аптек\n try:\n response = requests.get(rest_link_store, verify=False)\n if response.status_code == 404:\n bot.send_message(chat_id_service, 'Не доступен сервер ЦВЕТНАЯ')\n else:\n todos = json.loads(response.text)\n indata = []\n\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.execute(\"DELETE FROM store WHERE company='ЦВЕТНАЯ'\")\n\n for row in todos['items']:\n indata.append((\n row['company'],\n row['store'],\n row['city'],\n row['address'],\n row['lon'],\n row['lat'],\n row['phone'],\n row['resh']\n ))\n cursor.executemany(\n \"INSERT INTO store (company,name,city,address,longitude,latitude,phone,mode) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\",\n indata)\n\n conn.commit()\n cursor.close()\n conn.close()\n\n bot.send_message(chat_id_service, 'Справочник аптек обновлен')\n #cursor.close()\n #cnx.close()\n except requests.exceptions.ConnectionError:\n # Оповестить сервис о проблемах\n bot.send_message(chat_id_service, 'Внимание! Проблема с доступом к сервису цен')\n\ndef import_stock():\n #Импорт остатков\n try:\n response = requests.get(rest_link_stock, verify=False)\n if response.status_code == 404:\n bot.send_message(chat_id_service, 'Не доступен сервер ЦВЕТНАЯ')\n else:\n todos = json.loads(response.text)\n indata = []\n\n db_config = read_db_config()\n conn = MySQLConnection(**db_config)\n cursor = conn.cursor()\n\n cursor.execute(\"DELETE FROM stock WHERE company='ЦВЕТНАЯ'\")\n\n for row in todos['items']:\n indata.append((\n 'ЦВЕТНАЯ',\n row['store'],\n row['nommodif'],\n row['restfact'],\n row['price']\n ))\n try:\n while todos['next']['$ref']:\n newlink = todos['next']['$ref']\n print(newlink)\n response = requests.get(newlink, verify=False)\n todos = json.loads(response.text)\n for row in todos['items']:\n indata.append((\n 'ЦВЕТНАЯ',\n row['store'],\n row['nommodif'],\n row['restfact'],\n row['price']\n ))\n except Exception as e:\n print(e)\n cursor.executemany(\"INSERT INTO stock (company,store,product_id,qnt,price) VALUES (%s,%s,%s,%s,%s)\",\n indata)\n\n conn.commit()\n cursor.close()\n conn.close()\n\n bot.send_message(chat_id_service, 'Остатки обновлены')\n #cursor.close()\n #cnx.close()\n except requests.exceptions.ConnectionError:\n # Оповестить сервис о проблемах\n bot.send_message(chat_id_service, 'Внимание! 

Проблема с доступом к сервису цен')\n\n# Подключаем планировщик повторений\n#schedule.every().day.at(\"05:00\").do(job)\n#schedule.every().hour.do(import_data)\n\"\"\"\nschedule.every(10).minutes.do(import_data)\n\n\n# это функция проверки на запуск импорта\ndef check_import_data():\n while True:\n schedule.run_pending()\n time.sleep(60)\n\n# а теперь запускаем проверку в отдельном потоке\nif __name__ == '__main__':\n freeze_support()\n p1 = Process(target=check_import_data, args=())\n p1.start()\n\n\"\"\"\nwhile True:\n try:\n bot.polling(none_stop=True)\n except Exception as e:\n print(e)\n # повторяем через 15 секунд в случае недоступности сервера Telegram\n time.sleep(15)\n\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 33479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 14, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 16, "usage_type": "call"}, {"api_name": "setting.bot_token", "line_number": 16, "usage_type": "argument"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 23, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 38, "usage_type": "call"}, {"api_name": "keyboards.NewUser", "line_number": 45, "usage_type": "attribute"}, {"api_name": "keyboards.keyboard1", "line_number": 47, "usage_type": "attribute"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "attribute"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 72, "usage_type": "call"}, {"api_name": "keyboards.keyboard1", "line_number": 79, "usage_type": "attribute"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 93, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 93, "usage_type": "attribute"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 95, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 95, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 96, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 96, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 97, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 97, "usage_type": "name"}, {"api_name": "keyboards.keyboard1", "line_number": 100, "usage_type": "attribute"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 104, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 109, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 109, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 112, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 112, "usage_type": "name"}, {"api_name": "requests.exceptions", "line_number": 122, "usage_type": "attribute"}, {"api_name": "setting.chat_id_service", "line_number": 125, "usage_type": "argument"}, {"api_name": 
"mysql.connector.MySQLConnection", "line_number": 138, "usage_type": "call"}, {"api_name": "setting.bot_token", "line_number": 154, "usage_type": "name"}, {"api_name": "barcode.read_barcode", "line_number": 155, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 197, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 210, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 210, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 211, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 211, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 212, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 212, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 213, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 213, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 216, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 216, "usage_type": "name"}, {"api_name": "telebot.types.InlineQueryResultArticle", "line_number": 218, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 218, "usage_type": "name"}, {"api_name": "telebot.types.InputTextMessageContent", "line_number": 221, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 221, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 237, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 237, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 239, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 239, "usage_type": "name"}, {"api_name": "telebot.types.InlineQueryResultArticle", "line_number": 241, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 241, "usage_type": "name"}, {"api_name": "telebot.types.InputTextMessageContent", "line_number": 244, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 244, "usage_type": "name"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 272, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 282, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 282, "usage_type": "attribute"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 283, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 283, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 284, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 284, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 285, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 285, "usage_type": "name"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 295, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 302, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 302, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 304, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 304, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 331, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 331, "usage_type": "name"}, {"api_name": 
"telebot.types.InlineKeyboardButton", "line_number": 332, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 332, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 333, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 333, "usage_type": "name"}, {"api_name": "setting.chat_id_service", "line_number": 336, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 338, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 338, "usage_type": "name"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 350, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 350, "usage_type": "attribute"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 359, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 376, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 393, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 403, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 403, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 404, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 404, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 405, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 405, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 406, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 406, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 423, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 423, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 424, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 424, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 425, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 425, "usage_type": "name"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 428, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 443, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 483, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 483, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 485, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 485, "usage_type": "name"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 489, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 504, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 554, "usage_type": "call"}, {"api_name": "setting.rest_link_product", "line_number": 554, "usage_type": "argument"}, {"api_name": "setting.chat_id_service", "line_number": 556, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 558, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 562, "usage_type": "call"}, {"api_name": "setting.chat_id_service", "line_number": 602, "usage_type": "argument"}, {"api_name": "requests.exceptions", "line_number": 605, "usage_type": "attribute"}, 
{"api_name": "setting.chat_id_service", "line_number": 607, "usage_type": "argument"}, {"api_name": "requests.get", "line_number": 612, "usage_type": "call"}, {"api_name": "setting.rest_link_store", "line_number": 612, "usage_type": "argument"}, {"api_name": "setting.chat_id_service", "line_number": 614, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 616, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 620, "usage_type": "call"}, {"api_name": "setting.chat_id_service", "line_number": 644, "usage_type": "argument"}, {"api_name": "requests.exceptions", "line_number": 647, "usage_type": "attribute"}, {"api_name": "setting.chat_id_service", "line_number": 649, "usage_type": "argument"}, {"api_name": "requests.get", "line_number": 654, "usage_type": "call"}, {"api_name": "setting.rest_link_stock", "line_number": 654, "usage_type": "argument"}, {"api_name": "setting.chat_id_service", "line_number": 656, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 658, "usage_type": "call"}, {"api_name": "mysql.connector.MySQLConnection", "line_number": 662, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 679, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 680, "usage_type": "call"}, {"api_name": "setting.chat_id_service", "line_number": 698, "usage_type": "argument"}, {"api_name": "requests.exceptions", "line_number": 701, "usage_type": "attribute"}, {"api_name": "setting.chat_id_service", "line_number": 703, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 731, "usage_type": "call"}]}