seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
---|---|---|---|---|---|---|---|---|---|---|---|---|---
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
70373498108
|
from typing import Optional, Any, Union, Callable
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn.modules import Module
from .linear import Linear
from .normalization import LayerNorm
from .activation import MultiheadAttention
from .dropout import Dropout
class TransformerEncoderLayer(Module):
r"""Pytorch 2.0
TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, layer norm is done prior to attention and feedforward
operations, respectively. Otherwise it's done after. Default: ``False`` (after).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
Alternatively, when ``batch_first`` is ``True``:
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
Fast path:
forward() will use a special optimized implementation described in
`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
conditions are met:
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
argument ``requires_grad``
- training is disabled (using ``.eval()``)
- batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
- activation is one of: ``"relu"``, ``"gelu"``, ``torch.nn.functional.relu``, or ``torch.nn.functional.gelu``
- at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
- if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
nor ``src_key_padding_mask`` is passed
- the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
unless the caller has manually modified one without modifying the other)
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
passed for ``src`` to represent padding more efficiently than using a padding
mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
returned, and an additional speedup proportional to the fraction of the input that
is padding can be expected.
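    A minimal NestedTensor sketch (editor's addition; assumes eval mode and a
    ``batch_first=True`` layer, per the fast-path conditions above)::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True).eval()
        >>> src = torch.nested.nested_tensor([torch.rand(10, 512), torch.rand(7, 512)])
        >>> out = encoder_layer(src)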
.. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
https://arxiv.org/abs/2205.14135
"""
__constants__ = ["batch_first", "norm_first"]
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5,
batch_first: bool = False,
norm_first: bool = False,
device=None,
dtype=None,
B: int = 1,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
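        # Editor's note (assumption): B appears to be Hydro's fusion width, i.e.
        # the number of trial models fused into one set of batched weights; it is
        # threaded through every fused submodule constructed below.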
self.B = B
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, B=B, **factory_kwargs)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward, B=B, **factory_kwargs)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model, B=B, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, B=B, **factory_kwargs)
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, B=B, **factory_kwargs)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
# For Hydro scaling
self.d_model = d_model
self.nhead = nhead
self.dim_feedforward = dim_feedforward
self.dropout_value = dropout
self.layer_norm_eps = layer_norm_eps
self.batch_first = batch_first
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
# We can't test self.activation in forward() in TorchScript,
# so stash some information about it instead.
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
self.activation_relu_or_gelu = 1
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
self.activation_relu_or_gelu = 2
else:
self.activation_relu_or_gelu = 0
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "activation"):
self.activation = F.relu
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
is_causal: bool = False,
) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
is_causal: If specified, applies a causal mask as src_mask.
Default: ``False``.
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src_key_padding_mask = F._canonical_mask(
mask=src_key_padding_mask,
mask_name="src_key_padding_mask",
other_type=F._none_or_dtype(src_mask),
other_name="src_mask",
target_type=src.dtype,
)
# The fast path does NOT support training
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
why_not_sparsity_fast_path = ""
if not src.dim() == 3:
why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
elif self.training:
why_not_sparsity_fast_path = "training is enabled"
elif not self.self_attn.batch_first:
why_not_sparsity_fast_path = "self_attn.batch_first was not True"
elif not self.self_attn._qkv_same_embed_dim:
why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
elif not self.activation_relu_or_gelu:
why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
elif not (self.norm1.eps == self.norm2.eps):
why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
elif src.is_nested and (src_key_padding_mask is not None or src_mask is not None):
why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input"
elif self.self_attn.num_heads % 2 == 1:
why_not_sparsity_fast_path = "num_head is odd"
elif torch.is_autocast_enabled():
why_not_sparsity_fast_path = "autocast is enabled"
if not why_not_sparsity_fast_path:
tensor_args = (
src,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
elif not all((x.is_cuda or "cpu" in str(x.device)) for x in tensor_args):
why_not_sparsity_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
why_not_sparsity_fast_path = (
"grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad"
)
if not why_not_sparsity_fast_path:
merged_mask, mask_type = self.self_attn.merge_masks(src_mask, src_key_padding_mask, src)
return torch._transformer_encoder_layer_fwd(
src,
self.self_attn.embed_dim,
self.self_attn.num_heads,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.activation_relu_or_gelu == 2,
self.norm_first,
self.norm1.eps,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
merged_mask,
mask_type,
)
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
def extra_repr(self) -> str:
s = "{d_model}, {nhead}, dim_feedforward={dim_feedforward}, dropout={dropout_value}, layer_norm_eps={layer_norm_eps}, B={B}"
if self.activation != F.relu:
if isinstance(self.activation, str):
s += ", activation={activation}"
else:
s += ", activation={activation.__name__}"
if self.batch_first:
s += ", batch_first=True"
if self.norm_first:
s += ", norm_first=True"
return s.format(**self.__dict__)
def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
|
S-Lab-System-Group/Hydro
|
hydro/fuse_ops/transformer.py
|
transformer.py
|
py
| 12,254 |
python
|
en
|
code
| 18 |
github-code
|
6
|
[
{
"api_name": "torch.nn.modules.Module",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "activation.MultiheadAttention",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "linear.Linear",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "dropout.Dropout",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "linear.Linear",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "normalization.LayerNorm",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "normalization.LayerNorm",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "dropout.Dropout",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "dropout.Dropout",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.gelu",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional._canonical_mask",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional._none_or_dtype",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "torch.is_autocast_enabled",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.overrides.has_torch_function",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "torch.overrides",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "torch.is_grad_enabled",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "torch._transformer_encoder_layer_fwd",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.gelu",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 269,
"usage_type": "name"
}
] |
20250267652
|
import requests
import urllib.parse
import json
class MapBox:
def __init__(self, access_token) -> None:
self.root_url = "https://api.mapbox.com/geocoding/v5/mapbox.places/{}.json?types=place%2Caddress%2Cregion&access_token={}"
self.access_token = access_token
    def getCoordinates(self, location_str):
        if location_str == "":
            return (0, 0)
        formatted_location = urllib.parse.quote(location_str)
        url = self.root_url.format(formatted_location, self.access_token)
        response = requests.get(url)
        data = json.loads(response.text)
        if len(data["features"]) > 0:
            coordinates = data["features"][0]["center"]
            if coordinates is not None and len(coordinates) == 2:
                # Mapbox returns [longitude, latitude]; swap to (lat, lon)
                return (coordinates[1], coordinates[0])
        # no usable result
        return (0, 0)
mb = MapBox("pk.eyJ1IjoiYW5kcmV3aHVhbmciLCJhIjoiY2t5a3dzbDMxMWdrMTJ4b2wzMjlqNXZvNyJ9.K6nzS4XPLOfQ0srwV3M5rw")
# https://api.mapbox.com/geocoding/v5/mapbox.places/Collegeville%2C%20PA.json?access_token=pk.eyJ1IjoiYW5kcmV3aHVhbmciLCJhIjoiY2t5a3dyZWJvMzBrMTJxcG0xenBtYTdhZiJ9.uFJLIrcDl4OHJu1S-To2xA
# https://api.mapbox.com/geocoding/v5/mapbox.places/Collegeville%2C%20PA..hson?access_token=pk.eyJ1IjoiYW5kcmV3aHVhbmciLCJhIjoiY2t5a3dzbDMxMWdrMTJ4b2wzMjlqNXZvNyJ9.K6nzS4XPLOfQ0srwV3M5rw
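# Editor's usage sketch (hypothetical query; returns (lat, lon), or (0, 0) on failure):
# print(mb.getCoordinates("Collegeville, PA"))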
|
andrewhuang427/WashU-Athletics-Demographics
|
utils/MapBox.py
|
MapBox.py
|
py
| 1,369 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.parse.parse.quote",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
}
] |
13126886716
|
from itertools import combinations
def make_all_cases(user_info_array):
    all_cases_from_user = []
    for i in range(5):
        combination_array = combinations([0, 1, 2, 3], i)
        for combination in combination_array:
            case = ""  # the empty combination yields "----"
            for j in range(4):
                if j in combination:
                    case += user_info_array[j]
                else:
                    case += "-"
            all_cases_from_user.append(case)
    return all_cases_from_user
def get_lower_bound(target, array):
    current_min = 0
    current_max = len(array)
    while current_min < current_max:
        current_guess = (current_min + current_max) // 2
        if array[current_guess] >= target:
            current_max = current_guess
        else:
            current_min = current_guess + 1
    return current_max
def solution(info, query):
    answer = []
    all_cases_from_users = {}
    for user_info in info:
        user_info_array = user_info.split()
        all_cases_from_user = make_all_cases(user_info_array)
        for case in all_cases_from_user:
            if case not in all_cases_from_users:
                all_cases_from_users[case] = [int(user_info_array[4])]
            else:
                all_cases_from_users[case].append(int(user_info_array[4]))
    for key in all_cases_from_users:
        all_cases_from_users[key].sort()
    for query_info in query:
        query_info_array = query_info.split()
        case = query_info_array[0] + query_info_array[2] + query_info_array[4] + query_info_array[6]
        if case in all_cases_from_users:
            target_users = all_cases_from_users[case]
            answer.append(len(target_users) - get_lower_bound(int(query_info_array[7]), target_users))
        else:
            answer.append(0)
    return answer
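# Editor's worked example (strings assumed from the usual problem statement,
# not from this file):
#   solution(["java backend junior pizza 150"],
#            ["java and backend and junior and pizza 100"])  # -> [1]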
|
39world/Today-Algorithm-Study-
|
old_test/al_pg_08.py
|
al_pg_08.py
|
py
| 2,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.combinations",
"line_number": 7,
"usage_type": "call"
}
] |
477748253
|
import torch as t
import ipdb
class AttentionPooling(t.nn.Module):
def __init__(self, input_size, hidden_size, dropout):
super(AttentionPooling, self).__init__()
self.projection1 = t.nn.Linear(input_size, hidden_size, bias=True)
self.dropout = t.nn.Dropout(dropout)
self.projection2 = t.nn.Linear(hidden_size, 1, bias=False)
self.projection3 = t.nn.Linear(input_size, hidden_size)
t.nn.init.xavier_normal_(self.projection1.weight)
t.nn.init.xavier_normal_(self.projection2.weight)
t.nn.init.xavier_normal_(self.projection3.weight)
def forward(self, inputs, input_mask=None):
"""
:param inputs: [B, L, E]
:param input_mask: [B, L]
:return: [B, H] (projection3 maps E to hidden_size)
"""
        if input_mask is not None:
            input_mask = input_mask.bool()
        net = t.tanh(self.projection1(inputs))
        # [B, L, H]
        net = self.projection2(net).squeeze(-1)
        # [B, L]
        if input_mask is not None:
            net = net.masked_fill(~input_mask, -float('inf'))
        net = t.nn.functional.softmax(net, -1).unsqueeze(-1)
# [B, L, 1]
net = inputs * net
# [B, L, E]
net = net.sum(-2)
net = self.projection3(net)
# [B, E]
return net
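# Editor's usage sketch (shapes follow the forward() docstring; sizes are hypothetical):
#   pool = AttentionPooling(input_size=256, hidden_size=128, dropout=0.1)
#   x = t.randn(4, 20, 256)        # [B, L, E]
#   mask = t.ones(4, 20)           # [B, L]; 1 marks valid positions
#   pooled = pool(x, mask)         # [B, 128]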
|
CNDPlab/MSMARCO_Reshaped
|
Predictor/ModelUtils/query_pooling.py
|
query_pooling.py
|
py
| 1,316 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.xavier_normal_",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.xavier_normal_",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.xavier_normal_",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.tanh",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "attribute"
}
] |
39654407914
|
import os
import logging
import yaml
from typing import Dict, Any
from yacs.config import CfgNode as _CfgNode
BASE_KEY = "__BASE__"
class CfgNode(_CfgNode):
@staticmethod
def load_yaml_with_base(filename: str, allow_unsafe: bool = False):
        with open(filename, 'r') as file:
            try:
                cfg = yaml.safe_load(file)
            except yaml.constructor.ConstructorError:
                if not allow_unsafe:
                    raise
                logger = logging.getLogger(__name__)
                logger.warning(
                    "Loading config {} with yaml.unsafe_load. Your machine may "
                    "be at risk if the file contains malicious content.".format(
                        filename
                    )
                )
                file.close()
                with open(filename, "r") as file:
                    cfg = yaml.unsafe_load(file)
def merge_a_into_b(a: Dict[Any, Any], b: Dict[Any, Any]) -> None:
# merge dict a into dict b. values in a will overwrite b.
for k, v in a.items():
if isinstance(v, dict) and k in b:
assert isinstance(b[k], dict), "Cannot inherit key '{}' from base!".format(k)
merge_a_into_b(v, b[k])
else:
b[k] = v
if BASE_KEY in cfg:
base_cfg_file = cfg[BASE_KEY]
if base_cfg_file.startswith("~"):
base_cfg_file = os.path.expanduser(base_cfg_file)
if not any(
map(base_cfg_file.startswith, ["/", "https://", "http://"])
):
# the path to base cfg is relative to the config file itself
base_cfg_file = os.path.join(os.path.dirname(filename), base_cfg_file)
base_cfg = CfgNode.load_yaml_with_base(
base_cfg_file, allow_unsafe=allow_unsafe
)
del cfg[BASE_KEY]
merge_a_into_b(cfg, base_cfg)
return base_cfg
return cfg
    def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = False) -> None:
        loaded_cfg = CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
self.merge_from_other_cfg(loaded_cfg)
def merge_from_other_cfg(self, cfg_other):
assert (
BASE_KEY not in cfg_other
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super(CfgNode, self).merge_from_other_cfg(cfg_other)
def merge_from_list(self, cfg_list):
keys = set(cfg_list[0::2])
assert (
BASE_KEY not in keys
), "The reserved key '{}' can obly be used in files!".format(BASE_KEY)
return super(CfgNode, self).merge_from_list(cfg_list)
def __setattr__(self, name: str, value: Any) -> None:
if name.startswith("COMPUTED_"):
if name in self:
old_val = self[name]
if old_val == value:
return
                raise KeyError(
                    "Computed attribute '{}' already exists "
                    "with a different value! old={}, new={}".format(
                        name, old_val, value
                    )
                )
self[name] = value
else:
super(CfgNode, self).__setattr__(name=name, value=value)
def dump(self, **kwargs):
return super(CfgNode, self).dump()
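# Editor's sketch of the __BASE__ inheritance implemented above (hypothetical
# file names). child.yaml:
#   __BASE__: ./base.yaml
#   MODEL:
#     DEPTH: 101
# load_yaml_with_base("child.yaml") loads base.yaml first, then merges the
# child's keys over it, so child values win:
#   merged = CfgNode.load_yaml_with_base("child.yaml")  # returns a plain dict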
|
lqxisok/llSeg
|
configs/base.py
|
base.py
|
py
| 3,491 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "yacs.config.CfgNode",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "yaml.unsafe_load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "os.path.expanduser",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 74,
"usage_type": "name"
}
] |
71927845947
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import logging
import sys
import os
import urlparse
import xbmcgui
import xbmcplugin
import xbmcaddon
from resources.lib import loghandler
loghandler.config()
LOG = logging.getLogger()
PLUGIN_PATH = 'plugin://plugin.video.proof-of-concept'
__addon__ = xbmcaddon.Addon()
__addon_path__ = __addon__.getAddonInfo('path').decode('utf-8')
# Dummy video file with a length of 10min, 5s
VIDEO_FILE_PATH = os.path.join(__addon_path__, 'dummy-movie.mkv').encode('utf-8')
TOTAL_LENGTH = 10 * 60 + 5
RESUME = 5 * 60
def directory_item(label, path):
"""
Adds a directory ListItem via xbmcplugin.addDirectoryItem()
"""
listitem = xbmcgui.ListItem(label, path=path)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
url=path,
listitem=listitem,
isFolder=True)
def main_menu():
xbmcplugin.setContent(int(sys.argv[1]), 'files')
directory_item('Proof of concept',
'%s/?mode=demo' % PLUGIN_PATH)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_demo():
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
listitem = xbmcgui.ListItem('Demo video file',
path=VIDEO_FILE_PATH)
# PROOF-OF-CONCEPT: Let's add a resume point
listitem.setProperty("totaltime", str(TOTAL_LENGTH))
listitem.setProperty("resumetime", str(RESUME))
listitem.setProperty("StartOffset", str(RESUME))
# END
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
url=VIDEO_FILE_PATH,
listitem=listitem)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
if __name__ == '__main__':
LOG.info('Full sys.argv received: %s', sys.argv)
args = sys.argv[2][1:].decode('utf-8')
args = dict(urlparse.parse_qsl(args))
mode = args.get('mode')
if mode == 'demo':
show_demo()
else:
main_menu()
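# Editor's note (standard Kodi plugin convention, stated as an assumption):
# Kodi invokes this script with sys.argv = [plugin_url, handle, query_string],
# so sys.argv[1] is the integer handle used above and sys.argv[2] holds the
# '?mode=...' query parsed here.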
|
croneter/plugin.video.proof-of-concept
|
default.py
|
default.py
|
py
| 2,070 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "resources.lib.loghandler.config",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "resources.lib.loghandler",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xbmcaddon.Addon",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "xbmcgui.ListItem",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "xbmcplugin.addDirectoryItem",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "xbmcplugin.setContent",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "xbmcplugin.endOfDirectory",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "xbmcplugin.setContent",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "xbmcgui.ListItem",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "xbmcplugin.addDirectoryItem",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "xbmcplugin.endOfDirectory",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "urlparse.parse_qsl",
"line_number": 64,
"usage_type": "call"
}
] |
32659197304
|
from werkzeug.exceptions import ClientDisconnected
from flask import Flask, request
from flask import current_app
from flask_cache import Cache
from mongoengine import connect
from flask_superadmin import Admin
from flask_mail import Mail
from flaskext.markdown import Markdown
from flask_restful import Api
from reverse_proxied import ReverseProxied
from assets import assets
import json
class ExtensionAccessObject(object):
def __init__(self):
self.cache = Cache(current_app, config={'CACHE_TYPE': 'simple'})
self.mongo = connect(current_app.config["MONGO_DB"])
self.mail = Mail(current_app)
self.admin = Admin(current_app)
self.rest_api = Api(current_app, prefix="/api")
self.markdown = Markdown(current_app, safe_mode="escape")
self.assets = assets(current_app)
def construct_application(config_override=None):
# Setup App
application = Flask(__name__)
# Setup Extensions
ReverseProxied(application)
# Setup Jinja Env
application.jinja_env.add_extension('jinja2.ext.do')
from util import pretty_date_since, full_date
application.jinja_env.filters['pretty_date'] = pretty_date_since
application.jinja_env.filters['full_date'] = full_date
application.jinja_env.filters['json_dump'] = json.dumps
# Load local_config
with application.app_context():
from config import local_config
application.config.from_object(local_config)
application.config.from_object(config_override)
with application.app_context():
application.extension_access_object = ExtensionAccessObject()
# Load blueprints files
with application.app_context():
from config import blueprint_config
application.config.from_object(blueprint_config)
# Setup blueprints from config
for blueprint in application.config["BLUEPRINTS"]: # TODO: Find a way to replace this, its shit
application.register_blueprint(**blueprint)
# Read the git hash from a file. This should be set by the deploy script
try:
with open('version_hash', 'r') as version_file:
application.config['version_hash'] = version_file.readline()
except IOError:
application.config['version_hash'] = "DEVELOP"
# Setup airbrake/errbit
if application.config.get('AIRBRAKE_ENABLED', True):
from airbrake import AirbrakeErrorHandler
from flask.signals import got_request_exception
@got_request_exception.connect_via(application)
def log_exception(sender, exception, **extra):
if isinstance(exception, (ClientDisconnected, )):
return
handler = AirbrakeErrorHandler(
api_key=application.config['AIRBRAKE_API_KEY'],
api_url=application.config['AIRBRAKE_API_URL'],
env_name=application.config['version_hash'],
env_variables={'type': 'caught'},
request_url=request.url,
request_path=request.path,
request_method=request.method,
request_args=request.args,
request_headers=request.headers)
handler.emit(exception)
def log_error(exception):
handler = AirbrakeErrorHandler(
api_key=application.config['AIRBRAKE_API_KEY'],
api_url=application.config['AIRBRAKE_API_URL'],
env_name=application.config['version_hash'],
env_variables={'type': 'logged'},
request_url=request.url,
request_path=request.path,
request_method=request.method,
request_args=request.args,
request_headers=request.headers)
handler.emit(exception)
application.log_error = log_error
else:
def dummy_log_error(exception):
print(exception)
application.log_error = dummy_log_error
# Load debug stuffs
if application.config['DEBUG']:
with application.app_context():
import debug
debug.setup_env()
return application
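# Editor's usage sketch (assumes the config modules imported above exist):
#   app = construct_application()
#   app.run(debug=app.config['DEBUG'])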
|
JunctionAt/JunctionWWW
|
constructor.py
|
constructor.py
|
py
| 4,126 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask_cache.Cache",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.current_app",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "mongoengine.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask_mail.Mail",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.current_app",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "flask_superadmin.Admin",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.current_app",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "flask_restful.Api",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.current_app",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "flaskext.markdown.Markdown",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.current_app",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "assets.assets",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.current_app",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "flask.Flask",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "reverse_proxied.ReverseProxied",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "util.pretty_date_since",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "util.full_date",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "config.local_config",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "config.blueprint_config",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "werkzeug.exceptions.ClientDisconnected",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "airbrake.AirbrakeErrorHandler",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.request.url",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flask.request.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "flask.request.headers",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "flask.signals.got_request_exception.connect_via",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.signals.got_request_exception",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "airbrake.AirbrakeErrorHandler",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.request.url",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "flask.request.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "flask.request.headers",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "debug.setup_env",
"line_number": 111,
"usage_type": "call"
}
] |
18716841287
|
#! /user/bin/env python
# -*- coding:utf-8 -*-
'''
Crawl the category list information.
'''
import json
from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from douyin.items import DouyinCategoryItem
class categorySpider(CrawlSpider):
name = 'categorySpider'
redis_key = 'categorySpider'
cursor_num = 0
count_size = 10
url = "https://aweme.snssdk.com/aweme/v1/category/list/?version_code=181&count=10&cursor="
start_urls = [url + str(cursor_num)]
def parse(self, response):
jsonresp = json.loads(response.body_as_unicode())
if jsonresp['status_code'] == 0:
if jsonresp['has_more'] == 1:
aweme_list = list(jsonresp['category_list'])
for jsonobj in aweme_list:
item = self.init_item(jsonobj)
yield item
self.cursor_num += self.count_size
nexturl = self.url + str(self.cursor_num)
yield Request(nexturl, callback=self.parse)
else:
aweme_list = list(jsonresp['category_list'])
for jsonobj in aweme_list:
item = self.init_item(jsonobj)
yield item
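    # Editor's note (assumption): the endpoint pages by cursor. While
    # has_more == 1, parse() yields the items and re-queues url + cursor;
    # the final page (has_more == 0) is parsed without a follow-up Request.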
def init_item(self, jsonobj):
item = DouyinCategoryItem()
if str(jsonobj['desc']) == "热门挑战":
item['category_type'] = jsonobj['desc']
item['category_id'] = jsonobj['challenge_info']['cid']
item['category_desc'] = jsonobj['challenge_info']['desc']
item['category_title'] = jsonobj['challenge_info']['cha_name']
item['category_url'] = jsonobj['challenge_info']['schema']
item['category_user_count'] = jsonobj['challenge_info']['user_count']
else:
# print("执行热门音乐赋值")
item['category_type'] = jsonobj['desc']
item['category_title'] = jsonobj['music_info']['title']
item['category_id'] = jsonobj['music_info']['mid']
item['category_url'] = 'https://api.amemv.com/aweme/v1/music/aweme/?music_id=' + \
str(jsonobj['music_info']['mid'])
item['category_desc'] = jsonobj['music_info']['offline_desc']
item['category_user_count'] = jsonobj['music_info']['user_count']
return item
|
gisShield/douyin
|
douyin/spiders/categoryspider.py
|
categoryspider.py
|
py
| 2,313 |
python
|
en
|
code
| 24 |
github-code
|
6
|
[
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scrapy.http.Request",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "douyin.items.DouyinCategoryItem",
"line_number": 40,
"usage_type": "call"
}
] |
41040069910
|
import pyaudio
import wave
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.ticker import *
import numpy as np
import struct
import time
from scipy import interpolate
plt.style.use('gray-background')
class Fourier:
def __init__(self, scale, dt):
self.scale = scale
    def fourier(self, f):  # the human audible range is 20-20,000 Hz
        f = np.array([f]).reshape(-1)
        len_f = len(f)
        # resize
        inv_F_ = np.resize(f, int(len_f*self.scale))
        # resampling
        t = np.arange(0, len(inv_F_))
        f_linear = interpolate.interp1d(t, inv_F_, kind='cubic')
        t = np.arange(0, len(inv_F_)-1.0, self.scale)
        inv_F_ = f_linear(t)
        inv_F_ = np.array(inv_F_, dtype='int16')
        binv_F = struct.pack('h' * len(inv_F_), *inv_F_)  # convert to a binary (bytes) buffer
return binv_F
class Audio:
def __init__(self, chunk=2**10, format=pyaudio.paInt16, channels=1, rate=44100,
record_time=50, interval=0.01, output_path="./data/output.wav"):
        self.chunk = chunk  # buffer size
        self.format = format  # quantization bit depth (resolution); above 16 bit, differences are hard for humans to hear
        self.channels = channels  # number of microphones used for input
        self.rate = rate  # sampling rate
        self.record_time = record_time  # recording time
        self.interval = interval  # interval at which to draw the graph [ms]
        self.output_path = output_path  # output file name
        self.p = pyaudio.PyAudio()  # create the PyAudio instance
        self.stream = self.p.open(format=self.format,
                                  channels=self.channels,
                                  rate=self.rate,
                                  input=True, output=True,
                                  frames_per_buffer=self.chunk)  # configure the stream parameters
    def exit(self):
        self.stream.stop_stream()  # pause playback/recording
        self.stream.close()  # close the stream
        self.p.terminate()  # release the PyAudio instance
class Output:
def __init__(self, audio, scale=1):
self.audio = audio
del_x = 1/self.audio.rate
self.end_t = del_x*self.audio.chunk
self.scale = scale
#self.frames = []
def draw_init(self, ax):
ax.set_xlabel('Time')
ax.set_ylabel('Amplitude')
def draw(self):
frames = []
f = Fourier(scale=self.scale, dt=self.audio.interval)
print("Recording ...")
# for i in range(0, int(self.audio.rate / self.audio.chunk * self.audio.record_time)):
while self.audio.stream.is_active():
data = self.audio.stream.read(self.audio.chunk)
wavy_ = np.frombuffer(data, dtype='int16')
binv_F = f.fourier(wavy_)
self.audio.stream.write(binv_F)
# frames.append(binv_F)
print("Done.")
return frames
    def write(self, frames):  # write the recorded data to a wav file
wf = wave.open(self.audio.output_path, 'wb')
wf.setnchannels(self.audio.channels)
wf.setsampwidth(self.audio.p.get_sample_size(self.audio.format))
wf.setframerate(self.audio.rate*self.scale)
wf.writeframes(b''.join(frames))
wf.close()
if __name__=="__main__":
scale = 2.0
audio = Audio()
output=Output(audio, scale=scale)
frames = output.draw()
# output.write(frames)
audio.exit()
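# Editor's note (assumption): Fourier.fourier is a crude resampling-based pitch
# shifter; scale stretches each chunk via np.resize, resamples it on a grid of
# step `scale`, and write() compensates by saving at rate * scale.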
|
MoeMatsuda-ai/SWVC
|
test/fft_live_test/inout_live.py
|
inout_live.py
|
py
| 3,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.resize",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyaudio.paInt16",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "wave.open",
"line_number": 89,
"usage_type": "call"
}
] |
73027828029
|
import senticnet5 as sent_dict
import pandas as pd
import numpy as np
from itertools import islice
from sklearn.model_selection import train_test_split
import re
# returns numpy array
def get_ratings(ratings_filename):
return np.load(ratings_filename)
# returns array of document arrays with words
def get_reviews(reviews_filename):
reviews = []
with open(reviews_filename, "r") as f:
for line in f:
reviews.append([w.lower() for w in re.sub('[^A-Za-z \']+', "", line).split()])
return reviews
# returns word polarity: float
# if word not in dictionary return None
def word_polarity(word):
try:
return float(sent_dict.senticnet[word][7])
    except (KeyError, IndexError):
return None
# return average polarity of a given document
# if none of the words are in dictionary return None
# accounts all single words and combinations of 2 words
def document_polarity(doc):
polarity_sum = 0.0
num_words_accounted = 0
phrases = get_phrases(doc, 2)
for phrase in phrases:
current_polarity = word_polarity(phrase)
if current_polarity is not None:
polarity_sum += current_polarity
num_words_accounted += 1
if num_words_accounted > 0:
return polarity_sum / num_words_accounted
return None
# calculates polarities for given txt file with documents
# saves dictionary with average document polarity at given rating and number of rating occurrences
def train(filename):
print("TRAINING SIMPLE SENTIMENT")
results = {
0.0: [0.0, 0], # average polarity at given rating
1.0: [0.0, 0],
2.0: [0.0, 0],
3.0: [0.0, 0],
4.0: [0.0, 0],
"Undefined": [0.0, 0] # if polarity can't be determined use this to determine average rating for such occurrences
}
ratings = get_ratings(filename + "_ratings.npy")
reviews = get_reviews(filename + "_reviews.txt")
x_train, x_test, y_train, y_test = train_test_split(reviews, ratings, test_size=0.2, random_state=1)
for doc, rating in zip(x_train, y_train):
polarity = document_polarity(doc)
if polarity is None:
results["Undefined"][0] += rating
results["Undefined"][1] += 1
else:
results[rating][0] += polarity
results[rating][1] += 1
for key in results:
results[key][0] = results[key][0] / max(results[key][1], 1)
pd.DataFrame(results).to_csv(filename + "_polarities.csv")
# gives rating prediction based on closest average document polarity
def predictions(filename):
print("PREDICTING SIMPLE SENTIMENT")
predictions = []
ratings = get_ratings(filename + "_ratings.npy")
reviews = get_reviews(filename + "_reviews.txt")
rating_polarities = pd.read_csv(filename + "_polarities.csv")
default_rating = float(round(rating_polarities.loc[0, "Undefined"]))
polarities = rating_polarities[["0.0", "1.0", "2.0", "3.0", "4.0"]].iloc[0].tolist()
x_train, x_test, y_train, y_test = train_test_split(reviews, ratings, test_size=0.2, random_state=1)
for doc, rating in zip(x_test, y_test):
polarity = document_polarity(doc)
prediction = default_rating
if polarity is not None:
prediction = float(polarities.index(min(polarities, key=lambda x:abs(x - polarity))))
predictions.append(prediction)
pd_ratings = pd.Series(ratings[:len(predictions)], name="Actual")
pd_predictions = pd.Series(predictions, name="Predicted")
confusion_matrix = pd.crosstab(pd_predictions, pd_ratings)
return confusion_matrix
# generates exhaustible sliding window over a sequence
# [1, 2, 3, 4], n=2 => [1, 2], [2, 3], [3, 4], [4]
# [1, 2, 3, 4], n=3 => [1, 2, 3], [2, 3, 4], [3, 4], [4]
def get_windows(sequence, n):
windows = []
for i, x in enumerate(sequence):
windows.append(list(islice(sequence, i, i+n)))
return windows
# returns all combinations retaining the order
# eg. 1, 2, 3 => 1, 1_2, 1_2_3
def get_combinations(sequence):
combinations = []
for i, x in enumerate(sequence):
combinations.append("_".join(sequence[:i] + [x]))
return combinations
# returns all possible combinations with a sliding window
# eg. window_size = 2
# [1, 2, 3, 4] => 1, 1_2, 2, 2_3, 3, 3_4, 4
def get_phrases(doc, window_size):
phrases = []
for window in get_windows(doc, window_size):
phrases += get_combinations(window)
return phrases
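# Editor's worked example (hypothetical tokens):
#   get_phrases(["not", "good"], 2) -> ["not", "not_good", "good"]
# so both unigrams and the joined bigram get looked up in senticnet.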
|
jgombac/RatingPredictor
|
simple_sentiment.py
|
simple_sentiment.py
|
py
| 4,427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "senticnet5.senticnet",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pandas.crosstab",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "itertools.islice",
"line_number": 115,
"usage_type": "call"
}
] |
26459920205
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..nnutils import geom_utils
from ..nnutils import loss_utils
from ..nnutils import train_utils
from ..nnutils import discriminators
from ..nnutils.smr import SoftRenderer
from ..nnutils import cub_mesh_s1 as mesh_net
from ..nnutils.nmr_pytorch import NeuralRenderer
from ..data import cub as cub_data
from ..utils import image as image_utils
from ..utils import tf_visualizer
from ..utils.tf_visualizer import Visualizer as TfVisualizer
import os
import time
import copy
import numpy as np
import os.path as osp
from absl import app, flags
from collections import OrderedDict
import torch
import torchvision
import soft_renderer as sr
import torchvision.utils as vutils
# Weights:
flags.DEFINE_float('mask_loss_wt', 3.0, 'mask loss weight')
flags.DEFINE_float('grl_wt', .2, 'gradient reversal layer weight')
flags.DEFINE_float('gan_loss_wt', 1., 'adversarial training weight')
flags.DEFINE_float('triangle_reg_wt', 0.15, 'weights to triangle smoothness prior')
flags.DEFINE_float('flatten_reg_wt', 0.0004, 'weights to flatten smoothness prior')
flags.DEFINE_float('deform_reg_wt', 5., 'reg to deformation')
flags.DEFINE_float('ori_reg_wt', 0.4, 'reg to orientation')
flags.DEFINE_float('stop_ori_epoch', 3., 'when to stop using this constraint')
flags.DEFINE_float('tex_loss_wt', 3.0, 'weights to tex loss')
flags.DEFINE_float('tex_dt_loss_wt', 3.0, 'weights to tex dt loss')
flags.DEFINE_float('tex_cycle_loss_wt', .5, 'weights to tex cycle loss')
# Data:
flags.DEFINE_integer('image_size', 256, 'training image size')
# Model:
flags.DEFINE_string('renderer_type', 'softmax', 'choices are [hard, softmax]')
flags.DEFINE_boolean('use_gan', True, 'If true uses GAN training')
flags.DEFINE_boolean('pred_cam', True, 'If true predicts camera')
flags.DEFINE_boolean('detach_shape', True, 'If true detach shape from the texture branch.')
flags.DEFINE_boolean('detach_cam', True, 'If true detach camera from the texture branch.')
flags.DEFINE_boolean('use_scops', False, 'If true read part segmentations in the loader.')
flags.DEFINE_integer('update_template_freq', 5, 'template update frequency')
flags.DEFINE_integer('axis', 1, 'symmetric axis')
opts = flags.FLAGS
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'cachedir')
class ShapenetTrainer(train_utils.Trainer):
def define_model(self):
opts = self.opts
# define model
self.symmetric = opts.symmetric
img_size = (opts.img_size, opts.img_size)
self.model = mesh_net.MeshNet(
img_size, opts, nz_feat=opts.nz_feat,
axis = opts.axis)
self.model = self.model.cuda()
if(opts.multi_gpu):
self.model = torch.nn.DataParallel(self.model)
if(opts.use_gan):
self.discriminator = discriminators.Discriminator(lambda_ = opts.grl_wt,
img_size = opts.image_size)
self.discriminator = self.discriminator.cuda()
if(opts.multi_gpu):
self.discriminator = torch.nn.DataParallel(self.discriminator)
if(opts.multi_gpu):
faces = self.model.module.faces.view(1, -1, 3)
else:
faces = self.model.faces.view(1, -1, 3)
self.faces = faces.repeat(opts.batch_size, 1, 1)
# define renderers
self.renderer = SoftRenderer(opts.image_size, opts.renderer_type)
self.dis_renderer = SoftRenderer(opts.image_size, opts.renderer_type)
self.hard_renderer = SoftRenderer(opts.image_size, "hard")
if opts.use_texture:
self.tex_renderer = SoftRenderer(opts.image_size, opts.renderer_type)
self.tex_renderer.ambient_light_only()
self.vis_renderer = NeuralRenderer(opts.image_size)
self.vis_renderer.ambient_light_only()
self.vis_renderer.set_bgcolor([1, 1, 1])
self.vis_renderer.set_light_dir([0, 1, -1], 0.4)
self.iter_time = 0
return
def init_dataset(self):
opts = self.opts
self.data_module = cub_data
self.dataloader = self.data_module.data_loader(opts)
self.resnet_transform = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def define_criterion(self):
# shape objectives
self.mask_loss_fn = loss_utils.neg_iou_loss
if(opts.multi_gpu):
verts = self.model.module.get_mean_shape().cpu()
faces = self.model.module.faces.cpu()
else:
verts = self.model.get_mean_shape().cpu()
faces = self.model.faces.cpu()
self.laplacian_loss_fn = sr.LaplacianLoss(verts, faces).cuda()
self.flatten_loss_fn = sr.FlattenLoss(faces).cuda()
if(opts.multi_gpu):
self.laplacian_loss_fn = torch.nn.DataParallel(self.laplacian_loss_fn)
self.flatten_loss_fn = torch.nn.DataParallel(self.flatten_loss_fn)
# shape constraints
self.deform_reg_fn = loss_utils.deform_l2reg
self.ori_reg_fn = loss_utils.sym_reg
self.gan_loss_fn = torch.nn.functional.binary_cross_entropy_with_logits
# texture objectives
if self.opts.use_texture:
self.texture_loss = loss_utils.PerceptualTextureLoss()
self.texture_dt_loss_fn = loss_utils.texture_dt_loss
self.texture_cycle_fn = loss_utils.TexCycle(int(opts.batch_size/opts.gpu_num))
self.texture_cycle_fn = self.texture_cycle_fn.cuda()
if(opts.multi_gpu):
self.texture_cycle_fn = torch.nn.DataParallel(self.texture_cycle_fn)
def set_input(self, batch):
opts = self.opts
input_img_tensor = batch['img'].type(torch.FloatTensor)
for b in range(input_img_tensor.size(0)):
input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])
img_tensor = batch['img'].type(torch.FloatTensor)
mask_tensor = batch['mask'].type(torch.FloatTensor)
self.input_imgs = input_img_tensor.cuda()
self.imgs = img_tensor.cuda()
self.masks = mask_tensor.cuda()
if(opts.use_texture):
# Compute barrier distance transform.
mask_dts = np.stack([image_utils.compute_dt_barrier(m) for m in mask_tensor])
dt_tensor = torch.FloatTensor(mask_dts).cuda()
self.dts_barrier = dt_tensor.unsqueeze(1)
def forward(self):
opts = self.opts
outputs = self.model.forward(self.input_imgs)
# shape
self.delta_v = outputs['delta_v']
if(opts.symmetric):
if(opts.multi_gpu):
delta_v = self.model.module.symmetrize(self.delta_v)
self.mean_shape = self.model.module.get_mean_shape()
else:
delta_v = self.model.symmetrize(self.delta_v)
self.mean_shape = self.model.get_mean_shape()
else:
delta_v = self.delta_v
self.pred_vs = self.mean_shape + delta_v
# camera
proj_cam = outputs['cam']
self.proj_cam = proj_cam
# shape losses
self.pred_seen, _, _ = self.renderer.forward(self.pred_vs, self.faces, proj_cam)
self.mask_pred_seen = self.pred_seen[:,3,:,:]
self.mask_loss = self.mask_loss_fn(self.mask_pred_seen, self.masks)
self.triangle_loss = self.laplacian_loss_fn(self.pred_vs).mean()
self.flatten_loss = self.flatten_loss_fn(self.pred_vs).mean()
self.deform_loss = self.deform_reg_fn(self.delta_v)
self.ori_loss = self.ori_reg_fn(self.pred_vs)
# texture losses
if(opts.use_texture):
self.tex_flow = outputs['tex_flow']
self.uvimage_pred = outputs['uvimage_pred']
self.tex = geom_utils.sample_textures(self.tex_flow, self.imgs)
self.tex = self.tex.contiguous()
bs, fs, ts, _, _ = self.tex.size()
self.tex = self.tex.view(bs, fs, -1, 3)
texture_rgba, p2f_info, _ = self.tex_renderer.forward(self.pred_vs.detach(), self.faces, proj_cam.detach(), self.tex)
self.texture_pred = texture_rgba[:,0:3,:,:]
self.tex_loss = self.texture_loss(self.texture_pred, self.imgs, self.masks, self.mask_pred_seen)
self.tex_dt_loss = self.texture_dt_loss_fn(self.tex_flow, self.dts_barrier)
# texture cycle loss
_, _, aggr_info = self.hard_renderer(self.pred_vs.detach(), self.faces, proj_cam.detach())
aggr_info = aggr_info[:, 1, :, :].view(bs, -1)
tex_cycle_loss, self.avg_flow = self.texture_cycle_fn(self.tex_flow, p2f_info.detach(), aggr_info.detach())
# The mean is used to collect loss from different GPUs
self.tex_cycle_loss = torch.mean(tex_cycle_loss)
self.p2f_info = p2f_info
if(opts.use_gan):
# render at unobserved view
angles = np.random.randint(0, 180, size=bs)
random_cams = geom_utils.rotate_cam(proj_cam.detach(), angles)
pred_unseen, _, _ = self.dis_renderer.forward(self.pred_vs, self.faces, random_cams)
self.mask_pred_unseen = pred_unseen[:,3,:,:]
pred = torch.cat((self.pred_seen.detach(), pred_unseen))
gan_labels = torch.cat((torch.ones(self.pred_seen.shape[0]),
torch.zeros(pred_unseen.shape[0])), dim = 0)
gan_labels = gan_labels.cuda()
gan_preds = self.discriminator(pred[:,3,:,:].unsqueeze(1))
self.gan_loss = self.gan_loss_fn(gan_preds.squeeze(), gan_labels)
# add up all losses
# shape
self.total_loss = self.mask_loss * opts.mask_loss_wt
self.total_loss += self.triangle_loss * opts.triangle_reg_wt
self.total_loss += self.flatten_loss * opts.flatten_reg_wt
if(self.curr_epoch < opts.stop_ori_epoch):
# constrain prediction to be symmetric on the given axis
self.total_loss += self.ori_loss * opts.ori_reg_wt
if(self.curr_epoch > opts.update_template_freq):
# constrain prediction from deviating from template
self.total_loss += self.deform_loss * opts.deform_reg_wt
# texture
if(opts.use_texture):
self.total_loss += self.tex_loss * opts.tex_loss_wt
self.total_loss += self.tex_dt_loss * opts.tex_dt_loss_wt
self.total_loss += self.tex_cycle_loss * opts.tex_cycle_loss_wt
# GAN
if(opts.use_gan):
self.total_loss += self.gan_loss * opts.gan_loss_wt
def get_current_visuals(self):
vis_dict = {}
# UV maps
if self.opts.use_texture:
uv_flows = self.uvimage_pred
uv_flows = uv_flows.permute(0, 2, 3, 1)
uv_images = torch.nn.functional.grid_sample(self.imgs, uv_flows)
vis_dict['uv_images'] = uv_images
# mask
vis_dict['mask_pred'] = self.mask_pred_seen.unsqueeze(1)
nb, nf, _, nc = self.tex.size()
tex = self.tex.detach().view(nb, nf, opts.tex_size, opts.tex_size, nc).unsqueeze(4).repeat(1, 1, 1, 1, opts.tex_size, 1)
vis_dict['mask_gt'] = self.masks.unsqueeze(1)
# image
vis_dict['image_pred'] = self.vis_renderer(self.pred_vs.detach(), self.faces, self.proj_cam.detach(), tex)
vis_dict['image_gt'] = self.imgs * self.masks.unsqueeze(1).repeat(1, 3, 1, 1)
# instance mesh
if(self.opts.use_texture):
mesh_ = sr.Mesh(self.pred_vs[0], self.faces[0], self.tex[0].view(self.faces.size(1),-1,3))
else:
mesh_ = sr.Mesh(self.pred_vs[0], self.faces[0])
vis_dict['mesh'] = mesh_
# template mesh
if(opts.multi_gpu):
template_mesh_ = sr.Mesh(self.model.module.get_mean_shape(), self.faces[0])
else:
template_mesh_ = sr.Mesh(self.model.get_mean_shape(), self.faces[0])
vis_dict['template_mesh'] = template_mesh_
return vis_dict
def get_current_scalars(self):
opts = self.opts
sc_dict = OrderedDict([
('smoothed_total_loss', self.smoothed_total_loss),
('total_loss', self.total_loss),
('mask_loss', self.mask_loss),
('tri_loss', self.triangle_loss),
('flatten_loss', self.flatten_loss),
('deform_loss', self.deform_loss),
('ori_loss', self.ori_loss),
('lr', self.optimizer.param_groups[0]['lr']),
('iter_time', self.iter_time),
])
if opts.use_texture:
sc_dict['tex_loss'] = self.tex_loss
sc_dict['tex_dt_loss'] = self.tex_dt_loss
sc_dict['tex_cycle_loss'] = self.tex_cycle_loss
return sc_dict
'''Override the train function to support template updates.'''
def train(self):
opts = self.opts
self.visualizer = TfVisualizer(opts)
self.smoothed_total_loss = 0
visualizer = self.visualizer
total_steps = 0
optim_steps = 0
dataset_size = len(self.dataloader)
for epoch in range(opts.num_pretrain_epochs, opts.num_epochs):
epoch_iter = 0
self.curr_epoch = epoch
for i, batch in enumerate(self.dataloader):
self.iteration_num += 1
self.adjust_learning_rate(self.optimizer)
t_init = time.time()
self.set_input(batch)
t_batch = time.time()
if not self.invalid_batch:
optim_steps += 1
self.optimizer.zero_grad()
start_time = time.time()
self.forward()
# EMA of the loss for logging; .item() detaches the value so the autograd graph is not retained across iterations
self.smoothed_total_loss = self.smoothed_total_loss*0.99 + 0.01*self.total_loss.item()
t_forw = time.time()
self.total_loss.backward()
t_backw = time.time()
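# the optimizer steps only every opts.optim_bs iterations; note that zero_grad() above runs every iteration, so gradients from the skipped iterations are discarded rather than accumulated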
if optim_steps % opts.optim_bs == 0:
self.optimizer.step()
end_time = time.time()
self.iter_time = end_time - start_time
t_opt = time.time()
total_steps += 1
epoch_iter += 1
if opts.display_visuals and (total_steps % opts.display_freq == 0):
iter_end_time = time.time()
vis_dict = self.get_current_visuals()
for k,v in vis_dict.items():
if('mesh' in k):
v.save_obj(os.path.join(self.vis_dir,'{}.obj'.format(k)), save_texture=True)
else:
vutils.save_image(v, os.path.join(self.vis_dir, k + '.png'))
print(tf_visualizer.green("Visualization saved at {}.".format(self.vis_dir)))
if opts.print_scalars and (total_steps % opts.print_freq == 0):
scalars = self.get_current_scalars()
visualizer.print_current_scalars(epoch, epoch_iter, scalars)
if total_steps % opts.save_latest_freq == 0:
print(tf_visualizer.green('saving the latest model at epoch {:d}, iters {:d}'.format(epoch, total_steps)))
self.save('latest')
if total_steps == opts.num_iter:
return
# update template
if((epoch+1) % opts.update_template_freq == 0):
print(tf_visualizer.green('Updating template...'))
self.feat = torch.zeros(opts.batch_size, opts.z_dim)
self.feat = self.feat.cuda()
# compute average encoder features
for i, batch in enumerate(self.dataloader):
self.set_input(batch)
with torch.no_grad():
outputs = self.model(self.input_imgs)
self.feat += outputs['feat']
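# note: this accumulation assumes every batch yields a full (batch_size, z_dim) tensor; a smaller final batch would make the addition fail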
self.feat = self.feat / (i + 1)
self.feat = torch.mean(self.feat, dim=0).unsqueeze(0)
# feed averaged features into the shape decoder
if(opts.multi_gpu):
with torch.no_grad():
delta_v = self.model.module.shape_predictor(self.feat)
self.model.module.mean_v += delta_v.squeeze()
else:
with torch.no_grad():
delta_v = self.model.shape_predictor(self.feat)
self.model.mean_v += delta_v.squeeze()
print(tf_visualizer.green('Template updated.'))
if (epoch+1) % opts.save_epoch_freq == 0:
print(tf_visualizer.green('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps)))
self.save('latest')
self.save(epoch+1)
def main(_):
torch.manual_seed(0)
trainer = ShapenetTrainer(opts)
trainer.init_training()
trainer.train()
if __name__ == '__main__':
app.run(main)
|
NVlabs/UMR
|
experiments/train_s1.py
|
train_s1.py
|
py
| 17,158 |
python
|
en
|
code
| 223 |
github-code
|
6
|
[
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_float",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_integer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_string",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_boolean",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_boolean",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_boolean",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_boolean",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_boolean",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_integer",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_integer",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "absl.flags.FLAGS",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "absl.flags",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "nnutils.train_utils.Trainer",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "nnutils.train_utils",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "nnutils.cub_mesh_s1.MeshNet",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "nnutils.cub_mesh_s1",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "nnutils.discriminators.Discriminator",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "nnutils.discriminators",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "nnutils.smr.SoftRenderer",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "nnutils.smr.SoftRenderer",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "nnutils.smr.SoftRenderer",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "nnutils.smr.SoftRenderer",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "nnutils.nmr_pytorch.NeuralRenderer",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "data.cub",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils.neg_iou_loss",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "soft_renderer.LaplacianLoss",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "soft_renderer.FlattenLoss",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils.deform_l2reg",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "nnutils.loss_utils.sym_reg",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils.PerceptualTextureLoss",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "nnutils.loss_utils",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "nnutils.loss_utils.texture_dt_loss",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "nnutils.loss_utils",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "nnutils.loss_utils.TexCycle",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "nnutils.loss_utils",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "numpy.stack",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "utils.image.compute_dt_barrier",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "utils.image",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "torch.FloatTensor",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "nnutils.geom_utils.sample_textures",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "nnutils.geom_utils",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "torch.mean",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "nnutils.geom_utils.rotate_cam",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "nnutils.geom_utils",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.grid_sample",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "soft_renderer.Mesh",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "soft_renderer.Mesh",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "soft_renderer.Mesh",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "soft_renderer.Mesh",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer.Visualizer",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "utils.tf_visualizer.green",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "utils.tf_visualizer.green",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "utils.tf_visualizer.green",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer.green",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "utils.tf_visualizer.green",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "utils.tf_visualizer",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "torch.manual_seed",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "absl.app.run",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "absl.app",
"line_number": 411,
"usage_type": "name"
}
] |
37392717665
|
import streamlit as st
import requests,random,time
from deta import Deta
deta = Deta(st.secrets['key'])
# open (or create) the "usernames" Base, a simple key-value store
db = deta.Base("usernames")
st.set_page_config(page_title="Github Shoutout",page_icon="images/githublogo.png",layout="centered",initial_sidebar_state="auto") # setting the page config
def verifying(username):
if username:
try:
api_url = f"https://api.github.com/users/{username}" # api url
response = requests.get(api_url) # get response
data = response.json() # parse data as json
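# note: unauthenticated GitHub API requests are rate-limited (60 per hour per IP)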
if db.get(username):
st.warning("Username already exists")
elif data["followers"] and data["name"] and data["bio"]: # if followers or following is not zero
db.put({"key":username}) # add entryin database with key lowercase username
st.success("Username stored in database.")
else:
st.error("Sorry, you don't have followers or your name and bio is not setup")
except Exception as e: # if username is not valid
print(e)
st.error("Invalid github username")
def random_username():
names = db.fetch().items
github_username = random.choice(names)['key'] # pick a random stored username
try:
api_url = f"https://api.github.com/users/{github_username}" # api url
response = requests.get(api_url)
data = response.json()
acc_link=data['html_url']
st.markdown(f"""<div id='container'><img id='pfp' src="https://github.com/{github_username}.png" alt="github profile pic"/>
<h3>Name: {data['name']}</h3>
<p id="bio">Bio: {data['bio']}</p>
<p id="ff">Followers: {data["followers"]} | Following: {data["following"]}</p>
<table>
<tr>
<th>Stats</th>
<th>Streak</th>
<th>Languages</th>
</tr>
<tr>
<td><img src='http://github-profile-summary-cards.vercel.app/api/cards/stats?username={github_username}&theme=github_dark' width=200px height=100px></td>
<td><img src='https://streak-stats.demolab.com?user={github_username}&theme=github-dark&hide_border=true&border_radius=32&date_format=j%20M%5B%20Y%5D&ring=888888' width=180px height=100px></td>
<td><img src='http://github-profile-summary-cards.vercel.app/api/cards/repos-per-language?username={github_username}&theme=github_dark' width= 200px height=100px></td>
</tr>
</table><br><br>
<a target="_blank" href="{acc_link}">
<button id='btn'>
Follow {github_username} on GitHub
</button><br><br>
</a></div>""",unsafe_allow_html=True) #displaying the data
except Exception as e:
st.error("Something went wrong, try again later")
def main():
st.markdown("""<a href='https://github.com/samadpls/Github-Shoutout'><img src='https://img.shields.io/github/stars/samadpls/Github-Shoutout?color=red&label=star%20me&logoColor=red&style=social'></a>""",unsafe_allow_html=True)
img , heading = st.columns([1,8]) # using columns to display the heading and image
with img:
st.image("images/githublogo.png",width=70) # github logo
with heading:
st.markdown('# Shoutout to Github User') # heading
st.markdown("`Click on the button to see the profile`") # description
if st.button("Press Me"):
with st.spinner('Wait for it...'):
time.sleep(2)
random_username()
#New username
with st.expander("Add your profile :"): # sub header
text = st.empty()
username=text.text_input("Enter your github username",max_chars=40)
st.markdown(""" `
Made with 🤍 by samadpls
`
""") # footer
verifying(username.strip().lower())
if __name__=="__main__":
with open('styles.css') as f:
st.markdown(f"<style>{f.read()}</style>",unsafe_allow_html=True) # loading the css file
main()
|
samadpls/Github-Shoutout
|
app.py
|
app.py
|
py
| 4,179 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "deta.Deta",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "streamlit.secrets",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "deta.Base",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.set_page_config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.warning",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamlit.success",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.error",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "streamlit.error",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "streamlit.error",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "streamlit.image",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "streamlit.expander",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "streamlit.empty",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 93,
"usage_type": "call"
}
] |
29836585751
|
# #Unzip the test directory
# !unzip drive/My\ Drive/CatVSDog/test1.zip
# #Unzip the train directory
# !unzip drive/My\ Drive/CatVSDog/train.zip
# Plotting the images of dog
import shutil
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.layers import Conv2D
from keras.models import Sequential
import random
import os
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from numpy import save
from numpy import asarray
from os import listdir
from matplotlib import pyplot
from matplotlib.image import imread
folder = 'train/'
for i in range(9):
# define subplot
pyplot.subplot(330+1+i)
# define the filename
filename = folder + 'dog.'+str(i)+'.jpg'
# load image pixels
image = imread(filename)
# plot raw pixel data
pyplot.imshow(image)
pyplot.show()
# Plotting the images of cat
folder = 'train/'
for i in range(9):
# define subplot
pyplot.subplot(330+1+i)
# define the filename
filename = folder + 'cat.'+str(i)+'.jpg'
# load image pixels
image = imread(filename)
# plot raw pixel data
pyplot.imshow(image)
pyplot.show()
# define location of dataset
folder = 'train/'
photos, labels = list(), list()
# enumerate files in the directory
# for file in listdir(folder):
# #determine class
# output = 0.0
# if file.startswith('cat'):
# output = 1.0
# #load image
# photo = load_img(folder+file,target_size = (200,200))
# photo = img_to_array(photo)
# #store
# photos.append(photo)
# labels.append(output)
# #convert to a numpy arrays
# photos = asarray(photos)
# labels = asarray(labels)
# print(photos.shape,labels.shape)
# #save the reshaped photos
# save('dogs_vs_cats_photos.npy',photos)
# save('dogs_vs_cats_labels.npy',labels)
# #loading from numpy data
# from numpy import load
# photos = load('dogs_vs_cats_photos.npy')
# labels = load('dogs_vs_cats_labels.npy')
# print(photos.shape,labels.shape)
# Alternate method
# create separate class subdirectories (dogs/, cats/) under train/ and test/, as flow_from_directory expects
dataset_home = 'dataset_dogs_vs_cats/'
subdirs = ['train/', 'test/']
for subdir in subdirs:
labeldirs = ['dogs/', 'cats/']
for labeldir in labeldirs:
newdir = dataset_home+subdir+labeldir
os.makedirs(newdir, exist_ok=True)
print("DONE")
# Partitioning the test and train sets
random.seed(1)
val_ratio = 0.25
src_directory = 'train/'
for file in listdir(src_directory):
src = src_directory + file
dst_dir = 'train/'
if random.random() < val_ratio:
dst_dir = 'test/'
if file.startswith('cat'):
dst = dataset_home+dst_dir+'cats/'+file
shutil.copyfile(src, dst)
elif file.startswith('dog'):
dst = dataset_home + dst_dir+'dogs/'+file
shutil.copyfile(src, dst)
# Initialising the CNN
classifier = Sequential()
# Convolution
classifier.add(Conv2D(32, (3, 3), input_shape=(
200, 200, 3), activation='relu'))
# Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Flattening
classifier.add(Flatten())
# Full connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
# Loading the model
# classifier.load_weights("/kaggle/output/weights.best.hdf5")
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('dataset_dogs_vs_cats/train/',
target_size=(200, 200),
batch_size=32,
class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset_dogs_vs_cats/test/',
target_size=(200, 200),
batch_size=32,
class_mode='binary')
# Path where the best checkpoint is stored after each epoch
filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(
filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
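# note: steps_per_epoch=8000 with batch_size=32 re-cycles the training generator many times within a single "epoch"; len(training_set) would visit each image exactly once per epoch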
classifier.fit_generator(training_set,
steps_per_epoch=8000,
epochs=50,
validation_data=test_set,
callbacks=callbacks_list,
validation_steps=2000)
|
mcaupybugs/CatsVSDogs
|
catvsdog.py
|
catvsdog.py
|
py
| 4,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.image.imread",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.image.imread",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "shutil.copyfile",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "shutil.copyfile",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 166,
"usage_type": "call"
}
] |
38684469232
|
# pylint: disable=attribute-defined-outside-init,wrong-import-order,redefined-outer-name,invalid-name
import gc
from configparser import ConfigParser
from tempfile import TemporaryDirectory
import magic
import pytest
from storage.binary_service import BinaryService
from storage.db_interface_backend import BackEndDbInterface
from storage.MongoMgr import MongoMgr
from test.common_helper import create_test_firmware, get_config_for_testing, store_binary_on_file_system
TEST_FW = create_test_firmware()
@pytest.fixture
def binary_service():
with TemporaryDirectory(prefix='fact_test_') as tmp_dir:
config = get_config_for_testing(temp_dir=tmp_dir)
mongo_server = MongoMgr(config=config)
_init_test_data(config, tmp_dir)
yield BinaryService(config=config)
mongo_server.shutdown()
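# explicit GC helps release lingering DB connections between test runs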
gc.collect()
def _init_test_data(config: ConfigParser, tmp_dir: str):
backend_db_interface = BackEndDbInterface(config=config)
backend_db_interface.add_firmware(TEST_FW)
store_binary_on_file_system(tmp_dir, TEST_FW)
backend_db_interface.shutdown()
def test_get_binary_and_file_name(binary_service):
binary, file_name = binary_service.get_binary_and_file_name(TEST_FW.uid)
assert file_name == TEST_FW.file_name, 'file_name not correct'
assert binary == TEST_FW.binary, 'binary not correct'
def test_get_binary_and_file_name_invalid_uid(binary_service):
binary, file_name = binary_service.get_binary_and_file_name('invalid_uid')
assert binary is None, 'should be none'
assert file_name is None, 'should be none'
def test_get_repacked_binary_and_file_name(binary_service):
tar, file_name = binary_service.get_repacked_binary_and_file_name(TEST_FW.uid)
assert file_name == f'{TEST_FW.file_name}.tar.gz', 'file_name not correct'
file_type = magic.from_buffer(tar, mime=False)
assert 'gzip compressed data' in file_type, 'result is not a tar.gz file'
def test_get_repacked_binary_and_file_name_invalid_uid(binary_service):
binary, file_name = binary_service.get_repacked_binary_and_file_name('invalid_uid')
assert binary is None, 'should be none'
assert file_name is None, 'should be none'
def test_read_partial_binary(binary_service):
partial_binary = binary_service.read_partial_binary(TEST_FW.uid, 30, 14)
assert len(partial_binary) == 14
assert partial_binary == b'get_files_test', 'partial binary content not correct'
def test_read_partial_binary_invalid_uid(binary_service):
result = binary_service.read_partial_binary('invalid_uid', 0, 1337)
assert result == b'', 'result should be empty'
|
5am1i/Fact
|
src/test/integration/storage/test_binary_service.py
|
test_binary_service.py
|
py
| 2,626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "test.common_helper.create_test_firmware",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "test.common_helper.get_config_for_testing",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "storage.MongoMgr.MongoMgr",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "storage.binary_service.BinaryService",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "storage.db_interface_backend.BackEndDbInterface",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "test.common_helper.store_binary_on_file_system",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "magic.from_buffer",
"line_number": 52,
"usage_type": "call"
}
] |
15426226201
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), 'classes'))
from classes import data_manager
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import StackingClassifier, VotingClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Embedding, GlobalMaxPool1D, Conv1D
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import autokeras as ak
from xgboost import XGBRegressor
from xgboost import XGBClassifier
# Prediction models:
# regressors and classifiers that take a positional embedding vector as input
# and output story points (or another impact-related value)
seed=42
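# Minimal usage sketch (X_train/y_train/X_test are hypothetical embedding matrices and labels):
#   gs = classifiers.create_SVC(X_train, y_train)  # returns a fitted GridSearchCV
#   y_pred = gs.predict(X_test)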
class regressors:
@staticmethod
def get_autokeras_paraphrase5():
model = load_model("regression_models/autokeras5_desc_paraphrase_rmse", custom_objects=ak.CUSTOM_OBJECTS)
return model
@staticmethod
def get_autokeras_roberta3_mae():
model = load_model("regression_models/autokeras3_roberta_mae", custom_objects=ak.CUSTOM_OBJECTS)
return model
@staticmethod
def keras_convolutional(X_train, y_train, X_test, y_test, vocab_size, max_len):
#https://realpython.com/python-keras-text-classification/
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True, mode='min')
]
model = Sequential()
model.add(Embedding(input_dim=vocab_size+1,
output_dim=50,
input_length=max_len))
model.add(Conv1D(50, 5, activation='relu'))
model.add(GlobalMaxPool1D())
model.add(Dense(units=25, activation='relu'))
model.add(Dense(units=1, activation='relu'))
model.compile(optimizer=Adam(learning_rate=0.0001),
loss='mae',
metrics=['mse'],
run_eagerly=True)
history = model.fit(X_train, y_train,
epochs=15,
verbose=True,
validation_data=(X_test, y_test),
batch_size=50,
callbacks=callbacks)
return model
@staticmethod
def create_MLP(X, y):
model = MLPRegressor(random_state=seed)  # GridSearchCV below handles fitting, so no separate pre-fit is needed
pipe = Pipeline([('mlp', model)])
param_grid = {
'mlp__solver': ['sgd'],
'mlp__alpha': [0.01],
'mlp__learning_rate_init': [0.0001],
'mlp__max_iter': [300]
}
gs = gridsearch(pipe, param_grid, 'neg_mean_squared_error')
gs.fit(X, y)
data_manager.print_gridsearch_best_stats(gs)
return gs  # return the fitted search, consistent with the other factory methods
@staticmethod
def create_SVR(X, y):
model = svm.SVR()
pipe = Pipeline([('standardize', StandardScaler()),
('svr', model)])
param_grid = {
'svr__C': [1.75], #1.957,1.8,2,1.7 #multi lang 1.75
'svr__gamma': ['scale'],
'svr__kernel': ['rbf'],
'svr__epsilon': [0.01], #0.1,0.01 #multi lang 0.01
'svr__degree': [2] #2,3,4
}
gs = gridsearch(pipe, param_grid, 'neg_mean_absolute_error') #neg_mean_squared_error
gs.fit(X, y)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod
def create_Randomforest(X, y):
model = RandomForestRegressor(random_state=seed, n_estimators=300, min_samples_leaf=4, max_depth=20)
pipe = Pipeline([('rtree', model)])
param_grid = {
# 'rtree__n_estimators': [300],
# 'rtree__min_samples_leaf': [4],
# 'rtree__max_depth': [20]
}
gs = gridsearch(pipe, param_grid, 'neg_mean_absolute_error')
gs.fit(X, y)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod
def create_XGBregressor(X_train, y_train):
model = XGBRegressor(learning_rate=0.001,
n_estimators=400,
n_jobs=5,
random_state=seed)
pipe = Pipeline([('XGB', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'neg_mean_squared_error')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs  # gs holds the fitted search; the bare model above was never fitted
@staticmethod
def keras_sequential_network(X_train, y_train, X_test, y_test, lr=0.001):
input_dim = len(X_train[0])
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True, mode='min')
]
model = Sequential()
model.add(Dense(100, input_dim=input_dim, kernel_initializer='normal', activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer=Adam(learning_rate=lr), metrics=['mse', 'mae'], run_eagerly=True)
model.fit(X_train, y_train,
epochs=15,
verbose=True,
validation_data=(X_test, y_test),
batch_size=50,
callbacks=callbacks)
# a raw Keras model is not an sklearn estimator, so wrapping it in Pipeline/GridSearchCV would fail; the model is already trained above
return model
class classifiers(object):
@staticmethod #0.7133 - F 0.7220 - H 0.7281 - H2 0.73152
def create_mlpclassifier(X_train, y_train):
model = MLPClassifier(random_state=seed)
pipe = Pipeline([('standardize', StandardScaler()),
('mlp', model)])  # step must be named 'mlp' to match the 'mlp__*' grid below
param_grid = {
'mlp__max_iter':[200], #200, 400, 600, 800 | 200
'mlp__solver':['adam'], #'adam', 'lbfgs' | 'adam'
'mlp__alpha':[0.001], #0.0001, 0.001 | 0.001
'mlp__batch_size':[50], #100, 150, 200, 400 | 50
'mlp__learning_rate_init':[0.0001] #0.01, 0.001, 0.0001 | 0.0001
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6709 - F 0.6817
def create_Randomforest(X_train, y_train):
model = RandomForestClassifier(random_state=seed)
pipe = Pipeline([('standardize', StandardScaler()),
('rtree', model)])  # named 'rtree' to match the (commented-out) grid keys
param_grid = {
# 'rtree__n_estimators': [700], #best from range 150 - 700
# 'rtree__min_samples_leaf': [2], #best from range 1 - 7
# 'rtree__max_depth': [20]
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.7147 - F 0.7206 - H 0.7256 - H2 0.72656
def create_XGB(X_train, y_train):
model = XGBClassifier(seed=seed,
use_label_encoder=False)
pipe = Pipeline([('standardize', StandardScaler()),
('xgb', model)])
param_grid = {
'xgb__learning_rate':[0.05, 0.03], #0.2, 0.1, 0.15, 0.01 | 0.05
'xgb__n_estimators':[600, 800], #100, 300, 400, 500 | 600
'xgb__max_depth':[7], #4, 5, 6, 7, 8 | 7
'xgb__colsample_bytree':[0.2], #0.1, 0.2 | 0.2
'xgb__reg_lambda':[4, 6, 8] #1, 2, 3, 4 | 4
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6750 - F 0.6885
def create_GB(X_train, y_train):
#max_depth=6, n_estimators=500, random_state=42))])
# best parms: {'gb__learning_rate': 0.1, 'gb__max_depth': 6, 'gb__n_estimators': 500}
model = GradientBoostingClassifier(random_state=seed)
pipe = Pipeline([('standardize', StandardScaler()),
('gb', model)])
param_grid = {
# 'gb__n_estimators': [500], #50 - 600
# 'gb__learning_rate': [0.1], #0.2 - 0.01
# 'gb__max_depth': [6], #1-7
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.7152 - F 0.7195 H 0.73417
def create_SVC(X_train, y_train):
model = svm.SVC(random_state=seed,
probability=True)
pipe = Pipeline([('standardize', StandardScaler()),
('svc', model)])
param_grid = {
'svc__kernel': ['rbf'], #'rbf', 'linear' | rbf
'svc__degree': [2], #2,3,4 | 2
'svc__gamma': ['scale'], #'auto', 'scale' | 'scale'
'svc__C': [1.95] #1, 1.95 | 1.95
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6670 - F 0.6735
def create_KNN(X_train, y_train):
model = KNeighborsClassifier()
pipe = Pipeline([('standardize', StandardScaler()),
('KNN', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6764 - F 0.667
def create_SGD(X_train, y_train):
model = SGDClassifier(random_state=seed)
pipe = Pipeline([('standardize', StandardScaler()),
('sgd', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #F - 0.7311 - H2 0.73587
def create_voting(X_train, y_train):
SVC = svm.SVC(random_state=seed,
probability=True,
kernel='rbf',
degree=2,
gamma='scale',
C=1.95)
XGB = XGBClassifier(seed=seed,
learning_rate=0.05,
n_estimators=600,
max_depth=7,
reg_lambda=4,
colsample_bytree=0.2,
use_label_encoder=False)
MLP = MLPClassifier(random_state=seed,
max_iter=200,
solver='adam',
alpha=0.001,
batch_size=50,
learning_rate_init=0.0001)
estimators = [
('svc', SVC),
('xgb', XGB),
('mlp', MLP)
]
model = VotingClassifier(
estimators=estimators,
voting='soft',
weights=[1,1,1],
n_jobs=-1,
verbose=True)
pipe = Pipeline([('standardize', StandardScaler()),
('vc', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
print('voting done')
return gs
@staticmethod #F - 0.72848 - H2 0.7373
def create_stacking(X_train, y_train):
SVC = svm.SVC(random_state=seed,
probability=True,
kernel='rbf',
degree=2,
gamma='scale',
C=1.95)
XGB = XGBClassifier(seed=seed,
learning_rate=0.05,
n_estimators=600,
max_depth=7,
reg_lambda=4,
colsample_bytree=0.2,
use_label_encoder=False)
MLP = MLPClassifier(random_state=seed,
max_iter=200,
solver='adam',
alpha=0.001,
batch_size=50,
learning_rate_init=0.0001)
estimators = [
('svc', SVC),
('xgb', XGB),
('mlp', MLP)
]
model = StackingClassifier(
estimators=estimators,
final_estimator=LogisticRegression(random_state=42)
)
pipe = Pipeline([('standardize', StandardScaler()),
('stack', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
print('stacking done')
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod
def create_logisticregression(X_train, y_train):
model = LogisticRegression(random_state=42)
pipe = Pipeline([('standardize', StandardScaler()),
('lg', model)])
param_grid = {
'lg__max_iter':[600]
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
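# Shared GridSearchCV factory used by every model above: 5-fold CV, 4 parallel jobs, train scores kept for diagnostics.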
def gridsearch(pipe, param_grid, metric):
gs = GridSearchCV(pipe,
param_grid,
verbose=0,
cv=5,
scoring=metric,
n_jobs=4,
return_train_score=True)
return gs
|
JaapvDijk/PredictTaskImpactNLP
|
classes/prediction_models.py
|
prediction_models.py
|
py
| 14,556 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "autokeras.CUSTOM_OBJECTS",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "autokeras.CUSTOM_OBJECTS",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.callbacks.EarlyStopping",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Embedding",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Conv1D",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.GlobalMaxPool1D",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sklearn.neural_network.MLPRegressor",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "xgboost.XGBRegressor",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.callbacks.EarlyStopping",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "sklearn.neural_network.MLPClassifier",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "xgboost.XGBClassifier",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.GradientBoostingClassifier",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "sklearn.linear_model.SGDClassifier",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "xgboost.XGBClassifier",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "sklearn.neural_network.MLPClassifier",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.VotingClassifier",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "xgboost.XGBClassifier",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "sklearn.neural_network.MLPClassifier",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.StackingClassifier",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 431,
"usage_type": "name"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "classes.data_manager.print_gridsearch_best_stats",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "classes.data_manager",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 454,
"usage_type": "call"
}
] |
24337846458
|
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
from PIL import Image
from keras import models
from keras import layers
from keras import optimizers
from keras import regularizers
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
models_save_dir = './models/'
if not os.path.exists(models_save_dir):
os.mkdir(models_save_dir)
dataset_dir = './datasets/raw_datasets/Images/'
train_dir = './datasets/train/'
validation_dir = './datasets/validation/'
test_dir = './datasets/test/'
# if the second arguments is '-n' then split data again
if len(sys.argv) >= 2 and sys.argv[1] == '-n':
if os.path.exists(train_dir):
shutil.rmtree(train_dir)
if os.path.exists(validation_dir):
shutil.rmtree(validation_dir)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(train_dir)
os.mkdir(validation_dir)
os.mkdir(test_dir)
for i in range(0, 43):
# 43 classes in total; each iteration handles one class, splitting its images into train/validation/test at an 8:1:1 ratio
category = i
foldername = str(i).zfill(5)
foldername_new = str(i)
dataset_path = os.path.join(dataset_dir, foldername)
train_path = os.path.join(train_dir, foldername_new)
os.mkdir(train_path)
validation_path = os.path.join(validation_dir, foldername_new)
os.mkdir(validation_path)
test_path = os.path.join(test_dir, foldername_new)
os.mkdir(test_path)
dataset = np.array(os.listdir(dataset_path))
np.random.shuffle(dataset)
#train_dataset, test_dataset = train_test_split(dataset, target, test_size=0.2)
"""
train_test_split raised a 'too many values to unpack' error here,
so we simply slice the shuffled array instead
"""
train_dataset = dataset[0:int(len(dataset)*0.8)]
validation_dataset = dataset[int(len(dataset)*0.8):int(len(dataset)*0.9)]
test_dataset = dataset[int(len(dataset)*0.9):]
for train_item in train_dataset:
im = Image.open(os.path.join(dataset_path, train_item))
im.save(os.path.join(train_path, train_item.split('.')[0] + '.png'))
#shutil.copy(os.path.join(dataset_path, train_item), train_path)
for validation_item in validation_dataset:
im = Image.open(os.path.join(dataset_path, validation_item))
im.save(os.path.join(validation_path, validation_item.split('.')[0] + '.png'))
#shutil.copy(os.path.join(dataset_path, validation_item), validation_path)
for test_item in test_dataset:
im = Image.open(os.path.join(dataset_path, test_item))
im.save(os.path.join(test_path, test_item.split('.')[0] + '.png'))
#shutil.copy(os.path.join(dataset_path, test_item), test_path)
"""
clear_session before every training run
"""
K.clear_session()
batch_size = 10
steps_per_epoch = int(sum([len(files) for r, d, files in os.walk(train_dir)])/batch_size)
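# batches per epoch = total number of training images divided by the batch size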
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(50, 50, 3)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(43, activation='softmax'))
"""
check our model summary
"""
#model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['accuracy']
)
"""
start processing input data:
turn raw images into rescaled numpy arrays
"""
train_datagen = ImageDataGenerator(rescale=1./255,
#rotation_range=40,
#width_shift_range=0.2,
#height_shift_range=0.2,
#shear_range=0.2,
#zoom_range=0.2,
#horizontal_flip=True,
#fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1./255,
#rotation_range=40,
#width_shift_range=0.2,
#height_shift_range=0.2,
#shear_range=0.2,
#zoom_range=0.2,
#horizontal_flip=True,
#fill_mode='nearest'
)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(50,50),
batch_size=batch_size,
class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(50,50),
batch_size=batch_size,
class_mode='categorical')
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')
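# patience=0: stop as soon as val_loss fails to improve between epochs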
history = model.fit_generator(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=20,
validation_data=validation_generator,
validation_steps=15,
callbacks=[earlystopping])
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(50,50),
batch_size=20,
class_mode='categorical')
loss, acc = model.evaluate_generator(test_generator, 20)
model.save(os.path.join(models_save_dir, 'traffic_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '_' + str(acc) + '.h5'))  # no ':' in the timestamp, so the filename is also valid on Windows
|
jabez128/dl-trafficsigns-detection
|
classifier.py
|
classifier.py
|
py
| 5,975 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "keras.backend.clear_session",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "keras.optimizers.RMSprop",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "keras.optimizers",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.EarlyStopping",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 195,
"usage_type": "attribute"
}
] |
21216112261
|
import requests
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"
NEWS_API_KEY = 'caa8a3621a5e481c96807e77fe1dfc91'
news_params = {
'q': "Tesla Inc",
'apiKey': NEWS_API_KEY
}
response = requests.get(url=NEWS_ENDPOINT, params=news_params)
response.raise_for_status()
data = response.json()["articles"]
articles = data[:3]  # slicing also avoids an IndexError when fewer than 3 articles are returned
print(articles)
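The API key above is committed in plain text; a safer pattern is to read it from the environment. A minimal sketch (the NEWS_API_KEY environment variable name is an assumption):
# Sketch: load the key from the environment instead of hard-coding it.
import os
NEWS_API_KEY = os.environ.get("NEWS_API_KEY", "")  # export it in the shell, not in source
if not NEWS_API_KEY:
    raise SystemExit("NEWS_API_KEY is not set")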
|
myoaung99/Stock-news
|
eg.py
|
eg.py
|
py
| 390 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
}
] |
29157621722
|
# -*- coding: utf-8 -*-
import logging
import aiogrpc
class AsyncPluginManager:
"""
Connects to a running mavsdk server or starts one and manages plugins
"""
@classmethod
async def create(cls, host, port=50051):
self = AsyncPluginManager()
self.host = host
self.port = port
self.plugins = {}
await self._connect_backend()
return self
async def _connect_backend(self):
"""
Initializes the connection to the running backend
"""
#: gRPC channel
self._channel = aiogrpc.insecure_channel(
"{}:{}".format(self.host, self.port),
standalone_pool_for_streaming=True
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler()) # Avoid errors when user has not configured logging
logger.debug("Waiting for mavsdk_server to be ready...")
await aiogrpc.channel_ready_future(self._channel)
logger.debug("Connected to mavsdk_server!")
@property
def channel(self):
"""
gRPC channel to the backend
"""
return self._channel
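A minimal usage sketch, assuming a mavsdk_server instance is already listening on localhost:50051:
# Sketch: connect to a running mavsdk_server and inspect its gRPC channel.
import asyncio

async def demo():
    manager = await AsyncPluginManager.create(host="localhost", port=50051)
    print(manager.channel)  # the aiogrpc channel shared by all plugins

asyncio.run(demo())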
|
mavlink/MAVSDK-Python
|
mavsdk/async_plugin_manager.py
|
async_plugin_manager.py
|
py
| 1,162 |
python
|
en
|
code
| 246 |
github-code
|
6
|
[
{
"api_name": "aiogrpc.insecure_channel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.NullHandler",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "aiogrpc.channel_ready_future",
"line_number": 38,
"usage_type": "call"
}
] |
38827999454
|
import random
import qrcode
import qrcode.image.svg
from io import BytesIO
from django.shortcuts import render
from django.views.generic import View
class IndexView(View):
def get(self, request, *args, **kwargs):
template = 'index.html'
return render(
request,
template,
)
def generate_random_code():
    digits = "12345678900987654321"
    # sample five characters from the digit pool and join them into a code string
    return ''.join(random.sample(digits, 5))
class CustomerQrAndBarcodeScan(View):
def post(self, request, *args, **kwargs):
templates_text = request.POST['qr_text']
print(templates_text)
factory = qrcode.image.svg.SvgImage
text = generate_random_code()
print(text)
img = qrcode.make(text,
image_factory=factory, box_size=20)
        stream = BytesIO()
        img.save(stream)
        context = {}
        context['svg'] = stream.getvalue().decode()
return render(request, "index.html", context)
|
AbrahamAdekunle/Bashir_abraham_ERP
|
bar_qr_code/views.py
|
views.py
|
py
| 1,087 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.View",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "qrcode.image",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "qrcode.make",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 45,
"usage_type": "call"
}
] |
38047305072
|
from flask import Flask, jsonify, render_template
import psutil
import subprocess
app = Flask(__name__)
def get_gpu_usage():
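    # Shells out to nvidia-smi, so an NVIDIA GPU and driver must be installed and on PATH.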
result = subprocess.check_output("nvidia-smi --query-gpu=utilization.gpu --format=csv,noheader,nounits", shell=True)
gpu_usage = float(result.strip())
return gpu_usage
@app.route('/')
def home():
return render_template('index.html')
@app.route('/system_info')
def system_info():
info = {
"cpu_usage": psutil.cpu_percent(),
"ram_usage": psutil.virtual_memory().percent,
"disk_usage": psutil.disk_usage('/').percent,
"network_info": psutil.net_io_counters(pernic=True),
"gpu_usage": get_gpu_usage()
}
return jsonify(info)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
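A small client sketch polling the endpoint, assuming the server is running on localhost:5000 as configured above:
# Sketch: fetch one snapshot of the reported system metrics.
import requests

resp = requests.get("http://localhost:5000/system_info", timeout=5)
resp.raise_for_status()
print(resp.json())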
|
agbld/webserver_for_system_infos
|
app.py
|
app.py
|
py
| 784 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "psutil.disk_usage",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "psutil.net_io_counters",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
}
] |
25068490855
|
from typing import Tuple, List
from asendia_us_lib.shipping_rate_request import ShippingRateRequest
from asendia_us_lib.shipping_rate_response import ShippingRate
from purplship.core.units import Packages, Services, Options
from purplship.core.utils import Serializable, DP, NF
from purplship.core.models import (
RateRequest,
RateDetails,
Message
)
from purplship.providers.asendia_us.units import Service, Option, ProcessingLocation
from purplship.providers.asendia_us.error import parse_error_response
from purplship.providers.asendia_us.utils import Settings
def parse_rate_response(response: dict, settings: Settings) -> Tuple[List[RateDetails], List[Message]]:
errors = parse_error_response(response, settings)
details = [
_extract_details(detail, settings)
for detail in (response.get('shippingRates') or [])
]
return details, errors
def _extract_details(detail: dict, settings: Settings) -> RateDetails:
rate = DP.to_object(ShippingRate, detail)
return RateDetails(
carrier_id=settings.carrier_id,
carrier_name=settings.carrier_name,
currency=rate.currencyType,
service=Service.map(rate.productCode).name_or_key,
base_charge=NF.decimal(rate.rate),
total_charge=NF.decimal(rate.rate)
)
def rate_request(payload: RateRequest, settings: Settings) -> Serializable[ShippingRateRequest]:
package = Packages(payload.parcels).single
service = (Services(payload.services, Service).first or Service.asendia_us_all).value
options = Options(payload.options, Option)
request = ShippingRateRequest(
accountNumber=settings.account_number,
subAccountNumber=options.asendia_sub_account_number,
processingLocation=ProcessingLocation.map(options.asendia_processing_location or "SFO").name,
recipientPostalCode=payload.recipient.postal_code,
recipientCountryCode=payload.recipient.country_code,
totalPackageWeight=package.weight.value,
weightUnit=package.weight_unit.value.lower(),
dimLength=package.length.value,
dimWidth=package.width.value,
dimHeight=package.height.value,
dimUnit=package.dimension_unit.value,
productCode=service,
)
return Serializable(request, DP.to_dict)
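A hedged sketch of exercising parse_rate_response; the payload fields are inferred only from the attribute accesses above, and settings stands in for a configured asendia_us Settings instance (both are illustrative assumptions):
# Sketch: run a fabricated carrier payload through the parser.
fake_response = {
    "shippingRates": [
        {"currencyType": "USD", "productCode": "e-com", "rate": 12.5}  # placeholder values
    ]
}
details, errors = parse_rate_response(fake_response, settings)  # settings: Settings instance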
|
danh91/purplship
|
sdk/extensions/asendia_us/purplship/providers/asendia_us/rate.py
|
rate.py
|
py
| 2,301 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "purplship.providers.asendia_us.utils.Settings",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "purplship.providers.asendia_us.error.parse_error_response",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "purplship.core.models.RateDetails",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "purplship.core.models.Message",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "purplship.providers.asendia_us.utils.Settings",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "purplship.core.utils.DP.to_object",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "asendia_us_lib.shipping_rate_response.ShippingRate",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "purplship.core.utils.DP",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "purplship.core.models.RateDetails",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "purplship.providers.asendia_us.units.Service.map",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "purplship.providers.asendia_us.units.Service",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "purplship.core.utils.NF.decimal",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "purplship.core.utils.NF",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "purplship.core.utils.NF.decimal",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "purplship.core.utils.NF",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "purplship.core.models.RateDetails",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "purplship.core.models.RateRequest",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "purplship.providers.asendia_us.utils.Settings",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "purplship.core.units.Packages",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "purplship.core.units.Services",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "purplship.providers.asendia_us.units.Service",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "purplship.providers.asendia_us.units.Service.asendia_us_all",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "purplship.core.units.Options",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "purplship.providers.asendia_us.units.Option",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "asendia_us_lib.shipping_rate_request.ShippingRateRequest",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "purplship.providers.asendia_us.units.ProcessingLocation.map",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "purplship.providers.asendia_us.units.ProcessingLocation",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "purplship.core.utils.Serializable",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "purplship.core.utils.DP.to_dict",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "purplship.core.utils.DP",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "purplship.core.utils.Serializable",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "asendia_us_lib.shipping_rate_request.ShippingRateRequest",
"line_number": 40,
"usage_type": "name"
}
] |
26169526568
|
import argparse
import json
import os
import time
import torch
from redsandt.encoder.bert_encoder import BERTEncoder
from redsandt.framework.bag_re import BagRE
from redsandt.selector.bag_attention import BagAttention
# Pass arguments
parser = argparse.ArgumentParser(
description='Improving Distantly-Supervised Relation Extraction through BERT-based Label & Instance Embeddings')
parser.add_argument('--train', dest="train", action='store_true', help='training mode')
parser.add_argument('--eval', dest="eval", action='store_true', help='evaluation mode')
parser.add_argument('--dataset', dest="dataset", required=True, help='dataset')
parser.add_argument('--config', dest="config", required=True, help='configuration file')
parser.add_argument('--model_dir', dest="model_dir", required=True, help='model dir')
parser.add_argument('--model_name', dest="model_name", required=True, help='model name')
args = parser.parse_args()
# Some basic settings
ROOT_PATH = '.'
DATASET = args.dataset # NYT-10 or GDS
MODEL_DIR = args.model_dir
MODEL_NAME = args.model_name
config = json.load(open(args.config))
# Create folders
if not os.path.exists('experiments/ckpt/' + DATASET + '/' + MODEL_DIR):
os.makedirs('experiments/ckpt/' + DATASET + '/' + MODEL_DIR)
if not os.path.exists('experiments/outputs/' + DATASET + '/' + MODEL_DIR):
os.makedirs('experiments/outputs/' + DATASET + '/' + MODEL_DIR)
ckpt = 'experiments/ckpt/' + DATASET + '/' + MODEL_DIR + '/' + MODEL_NAME + '.pth.tar'
if DATASET == 'NYT-10':
    rel2id = json.load(open(os.path.join(ROOT_PATH, 'benchmark/NYT-10-enhanced/nyt10_rel2id.json')))
elif DATASET == 'GDS':
    rel2id = json.load(open(os.path.join(ROOT_PATH, 'benchmark/GDS-enhanced/gids_rel2id.json')))
else:
    raise ValueError("Unsupported dataset: {} (expected NYT-10 or GDS)".format(DATASET))
# DEFINE SENTENCE ENCODER
print('Defining the sentence encoder...')
sentence_encoder = BERTEncoder(max_length=config['encoder']['max_length'],
                               num_labels=config['encoder']['num_labels'],
                               pretrained_model=config['encoder']['pretrained_model'],
                               drop=config['encoder']['encoder_dropout'],
                               freeze_bert=config['encoder']['freeze_bert'],
                               text_stp=config['encoder']['text_stp'],
                               entity_types=config['encoder']['entity_types'],
                               dataset=DATASET)
# DEFINE MODEL
print("\nDefining model...")
model = BagAttention(sentence_encoder, len(rel2id), rel2id, config['framework']['selector_dropout'])
# DEFINE TRAINING FRAMEWORK
print("\nDefining learning framework...")
model_path = DATASET + '/' + MODEL_DIR
framework = BagRE(train_path=config['train_data_path'], val_path=config['val_data_path'],
test_path=config['test_data_path'], model_name=model_path, model=model, ckpt=ckpt,
batch_size=config['framework']['batch_size'], max_epoch=config['framework']['max_epoch'],
lr=config['framework']['lr'], weight_decay=config['framework']['weight_decay'],
warmup_step_ratio=config['framework']['warmup_step_ratio'], opt=config['framework']['opt'],
weighted_loss=config['framework']['weighted_loss'], bag_size=config['framework']['bag_size'])
# TRAIN MODEL
if args.train:
print("\nTraining model...")
start = time.time()
framework.train_model()
end = time.time()
print("Training time: ", end - start, "sec.")
# EVALUATE MODEL
if args.eval:
print("\nEvaluate model on testing data...")
start = time.time()
framework.load_state_dict(torch.load(ckpt)['state_dict'])
result = framework.eval_model(framework.test_loader, save_eval_metrics=True)
end = time.time()
print("Testing time: ", end - start, "sec.")
# Print Statistics
print('AUC: {}'.format(result['auc']))
print('P@100: {}'.format(result['p@100']))
print('P@200: {}'.format(result['p@200']))
print('P@300: {}'.format(result['p@300']))
print('P@500: {}'.format(result['p@500']))
print('P@1000: {}'.format(result['p@1000']))
print('P@2000: {}'.format(result['p@2000']))
print('P@all: {}'.format(result['p@all']))
print('\nRelation Distribution on Top 300 predictions:')
for key, value in result['rel_dist_at_300'].items():
print(key, ": ", value)
|
DespinaChristou/REDSandT
|
redsandt.py
|
redsandt.py
|
py
| 4,210 |
python
|
en
|
code
| 22 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "redsandt.encoder.bert_encoder.BERTEncoder",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "redsandt.selector.bag_attention.BagAttention",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "redsandt.framework.bag_re.BagRE",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 78,
"usage_type": "call"
}
] |
22982145642
|
import sys
import json
import os
import io
import collections
import argparse
import logging
from e2edutch import conll
from e2edutch import minimize
from e2edutch import util
from e2edutch import coref_model as cm
from e2edutch import naf
import tensorflow.compat.v1 as tf
logger = logging.getLogger('e2edutch')
class Predictor(object):
"""
A predictor object loads a pretrained e2e model to predict coreferences.
It can be used to predict coreferences on tokenized text.
"""
def __init__(self, model_name='final', config=None, verbose=False):
if verbose:
logger.setLevel(logging.INFO)
if config:
self.config = config
else:
# if no configuration is provided, try to get a default config.
self.config = util.initialize_from_env(model_name=model_name)
# Clear tensorflow context:
tf.reset_default_graph()
self.session = tf.compat.v1.Session()
try:
self.model = cm.CorefModel(self.config)
self.model.restore(self.session)
except ValueError:
raise Exception("Trying to reload the model while the previous " +
"session hasn't been ended. Close the existing " +
"session with predictor.end_session()")
def predict(self, example):
"""
Predict coreference spans for a tokenized text.
Args:
example (dict): dict with the following fields:
sentences ([[str]])
doc_id (str)
clusters ([[(int, int)]]) (optional)
Returns:
            [[(int, int)]]: a list of clusters. The items of a cluster are
                spans, denoted by their start and end token indices
"""
tensorized_example = self.model.tensorize_example(
example, is_training=False)
feed_dict = {i: t for i, t in zip(
self.model.input_tensors, tensorized_example)}
_, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = self.session.run(
self.model.predictions, feed_dict=feed_dict)
predicted_antecedents = self.model.get_predicted_antecedents(
top_antecedents, top_antecedent_scores)
predicted_clusters, _ = self.model.get_predicted_clusters(
top_span_starts, top_span_ends, predicted_antecedents)
return predicted_clusters
def end_session(self):
"""
Close the session, clearing the tensorflow model context.
"""
self.session.close()
tf.reset_default_graph()
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('input_filename')
parser.add_argument('-o', '--output_file',
type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument('-f', '--format_out', default='conll',
choices=['conll', 'jsonlines', 'naf'])
parser.add_argument('-m', '--model',
type=str,
default='final',
help="model name")
parser.add_argument('-c', '--word_col', type=int, default=2)
parser.add_argument('--cfg_file',
type=str,
default=None,
help="config file")
parser.add_argument('--model_cfg_file',
type=str,
default=None,
help="model config file")
parser.add_argument('-v', '--verbose', action='store_true')
return parser
def read_jsonlines(input_filename):
    with open(input_filename) as f:
        for line in f:
            yield json.loads(line)
def main(args=None):
parser = get_parser()
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.INFO)
# Input file in .jsonlines format or .conll.
input_filename = args.input_filename
ext_input = os.path.splitext(input_filename)[-1]
if ext_input not in ['.conll', '.jsonlines', '.txt', '.naf']:
raise Exception(
'Input file should be .naf, .conll, .txt or .jsonlines, but is {}.'
.format(ext_input))
if ext_input == '.conll':
labels = collections.defaultdict(set)
stats = collections.defaultdict(int)
docs = minimize.minimize_partition(
input_filename, labels, stats, args.word_col)
elif ext_input == '.jsonlines':
docs = read_jsonlines(input_filename)
elif ext_input == '.naf':
naf_obj = naf.get_naf(input_filename)
jsonlines_obj, term_ids, tok_ids = naf.get_jsonlines(naf_obj)
docs = [jsonlines_obj]
else:
text = open(input_filename).read()
docs = [util.create_example(text)]
output_file = args.output_file
config = util.initialize_from_env(model_name=args.model,
cfg_file=args.cfg_file,
model_cfg_file=args.model_cfg_file)
predictor = Predictor(config=config)
sentences = {}
predictions = {}
for example_num, example in enumerate(docs):
example["predicted_clusters"] = predictor.predict(example)
if args.format_out == 'jsonlines':
output_file.write(json.dumps(example))
output_file.write("\n")
else:
predictions[example['doc_key']] = example["predicted_clusters"]
sentences[example['doc_key']] = example["sentences"]
if example_num % 100 == 0:
logger.info("Decoded {} examples.".format(example_num + 1))
if args.format_out == 'conll':
conll.output_conll(output_file, sentences, predictions)
elif args.format_out == 'naf':
# Check number of docs - what to do if multiple?
# Create naf obj if input format was not naf
if ext_input != '.naf':
# To do: add linguistic processing layers for terms and tokens
            logger.warning(
                'Outputting NAF when input was not naf; '
                'no dependency information available')
for doc_key in sentences:
naf_obj, term_ids = naf.get_naf_from_sentences(
sentences[doc_key])
naf_obj = naf.create_coref_layer(
naf_obj, predictions[doc_key], term_ids)
naf_obj = naf.add_linguistic_processors(naf_obj)
buffer = io.BytesIO()
naf_obj.dump(buffer)
output_file.write(buffer.getvalue().decode('utf-8'))
            # To do: make separate outputs?
            # To do: use dependency information from conll?
else:
# We only have one input doc
naf_obj = naf.create_coref_layer(
naf_obj, example["predicted_clusters"], term_ids)
naf_obj = naf.add_linguistic_processors(naf_obj)
buffer = io.BytesIO()
naf_obj.dump(buffer)
output_file.write(buffer.getvalue().decode('utf-8'))
if __name__ == "__main__":
main()
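A minimal programmatic sketch of the Predictor class, assuming the pretrained 'final' model and its configuration are available; the document dict follows the fields used above and the tokens are illustrative:
# Sketch: predict coreference clusters for one tokenized document.
predictor = Predictor(model_name='final')
example = {
    "doc_key": "doc_0",                          # identifier, as consumed in main()
    "sentences": [["Jan", "ziet", "hem", "."]],  # tokenized sentences (illustrative)
}
clusters = predictor.predict(example)            # e.g. [[(0, 0), (2, 2)]]
predictor.end_session()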
|
Filter-Bubble/e2e-Dutch
|
e2edutch/predict.py
|
predict.py
|
py
| 7,163 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "e2edutch.util.initialize_from_env",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "e2edutch.util",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.reset_default_graph",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.compat.v1.Session",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.compat",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "e2edutch.coref_model.CorefModel",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "e2edutch.coref_model",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.reset_default_graph",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "e2edutch.minimize.minimize_partition",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "e2edutch.minimize",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "e2edutch.naf.get_naf",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "e2edutch.naf.get_jsonlines",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "e2edutch.util.create_example",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "e2edutch.util",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "e2edutch.util.initialize_from_env",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "e2edutch.util",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "e2edutch.conll.output_conll",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "e2edutch.conll",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "e2edutch.naf.get_naf_from_sentences",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "e2edutch.naf.create_coref_layer",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "e2edutch.naf.add_linguistic_processors",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf.create_coref_layer",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "e2edutch.naf.add_linguistic_processors",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "e2edutch.naf",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 190,
"usage_type": "call"
}
] |
34540343891
|
import argparse
import sys
from operator import add
import os
import shlex
import shutil
from subprocess import Popen, PIPE
from pyspark import SparkContext, SparkConf
import pyspark.serializers
import subprocess
import boto3
import re
global parser_result
if sys.version > "3.4":
pyspark.serializers.protocol = 4
APPLICATION_FOLDER = "/app"
GENOME_REFERENCES_FOLDER = "/mnt/ref"
TEMP_OUTPUT_FOLDER = "/mnt/output"
HDFS_TEMP_OUTPUT_FOLDER = "/tmp/sam_chunks"
#################################
# File splitting
#################################
def split_interleaved_file(file_prefix, file_content, output_dir):
"""
Unpacks an interleaved file into the standard FASTQ format
:param file_prefix: the prefix of the file name
:param file_content: the lines of content from the input file
:param output_dir: the location to store the unpacked files
    :return: a tuple whose first element is a list of output file names
        (1 for single-end, 2 for paired-end reads); the second element is a
        boolean flag - True for paired-end data, False otherwise
"""
fastq_line_count_se = 4
fastq_line_count_pe = 8
paired_reads = False
output_file_names = []
file_prefix = output_dir + "/" + file_prefix
output_file = file_prefix + "_1.fq"
output_file_names.append(output_file)
output_file_writer = open(output_file, 'w')
count = 0
for line in file_content.strip().split("\n"):
# In the first line, check if it's paired or not
if count == 0 and len(line.strip().split("\t")) == fastq_line_count_pe:
paired_reads = True
output_file_pair = file_prefix + "_2.fq"
output_file_names.append(output_file_pair)
output_pair_writer = open(output_file_pair, 'w')
if paired_reads:
parts = line.strip().split("\t")
if len(parts) != fastq_line_count_pe:
continue
read_one = parts[:fastq_line_count_se]
read_two = parts[fastq_line_count_se:]
output_file_writer.write("\n".join(read_one) + "\n")
output_pair_writer.write("\n".join(read_two) + "\n")
else:
output_file_writer.writelines(line.strip().replace("\t", "\n") + "\n")
count += 1
output_file_writer.close()
if paired_reads:
output_pair_writer.close()
return output_file_names, paired_reads
#################################
# Aligner
#################################
def align_reads_star(sample_name, file_names, alignment_output_dir):
# If paired read flag is required
# paired_read = True if len(file_names) == 2 else False
print("Aligning reads...")
aligner_args = "{app_folder}/STAR/STAR --runThreadN 4 {aligner_extra_args} --genomeDir {index_folder} " \
"--readFilesIn {fastq_file_names} --outFileNamePrefix {output_folder} --outSAMtype BAM Unsorted".\
format(app_folder=APPLICATION_FOLDER,
aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
index_folder=GENOME_REFERENCES_FOLDER + "/star_index",
fastq_file_names=" ".join(file_names),
output_folder=alignment_output_dir + "/")
print("Command: " + aligner_args)
aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
aligner_out, aligner_error = aligner_process.communicate()
if aligner_process.returncode != 0:
raise ValueError("STAR failed to complete (Non-zero return code)!\n"
"STAR stdout: {std_out} \nSTAR stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
if aligner_error.decode("utf8").strip() != "" or not os.path.isfile(alignment_output_dir + "/Log.final.out"):
raise ValueError("STAR failed to complete (No output file is found)!\n"
"STAR stdout: {std_out} \nSTAR stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
print('Completed reads alignment')
bam_file_name_output = "Aligned.out.bam"
return bam_file_name_output
def align_reads_hisat(sample_name, file_names, alignment_output_dir):
# If paired read flag is required
paired_read = True if len(file_names) == 2 else False
print("Aligning reads...")
if paired_read:
fastq_file_args = "-1 {} -2 {}".format(*file_names)
else:
fastq_file_args = "-U {}".format(*file_names)
aligner_args = "{app_folder}/hisat/hisat2 -p 4 --tmo {aligner_extra_args} -x {index_folder}/hisat2.index " \
"{fastq_file_names} -S {output_folder}/output.sam".\
format(app_folder=APPLICATION_FOLDER,
aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
index_folder=GENOME_REFERENCES_FOLDER + "/hisat_index",
fastq_file_names=fastq_file_args,
output_folder=alignment_output_dir)
print("Command: " + aligner_args)
aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
aligner_out, aligner_error = aligner_process.communicate()
if aligner_process.returncode != 0:
raise ValueError("HISAT2 failed to complete (Non-zero return code)!\n"
"HISAT2 stdout: {std_out} \nHISAT2 stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
print('Completed reads alignment')
samtools_args = "{app_folder}/samtools/samtools view -@ 4 -o {output_folder}/output.bam {output_folder}/output.sam". \
format(app_folder=APPLICATION_FOLDER,
output_folder=alignment_output_dir)
print("Command: " + samtools_args)
samtools_process = Popen(shlex.split(samtools_args), stdout=PIPE, stderr=PIPE)
samtools_out, samtools_error = samtools_process.communicate()
if samtools_process.returncode != 0:
raise ValueError("Samtools failed to complete (Non-zero return code)!\n"
"Samtools stdout: {std_out} \nSamtools stderr: {std_err}".format(
std_out=samtools_out.decode("utf8"), std_err=samtools_error.decode("utf8")))
sam_file_name_output = "output.bam"
return sam_file_name_output
def align_reads_subread(sample_name, file_names, alignment_output_dir):
# If paired read flag is required
paired_read = True if len(file_names) == 2 else False
print("Aligning reads...")
print("Aligning with subread")
if paired_read:
fastq_file_args = "-r {} -R {}".format(*file_names)
else:
fastq_file_args = "-r {}".format(*file_names)
aligner_args = "{app_folder}/subread/subread-align -T 4 -t 0 --SAMoutput {aligner_extra_args} " \
"-i {index_folder}/genome {fastq_file_names} -o {output_folder}/output.bam".\
format(app_folder=APPLICATION_FOLDER,
aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
index_folder=GENOME_REFERENCES_FOLDER + "/subread_index",
fastq_file_names=fastq_file_args,
output_folder=alignment_output_dir)
print("Command: " + aligner_args)
aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
aligner_out, aligner_error = aligner_process.communicate()
if aligner_process.returncode != 0:
raise ValueError("Subread failed to complete (Non-zero return code)!\n"
"Subread stdout: {std_out} \nSubread stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
print('Completed reads alignment')
sam_file_name_output = "output.bam"
return sam_file_name_output
#################################
# Main functions
#################################
def alignment_step(keyval):
# Input: file_name, file_content as key,val
# Output: [sample_name, file_name] as [key,val]
global parser_result
prefix_regex = r"(.*_part[0-9]*)\."
file_name, file_content = keyval
prefix_match = re.findall(prefix_regex, file_name.rstrip("/").split("/")[-1])
if len(prefix_match) != 1:
raise ValueError("Filename can not be resolved (invalid, pattern mismatch): {}".format(file_name))
prefix = prefix_match[0]
sample_name = prefix.rsplit("_part", 1)[0]
alignment_dir = TEMP_OUTPUT_FOLDER + "/alignment_" + prefix
    try:
        os.mkdir(alignment_dir)
    except OSError:
        print('Alignment directory {} exists.'.format(alignment_dir))
print("Recreating FASTQ file(s)")
split_file_names, paired_reads = split_interleaved_file(prefix, file_content, alignment_dir)
print("Recreating FASTQ file(s) complete. Files recreated: {}".format(",".join(split_file_names)))
alignment_output_dir = alignment_dir + "/aligner_output"
    try:
        os.mkdir(alignment_output_dir)
    except OSError:
        print('Alignment output directory {} exists.'.format(alignment_output_dir))
if parser_result.aligner.lower() == "star":
aligned_sam_output = align_reads_star(sample_name, split_file_names, alignment_output_dir)
elif parser_result.aligner.lower() == "hisat" or parser_result.aligner.lower() == "hisat2":
aligned_sam_output = align_reads_hisat(sample_name, split_file_names, alignment_output_dir)
elif parser_result.aligner.lower() == "subread":
aligned_sam_output = align_reads_subread(sample_name, split_file_names, alignment_output_dir)
else:
print("Aligner specified is not yet supported. Defaulting to STAR")
aligned_sam_output = align_reads_star(sample_name, split_file_names, alignment_output_dir)
aligned_output_filepath = "{}/{}".format(alignment_output_dir.rstrip("/"), aligned_sam_output)
aligned_output_hdfs_filepath = "{}/{}".format(HDFS_TEMP_OUTPUT_FOLDER, prefix)
subprocess.call(["hdfs", "dfs", "-rm", aligned_output_hdfs_filepath])
subprocess.call(["hdfs", "dfs", "-put", aligned_output_filepath, aligned_output_hdfs_filepath])
shutil.rmtree(alignment_dir, ignore_errors=True)
return sample_name, [prefix]
def fuse_alignment(keyval):
# Input: sample_name, [file_name,...] as key, val
# Output: sample_name
global parser_result
key, file_lists = keyval
fuse_alignment_dir = TEMP_OUTPUT_FOLDER.rstrip("/") + "/" + key
ordered_file_lists = sorted([(f, int(f.rsplit("part", 1)[-1])) for f in file_lists], key=lambda x:x[-1])
print(ordered_file_lists)
    try:
        os.mkdir(fuse_alignment_dir)
    except OSError:
        print('Fuse alignment directory {} exists.'.format(fuse_alignment_dir))
fuse_alignment_file = key + ".bam"
previous_file_path = ""
for index, file_name_pair in enumerate(ordered_file_lists):
file_name, number = file_name_pair
local_file_path = fuse_alignment_dir + "/" + file_name + ".bam"
subprocess.call(["hdfs", "dfs", "-get", HDFS_TEMP_OUTPUT_FOLDER.rstrip("/") + "/" + file_name, local_file_path])
if index != 0:
new_merged_file_path = "{}/temp_{}.bam".format(fuse_alignment_dir, index)
subprocess.call(["samtools", "cat", "-o", new_merged_file_path, previous_file_path, local_file_path])
os.remove(previous_file_path)
os.remove(local_file_path)
previous_file_path = new_merged_file_path
else:
previous_file_path = local_file_path
subprocess.call(["hdfs", "dfs", "-rm", HDFS_TEMP_OUTPUT_FOLDER.rstrip("/") + "/" + file_name])
if parser_result.output_dir.startswith("s3://"): # From S3
s3_client = boto3.client('s3', region_name=parser_result.aws_region)
print("uploading to S3")
output_bucket, key_prefix = parser_result.output_dir.strip().strip("/")[5:].split("/", 1)
s3_client.upload_file(previous_file_path, output_bucket, key_prefix + "/" + fuse_alignment_file)
else:
print("outputting to HDFS")
subprocess.call(["hdfs", "dfs", "-mkdir", "-p", parser_result.output_dir.rstrip("/")])
subprocess.call(["hdfs", "dfs", "-put", previous_file_path, parser_result.output_dir.rstrip("/") + "/" +
fuse_alignment_file])
os.remove(previous_file_path)
return key
if __name__ == "__main__":
global parser_result
parser = argparse.ArgumentParser(description='Spark-based RNA-seq Pipeline Alignment')
parser.add_argument('--input', '-i', action="store", dest="input_dir", help="Input directory - HDFS or S3")
parser.add_argument('--output', '-o', action="store", dest="output_dir", help="Output directory - HDFS or S3")
parser.add_argument('--aligner_tools', '-at', action="store", dest="aligner", nargs='?',
help="Aligner to be used (STAR|HISAT2|Subread)", default="STAR")
parser.add_argument('--aligner_extra_args', '-s', action="store", dest="aligner_extra_args", nargs='?',
help="Extra argument to be passed to alignment tool", default="")
parser.add_argument('--region', '-r', action="store", dest="aws_region", help="AWS region")
parser_result = parser.parse_args()
split_num = 0
conf = SparkConf().setAppName("Spark-based RNA-seq Pipeline Alignment")
sc = SparkContext(conf=conf)
if parser_result.input_dir.startswith("s3://"): # From S3
s3_client = boto3.client('s3', region_name=parser_result.aws_region)
# Get number of input files
s3_paginator = s3_client.get_paginator('list_objects')
input_bucket, key_prefix = parser_result.input_dir[5:].strip().split("/", 1)
input_file_num = 0
for result in s3_paginator.paginate(Bucket=input_bucket, Prefix=key_prefix):
for file in result.get("Contents"):
input_file_num += 1
if input_file_num == 0:
raise ValueError("Input directory is invalid or empty!")
split_num = input_file_num
else: # From HDFS
hdfs_process = Popen(shlex.split("hdfs dfs -count {}".format(parser_result.input_dir)),
stdout=PIPE, stderr=PIPE)
hdfs_out, hdfs_error = hdfs_process.communicate()
if hdfs_error:
raise ValueError("Input directory is invalid or empty!")
dir_count, file_count, size, path = hdfs_out.strip().split()
split_num = int(file_count)
subprocess.call(["hdfs", "dfs", "-mkdir", "-p", HDFS_TEMP_OUTPUT_FOLDER])
input_files = sc.wholeTextFiles(parser_result.input_dir, split_num)
aligned_files = input_files.map(alignment_step)
aligned_file_lists = aligned_files.reduceByKey(add)
aligned_samples = aligned_file_lists.map(fuse_alignment)
aligned_samples.collect()
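A hedged illustration of the interleaved record format that split_interleaved_file expects, reconstructed from the parsing logic above (4 tab-separated FASTQ lines per single-end read, 8 for a read pair); all read values are placeholders:
# Sketch: build one paired-end interleaved record and unpack it.
pe_record = "\t".join(["@read1/1", "ACGT", "+", "FFFF",
                       "@read1/2", "TGCA", "+", "FFFF"])
file_names, paired = split_interleaved_file("sample_part0", pe_record, "/tmp")
print(file_names, paired)  # two .fq paths and True for paired-end input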
|
VCCRI/Falco
|
source/spark_runner/run_pipeline_alignment.py
|
run_pipeline_alignment.py
|
py
| 15,126 |
python
|
en
|
code
| 37 |
github-code
|
6
|
[
{
"api_name": "sys.version",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pyspark.serializers",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "subprocess.call",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 356,
"usage_type": "argument"
}
] |
8385118921
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import subprocess
import optparse
from collections import namedtuple
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
import sumolib # noqa
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
from sumolib.output import parse_fast # noqa
TLTuple = namedtuple('TLTuple', ['edgeID', 'dist', 'time', 'connection'])
PairKey = namedtuple('PairKey', ['edgeID', 'edgeID2', 'dist'])
PairData = namedtuple('PairData', ['otl', 'oconnection', 'tl', 'connection', 'betweenOffset', 'startOffset',
'travelTime', 'prio', 'numVehicles', 'ogreen', 'green'])
def pair2str(p, full=True):
brief = "%s,%s s=%.1f b=%.1f t=%.1f" % (
p.otl.getID(), p.tl.getID(), p.startOffset, p.betweenOffset, p.travelTime)
if full:
return brief + " og=%s g=%s p=%s n=%s" % (p.ogreen, p.green, p.prio, p.numVehicles)
else:
return brief
def logAddedPair(TLSP, sets, operation):
print("added pair %s,%s with operation %s" %
(TLSP.otl.getID(), TLSP.tl.getID(), operation))
for s in sets:
print(" " + " ".join([pair2str(p, False) for p in s]))
def get_options(args=None):
optParser = optparse.OptionParser()
optParser.add_option("-n", "--net-file", dest="netfile",
help="define the net file (mandatory)")
optParser.add_option("-o", "--output-file", dest="outfile",
default="tlsOffsets.add.xml", help="define the output filename")
optParser.add_option("-r", "--route-file", dest="routefile",
help="define the inputroute file (mandatory)")
optParser.add_option("-a", "--additional-file", dest="addfile",
help="define replacement tls plans to be coordinated")
optParser.add_option("-v", "--verbose", action="store_true",
default=False, help="tell me what you are doing")
optParser.add_option("-i", "--ignore-priority", dest="ignorePriority", action="store_true",
default=False, help="Ignore road priority when sorting TLS pairs")
optParser.add_option("--speed-factor", type="float",
default=0.8, help="avg ration of vehicle speed in relation to the speed limit")
optParser.add_option("-e", "--evaluate", action="store_true",
default=False, help="run the scenario and print duration statistics")
(options, args) = optParser.parse_args(args=args)
if not options.netfile or not options.routefile:
optParser.print_help()
sys.exit()
return options
def locate(tlsToFind, sets):
"""return
- the set in which the given traffic light exists
- the pair in which it was found
- the index within the pair
"""
for s in sets:
for pair in s:
if tlsToFind == pair.otl:
return s, pair, 0
elif tlsToFind == pair.tl:
return s, pair, 1
return None, None, None
def coordinateAfterSet(TLSP, l1, l1Pair, l1Index):
# print "coordinateAfter\n TLSP: %s\n l1Pair: %s\n l1Index=%s" % (
# pair2str(TLSP), pair2str(l1Pair), l1Index)
if l1Index == 0:
TLSPdepart = l1Pair.startOffset - TLSP.ogreen
TLSParrival = TLSPdepart + TLSP.travelTime
TLSPstartOffset2 = TLSParrival - TLSP.green
TLSP = TLSP._replace(startOffset=l1Pair.startOffset,
betweenOffset=TLSPstartOffset2 - l1Pair.startOffset)
else:
l1depart = l1Pair.startOffset + l1Pair.betweenOffset + TLSP.ogreen
TLSParrival = l1depart + TLSP.travelTime
TLSPstartOffset = TLSParrival - TLSP.green
TLSP = TLSP._replace(
startOffset=l1depart, betweenOffset=TLSPstartOffset - l1depart)
l1.append(TLSP)
return TLSP
def coordinateBeforeSet(TLSP, l2, l2Pair, l2Index):
# print "coordinateBeforeSet\n TLSP: %s\n l2Pair: %s\n l2Index=%s" % (
# pair2str(TLSP), pair2str(l2Pair), l2Index)
if l2Index == 0:
l2arrival = l2Pair.startOffset + TLSP.green
TLSPdepart = l2arrival - TLSP.travelTime
TLSPstartOffset = TLSPdepart - TLSP.ogreen
TLSP = TLSP._replace(
startOffset=TLSPstartOffset, betweenOffset=l2Pair.startOffset - TLSPstartOffset)
else:
l2arrival = l2Pair.startOffset + l2Pair.betweenOffset + TLSP.green
TLSPdepart = l2arrival - TLSP.travelTime
TLSPstartOffset = TLSPdepart - TLSP.ogreen
TLSP = TLSP._replace(
startOffset=TLSPstartOffset, betweenOffset=l2arrival - TLSPstartOffset)
l2.append(TLSP)
return TLSP
def computePairOffsets(TLSPList, verbose):
c1, c2, c3, c4, c5 = 0, 0, 0, 0, 0
sets = [] # sets of coordinate TLPairs
operation = ""
for TLSP in TLSPList:
l1, l1Pair, l1Index = locate(TLSP.otl, sets)
l2, l2Pair, l2Index = locate(TLSP.tl, sets)
# print(l1)
if l1 is None and l2 is None:
# new set
newlist = []
newlist.append(TLSP)
sets.append(newlist)
c1 += 1
operation = "newSet"
elif l2 is None and l1 is not None:
# add to set 1 - add after existing set
TLSP = coordinateAfterSet(TLSP, l1, l1Pair, l1Index)
c2 += 1
operation = "addAfterSet"
elif l1 is None and l2 is not None:
# add to set 2 - add before existing set
TLSP = coordinateBeforeSet(TLSP, l2, l2Pair, l2Index)
c3 += 1
operation = "addBeforeSet"
else:
if l1 == l2:
                # cannot coordinate both tls within the same set;
                # coordinate the first one arbitrarily
TLSP = coordinateAfterSet(TLSP, l1, l1Pair, l1Index)
c4 += 1
operation = "addHalfCoordinated"
else:
# merge sets
TLSP = coordinateAfterSet(TLSP, l1, l1Pair, l1Index)
if verbose:
logAddedPair(TLSP, sets, "addAfterSet (intermediate)")
# print "merge\n TLSP: %s\n l1Pair: %s\n l1Index=%s\n l2Pair: %s\n l2Index=%s" % (
# pair2str(TLSP), pair2str(l1Pair), l1Index, pair2str(l2Pair),
# l2Index)
if l2Index == 0:
dt = TLSP.startOffset + \
TLSP.betweenOffset - l2Pair.startOffset
else:
dt = TLSP.startOffset + TLSP.betweenOffset - \
(l2Pair.startOffset + l2Pair.betweenOffset)
merge(sets, l1, l2, dt)
c5 += 1
operation = "mergeSets"
if verbose:
logAddedPair(TLSP, sets, operation)
print("operations: newSet=%s addToSet=%s addToSet2=%s addHalfCoordinated=%s mergeSets=%s" % (
c1, c2, c3, c4, c5))
    return sets
def merge(sets, list1, list2, dt):
for elem in list2:
list1.append(elem._replace(startOffset=elem.startOffset + dt))
sets.remove(list2)
def finalizeOffsets(sets):
offsetDict = {}
for singleSet in sets:
singleSet.sort(
key=lambda pd: (pd.prio, pd.numVehicles / pd.travelTime), reverse=True)
for pair in singleSet:
# print " %s,%s:%s,%s" % (pair.otl.getID(), pair.tl.getID(),
# pair.startOffset, pair.betweenOffset)
tl1 = pair.otl.getID()
tl2 = pair.tl.getID()
betweenOffset = pair.betweenOffset
startOffset = pair.startOffset
if tl1 not in offsetDict:
# print " added %s offset %s" % (tl1, startOffset)
offsetDict[tl1] = startOffset
if tl2 not in offsetDict:
# print " added %s offset %s" % (tl2, startOffset +
# betweenOffset)
offsetDict[tl2] = startOffset + betweenOffset
return offsetDict
def getTLSInRoute(net, edge_ids):
rTLSList = [] # list of traffic lights along the current route
dist = 0
time = 0
for edgeID, nextEdgeID in zip(edge_ids[:-1], edge_ids[1:]):
edge = net.getEdge(edgeID)
nextEdge = net.getEdge(nextEdgeID)
connection = edge.getOutgoing()[nextEdge][0]
TLS = edge.getTLS()
dist += edge.getLength()
time += edge.getLength() / edge.getSpeed()
alreadyFound = [item for item in rTLSList if item[0] == edgeID]
if TLS and not alreadyFound:
rTLSList.append(TLTuple(edgeID, dist, time, connection))
dist = 0
time = 0
return rTLSList
def getFirstGreenOffset(tl, connection):
index = connection._tlLink
tlp = tl.getPrograms()
if len(tlp) != 1:
raise RuntimeError("Found %s programs for tl %s" %
(len(tlp), connection._tls))
phases = list(tlp.values())[0].getPhases()
start = 0
for p in phases:
if p.state[index] in ['G', 'g']:
return start
else:
start += p.duration
raise RuntimeError(
"No green light for tlIndex %s at tl %s" % (index, connection._tls))
def getTLPairs(net, routeFile, speedFactor, ignorePriority):
# pairs of traffic lights
TLPairs = {} # PairKey -> PairData
for route in parse_fast(routeFile, 'route', ['edges']):
rTLSList = getTLSInRoute(net, route.edges.split())
for oldTL, TLelement in zip(rTLSList[:-1], rTLSList[1:]):
key = PairKey(oldTL.edgeID, TLelement.edgeID, oldTL.dist)
numVehicles = 0 if key not in TLPairs else TLPairs[key].numVehicles
tl = net.getEdge(TLelement.edgeID).getTLS()
otl = net.getEdge(oldTL.edgeID).getTLS()
edge = net.getEdge(TLelement.edgeID)
connection = TLelement.connection
oconnection = oldTL.connection
ogreen = getFirstGreenOffset(otl, oconnection)
green = getFirstGreenOffset(tl, connection)
travelTime = TLelement.time / speedFactor
betweenOffset = travelTime + ogreen - green
startOffset = 0
# relevant data for a pair of traffic lights
prio = 1 if ignorePriority else edge.getPriority()
TLPairs[key] = PairData(otl, oconnection, tl, connection, betweenOffset, startOffset, travelTime,
prio, numVehicles + 1, ogreen, green)
return TLPairs
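# Worked example for betweenOffset (illustrative numbers): with
# travelTime = 30s, ogreen = 5s (first green of the upstream tl) and
# green = 10s (first green of the downstream tl), betweenOffset is
# 30 + 5 - 10 = 25s, i.e. the downstream program must lag by 25s so a platoon
# released at the upstream green arrives exactly on the downstream green.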
def removeDuplicates(TLPairs):
# @todo: for multiple pairs with the same edges but different dist, keep only the one with the largest numVehicles
return TLPairs
def main(options):
net = sumolib.net.readNet(options.netfile, withLatestPrograms=True)
if options.addfile is not None:
sumolib.net.readNet(options.addfile, withLatestPrograms=True, net=net)
TLPairs = getTLPairs(net, options.routefile, options.speed_factor, options.ignorePriority)
TLPairs = removeDuplicates(TLPairs)
sortHelper = [(
(pairData.prio, pairData.numVehicles / pairData.travelTime), # sortKey
(pairKey, pairData)) # payload
for pairKey, pairData in TLPairs.items()]
tlPairsList = [
value for sortKey, value in sorted(sortHelper, reverse=True)]
print("number of tls-pairs: %s" % len(tlPairsList))
if options.verbose:
print('\n'.join(["edges=%s,%s prio=%s numVehicles/time=%s" % (
pairKey.edgeID, pairKey.edgeID2, pairData.prio, pairData.numVehicles / pairData.travelTime)
for pairKey, pairData in tlPairsList]))
coordinatedSets = computePairOffsets(
[pairData for pairKey, pairData in tlPairsList], options.verbose)
offsetDict = finalizeOffsets(coordinatedSets)
with open(options.outfile, 'w') as outf:
outf.write('<additional>\n')
for ID, startOffset in sorted(offsetDict.items()):
programID = list(net.getTLSSecure(ID).getPrograms().keys())[0]
outf.write(' <tlLogic id="%s" programID="%s" offset="%.2f"/>\n' %
(ID, programID, startOffset))
outf.write('</additional>\n')
sumo = sumolib.checkBinary('sumo')
if options.evaluate:
additionals = [options.outfile]
if options.addfile:
additionals = [options.addfile] + additionals
subprocess.call([sumo,
'-n', options.netfile,
'-r', options.routefile,
'-a', ','.join(additionals),
'-v', '--no-step-log', '--duration-log.statistics'], stdout=sys.stdout)
if __name__ == "__main__":
options = get_options(sys.argv)
main(options)
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/tlsCoordinator.py
|
tlsCoordinator.py
|
py
| 12,854 |
python
|
en
|
code
| 17 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "optparse.OptionParser",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sumolib.output.parse_fast",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "sumolib.net.readNet",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "sumolib.net",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "sumolib.net.readNet",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "sumolib.net",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "sumolib.checkBinary",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 330,
"usage_type": "attribute"
}
] |
30364393271
|
import os.path
import shutil
import sys
import tempfile
import textwrap
import testfixtures
from okonomiyaki.file_formats import EggMetadata, PackageInfo
from okonomiyaki.utils.test_data import NOSE_1_3_4_RH5_X86_64
from okonomiyaki._cli import main
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestMain(unittest.TestCase):
maxDiff = None
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_spec_depend(self):
# Given
egg = NOSE_1_3_4_RH5_X86_64
r_output = textwrap.dedent("""\
metadata_version = '1.3'
name = 'nose'
version = '1.3.4'
build = 1
arch = 'amd64'
platform = 'linux2'
osdist = 'RedHat_5'
python = '2.7'
python_tag = 'cp27'
abi_tag = 'cp27m'
platform_tag = 'linux_x86_64'
packages = []
""")
# When
with testfixtures.OutputCapture() as capture:
main(["spec-depend", egg])
# Then
self.assertMultiLineEqual(capture.output.getvalue(), r_output)
def test_pkg_info(self):
# Given
egg = NOSE_1_3_4_RH5_X86_64
r_output = PackageInfo.from_egg(egg).to_string()
# When
with testfixtures.OutputCapture() as capture:
main(["pkg-info", egg])
# Then
self.assertMultiLineEqual(capture.output.getvalue(), r_output)
def test_summary(self):
# Given
egg = NOSE_1_3_4_RH5_X86_64
r_output = textwrap.dedent("""\
Extends the Python Unittest module with additional disocvery and running
options
""")
# When
with testfixtures.OutputCapture() as capture:
main(["summary", egg])
# Then
self.assertMultiLineEqual(capture.output.getvalue(), r_output)
def test_no_pkg_info(self):
# Given
path = os.path.join(
self.tempdir, os.path.basename(NOSE_1_3_4_RH5_X86_64)
)
m = EggMetadata.from_egg(NOSE_1_3_4_RH5_X86_64)
m._pkg_info = None
m.dump(path)
# When/Then
with testfixtures.OutputCapture() as capture:
with self.assertRaises(SystemExit) as exc:
main(["pkg-info", path])
if sys.version_info < (2, 7):
code = exc.exception
else:
code = exc.exception.code
self.assertEqual(code, -1)
capture.compare("No PKG-INFO")
|
enthought/okonomiyaki
|
okonomiyaki/_cli/tests/test_cli.py
|
test_cli.py
|
py
| 2,580 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "sys.version_info",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "okonomiyaki.utils.test_data.NOSE_1_3_4_RH5_X86_64",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "textwrap.dedent",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "testfixtures.OutputCapture",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "okonomiyaki._cli.main",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "okonomiyaki.utils.test_data.NOSE_1_3_4_RH5_X86_64",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "okonomiyaki.file_formats.PackageInfo.from_egg",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "okonomiyaki.file_formats.PackageInfo",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "testfixtures.OutputCapture",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "okonomiyaki._cli.main",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "okonomiyaki.utils.test_data.NOSE_1_3_4_RH5_X86_64",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "textwrap.dedent",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "testfixtures.OutputCapture",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "okonomiyaki._cli.main",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "os.path.path.basename",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "okonomiyaki.utils.test_data.NOSE_1_3_4_RH5_X86_64",
"line_number": 89,
"usage_type": "argument"
},
{
"api_name": "os.path.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "okonomiyaki.file_formats.EggMetadata.from_egg",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "okonomiyaki.utils.test_data.NOSE_1_3_4_RH5_X86_64",
"line_number": 91,
"usage_type": "argument"
},
{
"api_name": "okonomiyaki.file_formats.EggMetadata",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "testfixtures.OutputCapture",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "okonomiyaki._cli.main",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 99,
"usage_type": "attribute"
}
] |
23065976802
|
import numpy as np
import os, sys, math
import pandas as pd
import dash
#import dash_core_components as dcc
from dash import dcc
#import dash_html_components as html
from dash import html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
class Obstacle():
def __init__(self, df, dataset, frame_name):
self.df = df
self.coordinate_system = dataset.coordinate_system
if 'id' in df.keys():
self.id = df['id']
else:
self.id = -1.
if self.coordinate_system == 'camera_coordinate_system':
self.x_center, self.y_center, self.z_center, self.yaw = dataset.project_center_camera_to_lidar(frame_name, df['x'], df['y'], df['z'], df['yaw'])
elif self.coordinate_system == 'lidar_coordinate_system':
self.x_center = df['x']
self.y_center = df['y']
self.z_center = df['z']
self.yaw = df['yaw']
else:
print("Coordinate System: {} NOT implemented!".format(self.coordinate_system))
sys.exit(1)
self.w = df['w']
self.l = df['l']
self.h = df['h']
if 'score' in df.keys():
self.score = df['score']
else:
self.score = -1.
self.label = df['label']
def print_obstacle(self):
print('------')
print(self.df)
print('------\n')
################################ 3D BOXES ################################
def return_vertex(df, dataset, frame_name):
all_vertex = []
all_labels = []
all_obstacles = []
for i in range(len(df)):
# Parser obstacle
obstacle = Obstacle(df.iloc[int(i)], dataset, frame_name)
#obstacle.print_obstacle()
id_box = int(obstacle.id)
x_center = obstacle.x_center
y_center = obstacle.y_center
z_center = obstacle.z_center
yaw = obstacle.yaw
w_half = obstacle.w / 2.
l_half = obstacle.l / 2.
h = obstacle.h
        # build the box vertices
point_A_x = (x_center - l_half * math.cos(-yaw) - w_half * math.sin(-yaw))
point_A_y = (y_center + l_half * math.sin(-yaw) - w_half * math.cos(-yaw))
# Get B point
point_B_x = (x_center + l_half* math.cos(-yaw) - w_half * math.sin(-yaw))
point_B_y = (y_center - l_half* math.sin(-yaw) - w_half * math.cos(-yaw))
# Get C point
point_C_x = (x_center + l_half * math.cos(-yaw) + w_half * math.sin(-yaw))
point_C_y = (y_center - l_half * math.sin(-yaw) + w_half * math.cos(-yaw))
# Get D point
point_D_x = (x_center - l_half * math.cos(-yaw) + w_half * math.sin(-yaw))
point_D_y = (y_center + l_half * math.sin(-yaw) + w_half * math.cos(-yaw))
vertices = np.array([
[point_A_x, point_A_y, z_center],
[point_B_x, point_B_y, z_center],
[point_C_x, point_C_y, z_center],
[point_D_x, point_D_y, z_center],
[point_A_x, point_A_y, z_center+h],
[point_B_x, point_B_y, z_center+h],
[point_C_x, point_C_y, z_center+h],
[point_D_x, point_D_y, z_center+h]
])
        # face index table kept from the original code; note it is unused below
        # (go.Mesh3d receives its topology via the i/j/k arguments instead)
        indices = np.array([
[0, 1, 2, 3],
[0, 1, 5, 4],
[1, 2, 6, 5],
[2, 3, 7, 6],
[3, 0, 4, 7],
[4, 5, 6, 7]
])
all_vertex.append(vertices)
all_labels.append('{}-{}: {:.3f}'.format(obstacle.label, id_box, obstacle.score))
return all_vertex, all_labels
def draw_annotations_frame(dataset, frame_list, frame, fig):
if frame_list is None: return fig
df = pd.read_csv(os.path.join(dataset.annotations_data_path, frame_list[frame]), delimiter=' ', names=dataset.annotations_format)
    # compute the box vertices
all_vertex, all_labels = return_vertex(df, dataset, frame_list[frame])
for i, _ in enumerate(all_vertex):
vertices = all_vertex[i]
label = all_labels[i]
faces = go.Mesh3d(
x=vertices[:, 0],
y=vertices[:, 1],
z=vertices[:, 2],
i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],
name=label,
opacity=0.3
)
fig.add_trace(faces)
return fig
################################ LiDAR ################################
def draw_lidar(dataset, lidar_frame_list, frame, lidar_res):
filename = os.path.join(dataset.lidar_data_path, lidar_frame_list[frame])
points = dataset.load_lidar(filename)
PC_scatter = go.Scatter3d(
x=points["x"],
y=points["y"],
z=points["z"],
mode='markers',
marker=dict(
size=lidar_res,
color=[0,0,0],
opacity=0.3
)
)
return PC_scatter
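# Usage sketch (hypothetical dataset/frame-list objects, for illustration):
#   fig = go.Figure(data=[draw_lidar(dataset, lidar_frames, frame=0, lidar_res=1)])
#   fig = draw_annotations_frame(dataset, ann_frames, frame=0, fig=fig)
#   fig.show()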
|
ArmanAstud/3D_detection_visualizer
|
scripts/utils.py
|
utils.py
|
py
| 4,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.exit",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs.Mesh3d",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "plotly.graph_objs.Scatter3d",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 145,
"usage_type": "name"
}
] |
36466168685
|
#!/usr/bin/env python
import utils
import gzip
import argparse
from pysam import TabixFile
import numpy as np
import glob
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-r',
dest='rate_dir',
required=True,
help='Path to directory containing rate files')
args = parser.parse_args()
return args
def main():
args = get_args()
regions = []
first_file = True
file_i = 0
for rate_file in glob.glob(args.rate_dir + '*.probe.coverage_rate.bed.gz'):
with gzip.open(rate_file,'rt') as f:
line_i = 0
for l in f:
A = l.rstrip().split('\t')
                region = utils.Interval(chrom=A[0],
                                        start=int(A[1]),
                                        end=int(A[2]),
                                        data=A[3:] if len(A) > 3 else None)
                # skip zero-length regions
                if region.start == region.end:
                    continue
                # the original constructed an identical Interval here; reuse the parsed record
                interval = region
if first_file:
regions.append([interval])
else:
regions[line_i].append(interval)
line_i += 1
first_file = False
file_i += 1
for region in regions:
depths = []
for interval in region:
depths.append(float(interval.data[1]))
print('\t'.join([str(x) for x in [region[0].chrom,
region[0].start,
region[0].end,
region[0].data[0],
np.mean(depths),
np.std(depths)]]))
if __name__ == '__main__': main()
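# Output sketch (illustrative values): one BED-like row per probe region, with
# the per-file depth column aggregated across all rate files, e.g.
#   chr1    1000    1500    probe_A    37.25    4.18
# where the last two columns are np.mean and np.std of the collected depths.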
|
ryanlayerlab/layer_lab_chco
|
bin/get_regions_zscores.py
|
get_regions_zscores.py
|
py
| 2,039 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.Interval",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.Interval",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 68,
"usage_type": "call"
}
] |
35041229962
|
from toscaparser.imports import ImportsLoader
from configuration_tool.common import utils
from configuration_tool.common.configuration import Configuration
from configuration_tool.common.tosca_reserved_keys import *
from configuration_tool.providers.common.provider_configuration import ProviderConfiguration
from configuration_tool.providers.common.provider_resource import ProviderResource
import os, copy, logging, sys
SEPARATOR = ':'
class ProviderToscaTemplate(object):
REQUIRED_CONFIG_PARAMS = (TOSCA_ELEMENTS_MAP_FILE, TOSCA_ELEMENTS_DEFINITION_FILE)
DEPENDENCY_FUNCTIONS = (GET_PROPERTY, GET_ATTRIBUTE, GET_OPERATION_OUTPUT)
DEFAULT_ARTIFACTS_DIRECTOR = ARTIFACTS
def __init__(self, template, provider, configuration_tool, cluster_name, host_ip_parameter, is_delete,
grpc_cotea_endpoint):
self.host_ip_parameter = host_ip_parameter
self.provider = provider
self.grpc_cotea_endpoint = grpc_cotea_endpoint
self.is_delete = is_delete
self.configuration_tool = configuration_tool
self.provider_config = ProviderConfiguration(self.provider)
self.base_config = Configuration()
self.cluster_name = cluster_name
self.software_types = set()
for sec in self.REQUIRED_CONFIG_PARAMS:
if not self.provider_config.config[self.provider_config.MAIN_SECTION].get(sec):
logging.error("Provider configuration parameter \'%s\' has missing value" % sec)
logging.error("Translating failed")
raise Exception("Provider configuration parameter \'%s\' has missing value" % sec)
self.definitions = {}
import_definition_file = ImportsLoader([self.definition_file()], None, list(SERVICE_TEMPLATE_KEYS),
template.get(TOPOLOGY_TEMPLATE))
self.definitions.update(import_definition_file.get_custom_defs())
import_definition_file = ImportsLoader(self.base_definition_file(), None, list(SERVICE_TEMPLATE_KEYS),
template.get(TOPOLOGY_TEMPLATE))
self.definitions.update(import_definition_file.get_custom_defs())
self.definitions.update(template.get(NODE_TYPES, {}))
self.definitions.update(template.get(RELATIONSHIP_TYPES, {}))
self.definitions.update(template.get(CAPABILITY_TYPES, {}))
self.definitions.update(template.get(DATA_TYPES, {}))
self.definitions.update(template.get(POLICY_TYPES, {}))
self.definitions.update(template.get(GROUP_TYPES, {}))
self.definitions.update(template.get(INTERFACE_TYPES, {}))
self.fulfil_definitions_with_parents()
self.node_templates = {}
self.relationship_templates = {}
self.inputs = {}
self.outputs = {}
if template.get(TOPOLOGY_TEMPLATE).get(NODE_TEMPLATES):
self.node_templates = template.get(TOPOLOGY_TEMPLATE)[NODE_TEMPLATES]
if template.get(TOPOLOGY_TEMPLATE).get(RELATIONSHIP_TEMPLATES):
self.relationship_templates = template.get(TOPOLOGY_TEMPLATE)[RELATIONSHIP_TEMPLATES]
if template.get(TOPOLOGY_TEMPLATE).get(OUTPUTS):
self.outputs = template.get(TOPOLOGY_TEMPLATE)[OUTPUTS]
if template.get(TOPOLOGY_TEMPLATE).get(INPUTS):
self.inputs = template.get(TOPOLOGY_TEMPLATE)[INPUTS]
self.configuration_content = None
self.configuration_ready = None
self.template_dependencies = dict()
self._relation_target_source = dict()
self.resolve_in_template_dependencies()
# After this step self.node_templates has requirements with node_filter parameter
self.replace_requirements_with_node_filter()
self.provider_nodes = self._provider_nodes()
self.provider_relations = self._provider_relations()
self.provider_operations, self.reversed_provider_operations = self.sort_nodes_and_operations_by_graph_dependency()
def resolve_in_template_dependencies(self):
"""
        TODO: think through the logic of replacing mentions by id
        Replaces all mentions of node_templates by name in requirements,
        placing a dictionary with a node_filter there instead
:return:
"""
for node_name, node in self.node_templates.items():
for req in node.get(REQUIREMENTS, []):
for req_name, req_body in req.items():
# Valid keys are ('node', 'node_filter', 'relationship', 'capability', 'occurrences')
# Only node and relationship might be a template name or a type
req_relationship = req_body.get(RELATIONSHIP)
req_node = req_body.get(NODE)
if req_relationship is not None:
(_, _, type_name) = utils.tosca_type_parse(req_relationship)
if type_name is None:
self.add_template_dependency(node_name, req_relationship)
self._relation_target_source[req_relationship] = {
'source': node_name,
'target': req_node
}
if req_node is not None:
(_, _, type_name) = utils.tosca_type_parse(req_node)
if type_name is None:
self.add_template_dependency(node_name, req_node)
node_types_from_requirements = set()
req_definitions = self.definitions[node[TYPE]].get(REQUIREMENTS, [])
for req in req_definitions:
for req_name, req_def in req.items():
if req_def.get(NODE, None) is not None:
if req_def[NODE] != node[TYPE]:
node_types_from_requirements.add(req_def[NODE])
for req_node_name, req_node_tmpl in self.node_templates.items():
if req_node_tmpl[TYPE] in node_types_from_requirements:
self.add_template_dependency(node_name, req_node_name)
def add_template_dependency(self, node_name, dependency_name):
if not dependency_name == SELF and not node_name == dependency_name:
if self.template_dependencies.get(node_name) is None:
self.template_dependencies[node_name] = {dependency_name}
else:
self.template_dependencies[node_name].add(dependency_name)
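    # Illustrative sketch (hypothetical template names): after a template
    # "server" is found to require "network" and "port", the accumulated
    # structure is self.template_dependencies == {'server': {'network', 'port'}}.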
def base_definition_file(self):
file_definitions = self.base_config.config['main'][TOSCA_ELEMENTS_DEFINITION_FILE].split(',')
def_list = []
for file_definition in file_definitions:
if not os.path.isabs(file_definition):
file_definition = os.path.join(utils.get_project_root_path(), file_definition)
def_list.append(file_definition)
if not os.path.isfile(file_definition):
logging.error("TOSCA definition file not found: %s" % file_definition)
raise Exception("TOSCA definition file not found: %s" % file_definition)
return def_list
def definition_file(self):
file_definition = self.provider_config.config['main'][TOSCA_ELEMENTS_DEFINITION_FILE]
if not os.path.isabs(file_definition):
file_definition = os.path.join(self.provider_config.config_directory, file_definition)
if not os.path.isfile(file_definition):
logging.error("TOSCA definition file not found: %s" % file_definition)
raise Exception("TOSCA definition file not found: %s" % file_definition)
return file_definition
def replace_requirements_with_node_filter(self):
for node_name, node in self.node_templates.items():
for req in node.get(REQUIREMENTS, []):
for req_name, req_body in req.items():
if req_body.get(NODE):
node_tmpl = self.node_templates.get(req_body[NODE])
node_filter = dict()
properties = node_tmpl.get(PROPERTIES)
props_list = []
if properties:
for prop_name, prop in properties.items():
props_list.append({prop_name: prop})
capabilities = node_tmpl.get(CAPABILITIES)
caps_list = []
if capabilities:
for cap_name, cap in capabilities.items():
cap_props = cap.get(PROPERTIES, {})
cap_props_list = []
for prop_name, prop in cap_props.items():
                                    cap_props_list.append({prop_name: prop})  # dict entry, not a set
caps_list.append({PROPERTIES: cap_props_list})
if properties:
node_filter[PROPERTIES] = props_list
if capabilities:
node_filter[CAPABILITIES] = caps_list
req_body[NODE_FILTER] = node_filter
req[req_name] = req_body
def _provider_nodes(self):
"""
Create a list of ProviderResource classes to represent a node in TOSCA
:return: list of class objects inherited from ProviderResource
"""
provider_nodes = dict()
for node_name, node in self.node_templates.items():
(namespace, category, type_name) = utils.tosca_type_parse(node[TYPE])
is_software_component = node[TYPE] in self.software_types
if namespace != self.provider and not is_software_component or category != NODES:
logging.error('Unexpected values: node \'%s\' not a software component and has a provider \'%s\'. '
'Node will be ignored' % (node.name, namespace))
else:
provider_node_instance = ProviderResource(self.provider, self.is_delete, self.grpc_cotea_endpoint, self.configuration_tool, node,
node_name,
self.host_ip_parameter, self.definitions[node[TYPE]],
is_software_component=is_software_component)
provider_nodes[node_name] = provider_node_instance
return provider_nodes
def _provider_relations(self):
provider_relations = dict()
for rel_name, rel_body in self.relationship_templates.items():
provider_rel_instance = ProviderResource(self.provider, self.is_delete, self.grpc_cotea_endpoint, self.configuration_tool, rel_body,
rel_name,
self.host_ip_parameter, self.definitions[rel_body[TYPE]],
is_relationship=True,
relation_target_source=self._relation_target_source)
provider_relations[rel_name] = provider_rel_instance
return provider_relations
def _provider_nodes_by_name(self):
"""
Get provider_nodes_by_name
:return: self.provider_nodes_by_name
"""
provider_nodes_by_name = dict()
for node in self.provider_nodes:
provider_nodes_by_name[node.nodetemplate.name] = node
return provider_nodes_by_name
def sort_nodes_and_operations_by_graph_dependency(self):
"""
        This method generates a dict of ProviderTemplates with operations, sorted by
        dependencies from the normative and provider TOSCA templates
"""
nodes = set(self.provider_nodes.keys())
nodes = nodes.union(set(self.provider_relations.keys()))
dependencies = {}
        lifecycle = ['configure', 'start', 'stop']  # ['delete'] is excluded: we can't support
        # deleting while creating; delete operations execute only when the --delete option is active
reversed_full_lifecycle = lifecycle[::-1] + ['create']
# generate only dependencies from nodes
for templ_name in nodes:
set_intersection = nodes.intersection(self.template_dependencies.get(templ_name, set()))
templ = self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name))
(_, element_type, _) = utils.tosca_type_parse(templ.type)
if element_type == NODES:
if INTERFACES in templ.tmpl and 'Standard' in templ.tmpl[INTERFACES]:
new_operations = ['create']
# operation create always exists
for elem in lifecycle:
if elem in templ.tmpl[INTERFACES]['Standard']:
new_operations.append(elem)
                    # if there are any other operations, add them to new_operations and translate
                    # to a dict in the format {node.op: {node1, node2}}
                    # (meaning: node requires node1 and node2)
if len(new_operations) == 1:
utils.deep_update_dict(dependencies, {templ_name + SEPARATOR + 'create': set_intersection})
else:
for i in range(1, len(new_operations)):
utils.deep_update_dict(dependencies, {
templ_name + SEPARATOR + new_operations[i]: {
templ_name + SEPARATOR + new_operations[i - 1]}})
utils.deep_update_dict(dependencies,
{templ_name + SEPARATOR + new_operations[0]: set_intersection})
else:
utils.deep_update_dict(dependencies, {templ_name + SEPARATOR + 'create': set_intersection})
new_dependencies = {}
# new_dependencies is needed for updating set operations
# dict must be in format {node.op: {node1, node2}}
for key, value in dependencies.items():
new_set = set()
for elem in value:
for oper in reversed_full_lifecycle:
if elem + SEPARATOR + oper in dependencies:
new_set.add(elem + SEPARATOR + oper)
break
elif elem in dependencies:
new_set.add(elem)
break
new_dependencies[key] = new_set
# Adding relationships operations pre_configure_source after create source node
# pre_configure_target after create target node
# add_source in parallel with pre_configure_source but in will be executed on target
# post_configure_target after configure target node (if not configure then create - in parallel
# with pre_configure_target)
# post_configure_source after configure target node (if not configure then create - in parallel
# with pre_configure_source)
# other - not supported!
for templ_name in nodes:
templ = self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name))
(_, element_type, _) = utils.tosca_type_parse(templ.type)
if element_type == RELATIONSHIPS:
if INTERFACES in templ.tmpl and 'Configure' in templ.tmpl[INTERFACES]:
if 'pre_configure_source' in templ.tmpl[INTERFACES]['Configure']:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'pre_configure_source', 'create', ['add_source'])
if 'pre_configure_target' in templ.tmpl[INTERFACES]['Configure']:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
'pre_configure_target', 'create')
if 'post_configure_source' in templ.tmpl[INTERFACES]['Configure']:
if templ.source + SEPARATOR + 'configure' in new_dependencies:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'post_configure_source', 'configure')
else:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'post_configure_source', 'create')
if 'post_configure_target' in templ.tmpl[INTERFACES]['Configure']:
if templ.target + SEPARATOR + 'configure' in new_dependencies:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
'post_configure_target', 'configure')
else:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
'post_configure_target', 'create')
if 'add_source' in templ.tmpl[INTERFACES]['Configure']:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'add_source', 'create', ['pre_configure_source'])
if 'add_target' in templ.tmpl[INTERFACES]['Configure']:
logging.warning('Operation add_target not supported, it will be skipped')
if 'target_changed' in templ.tmpl[INTERFACES]['Configure']:
logging.warning('Operation target_changed not supported, it will be skipped')
if 'remove_target' in templ.tmpl[INTERFACES]['Configure']:
logging.warning('Operation remove_target not supported, it will be skipped')
# mapping strings 'node.op' to provider template of this node with this operation
templ_mappling = {}
for elem in new_dependencies:
templ_name = elem.split(SEPARATOR)[0]
templ = copy.deepcopy(self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name)))
templ.operation = elem.split(SEPARATOR)[1]
if INTERFACES in templ.tmpl:
if 'Configure' in templ.tmpl[INTERFACES]:
templ.tmpl[INTERFACES]['Configure'] = {templ.operation: templ.tmpl[INTERFACES]['Configure'][templ.operation]}
if 'Standard' in templ.tmpl[INTERFACES]:
templ.tmpl[INTERFACES]['Standard'] = {templ.operation: templ.tmpl[INTERFACES]['Standard'][templ.operation]}
templ_mappling[elem] = templ
templ_dependencies = {}
reversed_templ_dependencies = {}
# create dict where all elements will be replaced with provider template from templ_mappling
# reversed_templ_dependencies needed for delete - it just a reversed version of graph
for key, value in new_dependencies.items():
new_list = []
for elem in value:
new_list.append(templ_mappling[elem])
if templ_mappling[elem] not in reversed_templ_dependencies:
reversed_templ_dependencies[templ_mappling[elem]] = [templ_mappling[key]]
elif templ_mappling[key] not in reversed_templ_dependencies[templ_mappling[elem]]:
reversed_templ_dependencies[templ_mappling[elem]].append(templ_mappling[key])
templ_dependencies[templ_mappling[key]] = new_list
if len(templ_dependencies) <= 1:
reversed_templ_dependencies = copy.copy(templ_dependencies)
return templ_dependencies, reversed_templ_dependencies
def update_relationships(self, new_dependencies, templ_name, direction, rel_name, post_op, banned_ops=[]):
utils.deep_update_dict(new_dependencies, {
templ_name + SEPARATOR + rel_name: {direction + SEPARATOR + post_op}})
for key, value in new_dependencies.items():
for elem in value:
if elem == direction + SEPARATOR + post_op and key != templ_name + SEPARATOR + rel_name and \
key not in [templ_name + SEPARATOR + x for x in banned_ops]:
utils.deep_update_dict(new_dependencies,
{key: {templ_name + SEPARATOR + rel_name}})
return new_dependencies
def _get_full_defintion(self, definition, def_type, ready_set):
if def_type in ready_set:
return definition, def_type in self.software_types
(_, _, def_type_short) = utils.tosca_type_parse(def_type)
is_software_type = def_type_short == 'SoftwareComponent'
is_software_parent = False
parent_def_name = definition.get(DERIVED_FROM, None)
if parent_def_name is not None:
if def_type == parent_def_name:
logging.critical("Invalid type \'%s\' is derived from itself" % def_type)
raise Exception("Invalid type \'%s\' is derived from itself" % def_type)
if parent_def_name in ready_set:
parent_definition = self.definitions[parent_def_name]
is_software_parent = parent_def_name in self.software_types
else:
parent_definition, is_software_parent = \
self._get_full_defintion(self.definitions[parent_def_name], parent_def_name, ready_set)
parent_definition = copy.deepcopy(parent_definition)
definition = utils.deep_update_dict(parent_definition, definition)
if is_software_type or is_software_parent:
self.software_types.add(def_type)
ready_set.add(def_type)
return definition, def_type in self.software_types
def fulfil_definitions_with_parents(self):
ready_definitions = set()
for def_name, definition in self.definitions.items():
self.definitions[def_name], _ = self._get_full_defintion(definition, def_name, ready_definitions)
if self.definitions[def_name].get(DERIVED_FROM):
del self.definitions[def_name][DERIVED_FROM]
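# Illustrative sketch of the flattening above (hypothetical type names): given
#   {'B': {'derived_from': 'A', 'properties': {'y': {}}},
#    'A': {'properties': {'x': {}}}}
# fulfil_definitions_with_parents() rewrites B so it carries both 'x' and 'y',
# then drops its 'derived_from' key.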
|
sadimer/clouni_configuration_tool
|
configuration_tool/providers/common/tosca_template.py
|
tosca_template.py
|
py
| 22,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "configuration_tool.common",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "configuration_tool.providers.common.provider_configuration.ProviderConfiguration",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.configuration.Configuration",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "toscaparser.imports.ImportsLoader",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "toscaparser.imports.ImportsLoader",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils.tosca_type_parse",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.tosca_type_parse",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "os.path.isabs",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "configuration_tool.common.utils.get_project_root_path",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.path.isabs",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils.tosca_type_parse",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "configuration_tool.providers.common.provider_resource.ProviderResource",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "configuration_tool.providers.common.provider_resource.ProviderResource",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils.tosca_type_parse",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.tosca_type_parse",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "logging.warning",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.utils.tosca_type_parse",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "logging.critical",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils.deep_update_dict",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "configuration_tool.common.utils",
"line_number": 381,
"usage_type": "name"
}
] |
43855241031
|
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import torchvision
from torchvision import transforms, models, datasets
import imageio
import time
import warnings
import random
import sys
import copy
import json
from PIL import Image
####################################################################
# restore normalized image data to the [0, 1] range so it can be displayed
def im_convert(tensor):
    # prepare the data for display
    image = tensor.to("cpu").clone().detach()
    image = image.numpy().squeeze()
    image = image.transpose(1, 2, 0)
    # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    image = image * np.array((0.229, 0.224, 0.225)) + \
        np.array((0.485, 0.456, 0.406))
    # clamp values below 0 to 0 and above 1 to 1
    image = image.clip(0, 1)
    return image
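# Usage sketch: given one element of a normalized batch (a CHW tensor),
#   img = im_convert(inputs[0])   # HWC float array clipped to [0, 1]
#   plt.imshow(img)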
####################################################################
####################################################################
def set_parameter_requires_grad(a_model, bol_frozen_param):
if bol_frozen_param:
for param in a_model.parameters():
param.requires_grad = False
####################################################################
def initialize_model(model_name, num_classes, bol_frozen_nn_params, use_pretrained=True):
    # pick the requested model; initialization differs slightly between models
    model_ft = None
    input_size = 0
    if model_name == "resnet":
        """ Resnet152
        """
        model_ft = models.resnet152(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, bol_frozen_nn_params)
        # then rebuild the final fully connected layer for our number of classes
        # (the original hardcoded 102 here, ignoring the num_classes argument)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, num_classes),
                                    nn.LogSoftmax(dim=1))
        input_size = 224
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg16(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, bol_frozen_nn_params)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, bol_frozen_nn_params)
        # Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
return model_ft, input_size
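# Usage sketch (assumed arguments, mirroring the call further below):
#   model, size = initialize_model('resnet', 102, bol_frozen_nn_params=True)
#   # -> frozen ResNet-152 with a fresh 102-way LogSoftmax head, size == 224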
data_dir = './flower_data/'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
# data_transforms is a dict recording the preprocessing operations for the
# [train] and [valid] datasets
data_transforms = {
    'train': transforms.Compose(
        [transforms.RandomRotation(45),  # random rotation, angle picked between -45 and 45 degrees
         transforms.CenterCrop(224),  # crop from the center
         transforms.RandomHorizontalFlip(p=0.5),  # random horizontal flip with probability p
         transforms.RandomVerticalFlip(p=0.5),  # random vertical flip
         # the four parameters are brightness, contrast, saturation and hue
         transforms.ColorJitter(
             brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),
         transforms.RandomGrayscale(p=0.025),  # random grayscale with probability p; all 3 channels become R=G=B
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [
             0.229, 0.224, 0.225])  # mean, std
         ]),
    'valid': transforms.Compose(
        [transforms.Resize(256),
         transforms.CenterCrop(224),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
         ])
}
batch_size = 4
# image_datasets is a dict holding both datasets, including image data and class labels
image_datasets = {x: datasets.ImageFolder(os.path.join(
    data_dir, x), data_transforms[x]) for x in ['train', 'valid']}
# define a dataloader for each of the train and valid datasets
dataloaders = {x: torch.utils.data.DataLoader(
    image_datasets[x], batch_size=batch_size, shuffle=True) for x in ['train', 'valid']}
# sizes of the training and validation sets
# dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']}
# class_ids is a list, e.g.: ['1', '10', '100', '101', '102', '11', ...]
class_ids = image_datasets['train'].classes
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
# prepare an iterator for reading the data
data_iter = iter(dataloaders['valid'])
# region demo: fetch one batch of data and display it
# fig = plt.figure(figsize=(18, 10))
# columns = 3
# rows = 3
# # fetch one batch_size worth of data.
# # note: category_ids stores each class's position in the
# # image_datasets['train'].classes list, not the class number itself
# inputs, category_ids = next(data_iter)
# for idx in range(columns*rows):
#     ax = fig.add_subplot(rows, columns, idx+1, xticks=[], yticks=[])
#     ax.set_title(str(int(class_ids[category_ids[idx]])) + ':' +
#                  cat_to_name[str(int(class_ids[category_ids[idx]]))])
#     plt.imshow(im_convert(inputs[idx]))
# plt.tight_layout()
# plt.show()
# endregion demo: fetch one batch of data and display it
# many options: ['resnet', 'alexnet', 'vgg', 'squeezenet', 'densenet', 'inception']
model_name = 'resnet'
# whether to reuse a pretrained feature extractor, i.e. keep its learned weights
bol_frozen_nn_param = True
# whether to train on the GPU
train_on_gpu = torch.cuda.is_available()
my_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# (this plain resnet152 is immediately replaced by initialize_model below)
model_ft = models.resnet152(pretrained=True)
model_ft, input_size = initialize_model(
    model_name, 102, bol_frozen_nn_param, use_pretrained=True)
# move the model to the GPU
model_ft = model_ft.to(my_device)
# checkpoint file for saving the model
filename = 'checkpoint.pth'
# whether to train all layers
params_to_update = model_ft.parameters()
# print('params_to_update:\n', params_to_update)
# params_to_update = model_ft.named_parameters()
# print('params_to_update:\n', params_to_update)
print("Params to learn:")
if bol_frozen_nn_param:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            print("\t", name)
# optimizer setup
optimizer_ft = optim.Adam(params_to_update, lr=1e-2)
scheduler = optim.lr_scheduler.StepLR(
    optimizer_ft, step_size=7, gamma=0.1)  # decay the learning rate to 1/10 every 7 epochs
# the final layer already applies LogSoftmax(), so nn.CrossEntropyLoss() must not be used;
# nn.CrossEntropyLoss() is equivalent to LogSoftmax() combined with nn.NLLLoss()
criterion = nn.NLLLoss()
# i.e. do not use criterion = nn.CrossEntropyLoss() here
# =====================================================================================================
# =====================================================================================================
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, filename=filename):
since = time.time()
best_acc = 0
    # region load a saved checkpoint
'''
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
model.class_to_idx = checkpoint['mapping']
'''
# endregion
model.to(my_device)
val_acc_history = []
train_acc_history = []
train_losses = []
valid_losses = []
LRs = [optimizer.param_groups[0]['lr']]
best_model_wts = copy.deepcopy(model.state_dict())
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
        # training and validation phases
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # training mode
            else:
                model.eval()  # evaluation mode
            running_loss = 0.0
            running_corrects = 0
            # iterate over all of the data
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(my_device)
labels = labels.to(my_device)
                # zero the gradients
                optimizer.zero_grad()
                # compute and update gradients only in the training phase
with torch.set_grad_enabled(phase == 'train'):
if is_inception and phase == 'train':
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
                    else:  # resnet takes this branch
outputs = model(inputs)
loss = criterion(outputs, labels)
                    # torch.max(outputs, 1) returns each row's maximum value and the
                    # column index of that maximum; predictions and labels are both
                    # indices into the class list
pred_values, pred_idxs = torch.max(outputs, 1)
print('outputs:', outputs)
print('predict value:', pred_values)
print('prdict_category:', pred_idxs)
print('labels:', labels.data)
                    # update weights only in the training phase
if phase == 'train':
loss.backward()
optimizer.step()
                # accumulate the loss
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(pred_idxs == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double(
) / len(dataloaders[phase].dataset)
time_elapsed = time.time() - since
print('Time elapsed {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
            # keep the best-performing model so far
if phase == 'valid' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
state = {
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}
torch.save(state, filename)
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                valid_losses.append(epoch_loss)
                scheduler.step()  # StepLR steps per epoch and takes no metric argument
if phase == 'train':
train_acc_history.append(epoch_acc)
train_losses.append(epoch_loss)
print('Optimizer learning rate : {:.7f}'.format(
optimizer.param_groups[0]['lr']))
LRs.append(optimizer.param_groups[0]['lr'])
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
    # after training, load the best weights as the final model
model.load_state_dict(best_model_wts)
return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs
# =====================================================================================================
# train only the custom final layer, i.e. the fully connected layer
# model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = train_model(
# model_ft, dataloaders, criterion, optimizer_ft, num_epochs=1, is_inception=(model_name == "inception"))
# set the network parameters back to a trainable state
for param in model_ft.parameters():
param.requires_grad = True
# then continue training all parameters, with a smaller learning rate
optimizer = optim.Adam(params_to_update, lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# loss function
criterion = nn.NLLLoss()
# Load the checkpoint
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
#model_ft.class_to_idx = checkpoint['mapping']
# train again, this time the entire model
# model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = train_model(
# model_ft, dataloaders, criterion, optimizer, num_epochs=1, is_inception=(model_name == "inception"))
# fetch one batch of test data
dataiter = iter(dataloaders['valid'])
images, labels = next(dataiter)  # iterator protocol; .next() was Python 2 style
# Call model.eval() before running the model on test samples; otherwise the
# batch-normalization layers keep updating their running statistics on every
# forward pass, effectively changing the model even without training.
model_ft.eval()
if train_on_gpu:
output = model_ft(images.cuda())
else:
output = model_ft(images)
predict_value, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(
preds_tensor.cpu().numpy())
print(predict_value)
print(preds)
print(labels)
# region display the validation images and their predicted classes
fig = plt.figure(figsize=(18, 12))
columns = 2
rows = 2
for idx in range(columns*rows):
ax = fig.add_subplot(rows, columns, idx+1, xticks=[], yticks=[])
plt.imshow(im_convert(images[idx]))
ax.set_title("{} (label:{}/{})".format(cat_to_name[class_ids[int(preds[idx])]],
class_ids[labels[idx].item()], cat_to_name[class_ids[labels[idx].item()]]),
color=("green" if cat_to_name[str(preds[idx])] == cat_to_name[str(labels[idx].item())] else "red"))
plt.tight_layout()
plt.show()
# endregion
|
harryjd/keras_dogs_vs_cats
|
图像识别_仿写唐宇迪的例子.py
|
图像识别_仿写唐宇迪的例子.py
|
py
| 14,465 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torchvision.models.resnet152",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn.LogSoftmax",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torchvision.models.vgg16",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "torchvision.models.inception_v3",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomRotation",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomVerticalFlip",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ColorJitter",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomGrayscale",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models.resnet152",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.set_grad_enabled",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 376,
"usage_type": "name"
}
] |
35543810797
|
from django.urls import path
from . import views
app_name = 'cv'
urlpatterns = [
path('curriculo', views.index, name='index'),
path('curriculo/dados-pessoais', views.cadastrar_ou_aletarar_foto_e_objetivo, name='editar_dados'),
path('curriculo/educacao', views.cadastrar_educacao, name='educacao'),
path('curriculo/educacao/<id>/excluir', views.excluir_educacao, name='excluir_educacao'),
path('curriculo/experiencia', views.cadastrar_experiencia, name='experiencia'),
path('curriculo/experiencia/<id>/excluir', views.excluir_experiencia, name='excluir_experiencia'),
path('curriculo/visualizar', views.curriculo, name='curriculo'),
]
|
smctinf/casa_do_trabalhador
|
curriculo/urls.py
|
urls.py
|
py
| 662 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
}
] |
47533751
|
from typing import List
class Solution:
def maxUncrossedLines(self, nums1: List[int], nums2: List[int]) -> int:
# dp table
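        # dp[i][j] = length of the longest common subsequence of nums1[:i] and nums2[:j];
        # uncrossed connecting lines correspond exactly to an LCS of the two arrays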
dp = [[0] * (len(nums2)+1) for _ in range(len(nums1)+1)]
        # initialization: row 0 and column 0 stay 0 (LCS against an empty prefix)
# traverse dp table
for i in range(1, len(nums1)+1):
for j in range(1, len(nums2)+1):
if nums1[i-1] == nums2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
return dp[-1][-1]
if __name__ == "__main__":
nums1 = [1,4,2]
nums2 = [1,2,4]
s = Solution()
assert s.maxUncrossedLines(nums1, nums2) == 2
|
code-cp/leetcode
|
solutions/1035/main.py
|
main.py
|
py
| 689 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
25823990802
|
import numpy as np
import pygame
import constants
import ann  # assumed: the repo's neural-network module used by ANN_Online below (missing from the original imports)
class Driver(object):
"""
This class implements the car's driver: visibility, controls etc.
"""
def __init__(self,
view_distance=constants.MAX_VIEW_DISTANCE,
view_resolution=constants.VIEW_RESOLUTION,
view_angle=constants.VIEW_ANGLE):
self.view_distance = view_distance
self.view_resolution = view_resolution
self.view_angle = view_angle
self.draw_visual = True
self.init_view()
self.error = 0.
def init_view(self):
"""
Initialize the driver's view.
"""
self.view_distances = np.linspace(constants.MIN_VIEW_DISTANCE,
self.view_distance,
self.view_resolution[1])
self.view_angles = np.linspace(-self.view_angle/2.,
self.view_angle/2.,
self.view_resolution[0]) * np.pi/180.
self.view_x = np.empty(self.view_resolution)
self.view_y = np.empty(self.view_resolution)
self.view_field = np.zeros(self.view_resolution)
def look(self, car, track):
"""
Evaluate the driver's view ahead.
"""
cos_angles = np.cos(car.direction + self.view_angles)
self.view_x = (car.rect.center[0]
+ np.outer(cos_angles, self.view_distances)
).astype(int)
sin_angles = np.sin(car.direction + self.view_angles)
self.view_y = (car.rect.center[1]
- np.outer(sin_angles, self.view_distances)
).astype(int)
# limit coordinates within track area (only for checking if off track)
x_matrix0 = np.where((self.view_x < 0) |
(self.view_x >= constants.WIDTH_TRACK),
0, self.view_x)
y_matrix0 = np.where((self.view_y < 0) |
(self.view_y >= constants.HEIGHT_TRACK),
0, self.view_y)
self.view_field[:] = track.off_track(x_matrix0, y_matrix0)
# block the view behind corners etc.
if constants.BLOCK_VIEW:
for ii in range(self.view_resolution[0]):
lineview = self.view_field[ii,:]
if np.any(lineview):
lineview[np.argmax(lineview):] = 1
def draw_viewfield(self, screen):
"""
Draw the field of view.
"""
for xx, yy, colind in zip(self.view_x.flatten(),
self.view_y.flatten(),
self.view_field.flatten()):
pygame.draw.circle(screen, constants.COLOR_VIEWFIELD[int(colind)], (xx, yy), 3)
def update(self, car, *args):
"""
Default actions for drivers.
"""
car.accelerate = constants.ALWAYS_FULLGAS
car.brake = False
car.turn_left = False
car.turn_right = False
class Player(Driver):
"""
This class implements the driver for the player car.
"""
def __init__(self, *args, **kwargs):
super(Player, self).__init__(*args, **kwargs)
def update(self, car):
"""
Read keyboard for controlling the player car.
"""
super(Player, self).update(car)
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
car.accelerate = True
if keys[pygame.K_DOWN]:
car.brake = True
if keys[pygame.K_LEFT]:
car.turn_left = True
if keys[pygame.K_RIGHT]:
car.turn_right = True
class AI_TIF(Driver):
"""
This class implements a simple AI driver that tries to keep most of
the track in front of its view field.
"""
def __init__(self, *args, **kwargs):
super(AI_TIF, self).__init__(*args, **kwargs)
# speed that still (kind of) allows a 90 degree turn
self.allowed_speed = constants.MAX_VIEW_DISTANCE / (
np.pi / (1.5 * constants.TURN_SPEED))
def update(self, car):
"""
The car turns depending on whether its closest side checks
are off track. Brake is applied if the car is going too fast
with wall in front, and especially if the corner is tight.
"""
# TODO: tuned for track and settings, generalize!
super(AI_TIF, self).update(car)
car.accelerate = True
if self.view_field[0,0] and not self.view_field[-1,0]:
car.turn_left = True
elif self.view_field[-1,0] and not self.view_field[0,0]:
car.turn_right = True
if self.view_field[self.view_resolution[0]//2, -1]:
car.brake = car.speed > self.allowed_speed
# special handling of tight corners
if not all(self.view_field[[0,-1], 1]) and car.speed > 1.:
car.brake = True
class ANN_Online(Driver):
"""
This class implements the AI driver for a neural network.
The network is trained online using stochastic gradient descent.
"""
def __init__(self,
n_hidden_neurons=5,
model_car=None,
learning_rate=0.2,
regularization=1.,
*args, **kwargs):
super(ANN_Online, self).__init__(*args, **kwargs)
self.model_car = model_car # the car to learn from
self.learning_rate = learning_rate
self.regularization = regularization
n_inputs = self.view_resolution[0] * self.view_resolution[1] + 1 # viewpoints + speed
n_outputs = 4 # accelerate, brake, left, right
self.ann = ann.ANN(n_inputs, n_hidden_neurons, n_outputs)
def update(self, own_car):
super(ANN_Online, self).update(own_car)
if constants.PLOT_ERROR:
self.evaluate_error()
self.learn()
inputs = self.prepare_inputs(own_car)
outputs = self.ann.feedforward(inputs)
self.process_output(outputs, own_car)
def learn(self):
model_inputs = self.prepare_inputs(self.model_car)
self.ann.train1(model_inputs, self.model_actions(),
self.learning_rate, self.regularization)
def prepare_inputs(self, car):
inputs = car.driver.view_field.flatten().astype(float)
# speed_transform = np.exp(-car.speed)
speed_transform = 1. / max(car.speed, 1.)
inputs = np.insert(inputs, 0, speed_transform, axis=0)
return inputs
def model_actions(self):
return np.array([self.model_car.accelerate,
self.model_car.brake,
self.model_car.turn_left,
self.model_car.turn_right]).astype(float)
def process_output(self, outputs, car):
threshold = 0.5
if outputs[0] > threshold:
car.accelerate = True
if outputs[1] > threshold:
car.brake = True
if outputs[2] > threshold:
car.turn_left = True
if outputs[3] > threshold:
car.turn_right = True
def evaluate_error(self):
"""
Evaluate the cost function with model input data.
"""
inputs = self.prepare_inputs(self.model_car)
outputs = self.ann.feedforward(inputs)
wanted = self.model_actions()
self.error = self.ann.cost(outputs, wanted)
class ANN_Batch(ANN_Online):
"""
This class implements the AI driver for a neural network.
The network is trained online using gradient descent with
a batch of accumulated samples.
"""
def __init__(self,
n_hidden_neurons=5,
model_car=None,
learning_rate=0.2,
regularization=0.1,
epochs=60,
mini_batch_size=100,
*args, **kwargs):
super(ANN_Batch, self).__init__(n_hidden_neurons, model_car,
learning_rate, regularization, *args, **kwargs)
self.epochs = epochs
self.mini_batch_size = mini_batch_size
self.reset_samples()
def learn(self):
"""
This method is called by the update method in the parent class.
Here we only spy the model car.
"""
self.input_samples.append(self.prepare_inputs(self.model_car))
self.output_samples.append(self.model_actions())
def train(self):
"""
Train the whole set of samples.
NOTE: May take a while and pause the game!
"""
print("Training {} samples for {} epochs in batches of {}".format(
len(self.input_samples), self.epochs, self.mini_batch_size))
self.ann.train_set(self.input_samples, self.output_samples,
self.learning_rate, self.regularization,
self.epochs, self.mini_batch_size)
self.reset_samples()
def reset_samples(self):
self.input_samples = []
self.output_samples = []
|
vuolleko/FormulaPF
|
driver.py
|
driver.py
|
py
| 9,079 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "constants.MAX_VIEW_DISTANCE",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "constants.VIEW_RESOLUTION",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "constants.VIEW_ANGLE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "constants.MIN_VIEW_DISTANCE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.outer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.outer",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "constants.WIDTH_TRACK",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "constants.HEIGHT_TRACK",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "constants.BLOCK_VIEW",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.any",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.draw.circle",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "constants.COLOR_VIEWFIELD",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "constants.ALWAYS_FULLGAS",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "constants.MAX_VIEW_DISTANCE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "constants.TURN_SPEED",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "constants.PLOT_ERROR",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "numpy.insert",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 188,
"usage_type": "call"
}
] |
31373127129
|
# encoding=utf8
import datefinder
from datetime import datetime
import sys
import csv
import boto3
from data import Data
from PIL import Image
import pytesseract
import cv2
import os
import re
class TestData:
"""docstring for TestData"""
@staticmethod
def get():
data = Data()
data.set('Filename', 'in.jpg')
data.set('Extracted text', 'abc xxx def')
return data
@staticmethod
def extract(filename):
"""
with open('/home/ubuntu/date-extraction-from-image/credentials.csv', 'r') as input:
next(input)
reader = csv.reader(input)
for line in reader:
access_key_id = line[2]
secret_access_key = line[3]
"""
access_key_id = 'AKIA6LRPMXT6S5TPPDIO'
secret_access_key = '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'
client = boto3.client('rekognition',
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key, region_name='us-east-1')
UPLOADED_FILE = '/tmp/'+filename
filename = "{}.png|jpeg|jpg".format(os.getpid())
with open(UPLOADED_FILE, 'rb') as source_image:
source_bytes = source_image.read()
response = client.detect_text(Image={'Bytes': source_bytes})
text = str(" ".join(re.findall(r"[a-z0-9\/\-\.\,]+", str(response), flags=re.I))).strip().title()
text = re.sub(r"([a-z]+)([0-9]+)", r"\1 \2", text, flags=re.I)
text = re.sub(r"([0-9]+)([a-z]+)", r"\1 \2", text, flags=re.I)
l1=[]
date=[]
opt = dict()
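        # The regex below matches a date appearing shortly after a "DetectedText" token,
        # in day-month-year, month-name day year, month-day-year or day month-name year layouts.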
date_reg_exp2 = re.compile(r'detectedtext\s*[a-zA-Z0-9\s]{0,30}((?:(?:0[1-9]|1[0-9]|2[0-9]|3[0-1])(?:\D)(?:0[1-9]|1[0-2])(?:\D)(?:(?:19[7-9]\d|20\d{2})|\d{2}))|(?:(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|(Nov|Dec)(?:ember)?)(?:\D)(?:0[1-9]|1[0-9]|2[0-9]|3[0-1])(?:\D)(?:(?:19[7-9]\d|20\d{2})|\d{2}))|(?:(?:0[1-9]|1[0-2])(?:\D)(?:0[1-9]|1[0-9]|2[0-9]|3[0-1])(?:\D)(?:(?:19[7-9]\d|20\d{2})|\d{2}))|(?:(?:0[1-9]|1[0-9]|2[0-9]|3[0-1])(?:\D)(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|(Nov|Dec)(?:ember)?)(?:\D)(?:(?:19[7-9]\d|20\d{2})|\d{2})))',flags=re.I)
line = re.search(date_reg_exp2, str(text))
if line:
l1 =list(filter(None,line.groups()))
newDate = [ x for x in datefinder.find_dates(l1[0]) ][0]
            date = re.split(r'[-/. ]', l1[0])  # same character class as before (the original used adjacent string literals)
#opt["date"] = [date[2] + "/" + date[1] + "/" + date[0]]
opt["Date"] = [ newDate.strftime("%Y - %m - %d") ]
return opt
else:
opt['date'] = "date is not present"
return opt
# os.remove(filename)
# return text
|
prasadbiradar/date-extraction-from-images
|
testdata.py
|
testdata.py
|
py
| 2,928 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "data.Data",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "data.set",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "data.set",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datefinder.find_dates",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 70,
"usage_type": "call"
}
] |
20755734857
|
import pandas as pd
import logging as lg
import pickle
lg.basicConfig(filename='data_test_automation.log', level=lg.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %('
'message)s', datefmt='%m-%d %H:%M',
filemode='w')
def automated(a):
"""This function takes the input of the test data file location, and performs all the data processing done
on the test data set.
For logs check the data_test_automation.log file in your system"""
try:
lg.warning("user gave the input path/file as:"+' '+str(a))
df=pd.read_excel(a)
lg.warning("data successfully loaded from the file/path"+' '+str(a))
lg.info("starting all the pre-processing done for the train dataset")
df.dropna(inplace=True)
lg.warning("successfully dropped all null values in the given dataset")
def change_into_datetime(col):
df[col]=pd.to_datetime(df[col])
for i in ['Date_of_Journey','Dep_Time', 'Arrival_Time']:
change_into_datetime(i)
lg.info("successfully changed the required columns into datetime format")
df['journey_day']=df['Date_of_Journey'].dt.day
lg.info("successfully extracted day from Date_of_journey and creating a separate column for day")
df['journey_month']=df['Date_of_Journey'].dt.month
lg.info("successfully extracted month from Date_of_Journey and creating a separate column for month")
def extract_hour(data,col):
data[col+'_hour']=data[col].dt.hour
def extract_min(data,col):
data[col+'_min']=data[col].dt.minute
def drop_col(data,col):
data.drop(col,axis=1,inplace=True)
extract_hour(df,'Dep_Time')
lg.info("successfully extracted hours from Dep_Time and dumped the data into new column Dep_Time_hour")
extract_min(df,'Dep_Time')
lg.info("successfully extracted minutes from Dep_Time and dumped the data into new column Dep_Time_min")
drop_col(df,'Dep_Time')
lg.warning("dropping the original Dep_Time column as we extracted the values form that column")
extract_hour(df,'Arrival_Time')
lg.info("successfully extracted hours from Arrival_Time and dumped the data into new column Arrival_Time_hour")
extract_min(df,'Arrival_Time')
lg.info("successfully extracted min from Arrival_Time and dumped the data into new column Arrival_Time_min")
drop_col(df,'Arrival_Time')
lg.warning("dropping the original Arrival_Time column as we extracted the values form that column")
duration = list(df["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2:
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m"
else:
duration[i] = "0h " + duration[i]
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0]))
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1]))
df["Duration_hours"] = duration_hours
lg.info("successfully extracted hours from Duration column and dumped the data into new column Duration_hours")
df["Duration_mins"] = duration_mins
lg.info("successfully extracted minutes from Duration column and dumped the data into new column Duration_mins")
df.drop(["Date_of_Journey","Duration","Additional_Info"], inplace=True,axis=1)
lg.warning("dropping the Date_of_Journey, Duration, Additional_Info columns as we extracted the required "
"information")
Airline=pd.get_dummies(df['Airline'],drop_first=True)
lg.info("creating dummy variables for Airline and dropping the first dummy column")
source=pd.get_dummies(df['Source'],drop_first=True)
lg.info("creating dummy variables for Source and dropping the first dummy column")
destination=pd.get_dummies(df['Destination'],drop_first=True)
lg.info("creating dummy variables for Destination and dropping the first dummy column")
        stops_map = {'non-stop': 0, '2 stops': 2, '1 stop': 1, '3 stops': 3, '4 stops': 4}  # renamed to avoid shadowing the built-in dict
        df['Total_Stops'] = df['Total_Stops'].map(stops_map)
        lg.info("successfully mapped the Total_Stops column to 0,1,2,3,4 respectively")
df=pd.concat([df, Airline, source, destination], axis = 1)
lg.warning("concatenating all the newly created columns into the main dataframe")
df.drop(["Airline", 'Source', 'Destination','Route'],inplace=True,axis=1)
lg.warning("dropping the categorical columns as we dummy encoded them")
df['Trujet']=0
lg.info("adding an extra column as this feature is not there in our test dataset")
model = open('flight_rf.pkl','rb')
forest = pickle.load(model)
lg.info("loading our test model for prediction")
y_prediction = forest.predict(df)
lg.info("processing the prediction")
a=pd.DataFrame(y_prediction)
lg.info("dumping all our predicted values into a dataframe and showing the results")
print(a)
return a
except Exception as e:
lg.warning("error occurred during execution, which is:"+' '+str(e))
return "error occurs is:"+' '+str(e)
a = input("give the file path or file name: ")
automated(a)
|
InduMouliMahamkali/flightfareprediction
|
pre-processing and modeling/automated_model_test.py
|
automated_model_test.py
|
py
| 5,520 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.warning",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 122,
"usage_type": "call"
}
] |
74903206266
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List, Optional
from collections import deque
class Solution:
def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
if not root:
return []
queue = deque([root])
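        # BFS by level: at the top of each while-iteration the queue holds exactly one
        # tree level; levelLength freezes that boundary before children are appended.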
result = []
while queue:
levelLength = len(queue)
levelResult = []
for i in range(levelLength):
current_node = queue.popleft()
levelResult.append(current_node.val)
if current_node.left:
queue.append(current_node.left)
if current_node.right:
queue.append(current_node.right)
result.append(levelResult)
return result
|
eungang3/Leetcode
|
binary-tree-level-order-traversal/binary-tree-level-order-traversal.py
|
binary-tree-level-order-traversal.py
|
py
| 973 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
}
] |
15974434638
|
# Api Agenda Lionx
from src.infrastructures.mongo.mongo_infrastructure import MongoInfrastructure
# Third party
from decouple import config
from pymongo.cursor import Cursor
from pymongo.collection import InsertOneResult, UpdateResult
class MongoRepository:
def __init__(self):
self.mongo_client = MongoInfrastructure.get_client()
self.database = self.mongo_client.get_database(config("DATABASE_NAME"))
self.collection = self.database.get_collection(config("COLLECTION_NAME"))
def get_all_contacts(self) -> Cursor:
remove_pymongo_id = {"_id": 0}
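        # The {"_id": 0} projection drops Mongo's internal ObjectId so documents serialise cleanly.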
contacts_cursor = self.collection.find({}, remove_pymongo_id)
return contacts_cursor
def get_contact_by_id(self, id) -> dict:
filter_by_id = {"contact_id": id, "situation": "active"}
remove_pymongo_id = {"_id": 0}
contact = self.collection.find_one(filter_by_id, remove_pymongo_id)
return contact
def get_contacts_by_first_letters(self, letters) -> Cursor:
regex_first_letters_contact = {"$regex": f"^{letters}", "$options": "i"}
filter_by_first_name_letters = {"firstName": regex_first_letters_contact, "situation": "active"}
remove_pymongo_id = {"_id": 0}
contacts_cursor = self.collection.find(filter_by_first_name_letters, remove_pymongo_id)
return contacts_cursor
def register_contact(self, new_contact) -> InsertOneResult:
insert_result = self.collection.insert_one(new_contact)
return insert_result
def update_contact(self, edited_contact, id) -> UpdateResult:
filter_by_id = {"contact_id": id}
new_values = {"$set": edited_contact}
update_result = self.collection.update_one(filter_by_id, new_values)
return update_result
def soft_delete_contact(self, id) -> UpdateResult:
filter_by_id = {"contact_id": id}
field_to_update = {"$set": {"situation": "deactivated"}}
update_result = self.collection.update_one(filter_by_id, field_to_update)
return update_result
|
vinireeis/api_agenda_lionx
|
src/repositories/mongo/repository.py
|
repository.py
|
py
| 2,050 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "src.infrastructures.mongo.mongo_infrastructure.MongoInfrastructure.get_client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "src.infrastructures.mongo.mongo_infrastructure.MongoInfrastructure",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "decouple.config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pymongo.cursor.Cursor",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pymongo.cursor.Cursor",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pymongo.collection.InsertOneResult",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pymongo.collection.UpdateResult",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pymongo.collection.UpdateResult",
"line_number": 44,
"usage_type": "name"
}
] |
29944774792
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], 'utils'))
import numpy as np
import pandas as pd
import argparse
import h5py
import librosa
from scipy import signal
import matplotlib.pyplot as plt
import time
import csv
import random
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from multiprocessing import cpu_count
from utilities import read_audio, create_folder, read_meta
import config
from tqdm import tqdm
# Global flags and variables.
PLOT_FEATURES = False
class LogMelExtractor():
def __init__(self, sample_rate, window_size, overlap, mel_bins):
self.window_size = window_size
self.overlap = overlap
# Loading hamming window and Mel-filters.
self.ham_win = np.hamming(window_size)
self.melW = librosa.filters.mel(sr=sample_rate,
n_fft=window_size,
n_mels=mel_bins,
fmin=50.,
fmax=sample_rate // 2).T
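        # melW has shape (n_fft//2 + 1, mel_bins) after the transpose, so a
        # (frames, n_fft//2 + 1) magnitude spectrogram can be right-multiplied by it.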
# transform: Assumes a numpy array representing raw audio-signal.
def transform(self, audio):
ham_win = self.ham_win
window_size = self.window_size
overlap = self.overlap
# Compute a spectrogram with consecutive Fourier transforms.
[f, t, x] = signal.spectral.spectrogram(
audio,
window=ham_win,
nperseg=window_size,
noverlap=overlap,
detrend=False,
return_onesided=True,
mode='magnitude')
x = x.T
# Applying mel-filters on sequence of fourier transforms.
x = np.dot(x, self.melW)
# Applying log on mel-filters.
x = np.log(x + 1e-8)
x = x.astype(np.float32)
return x
def calculate_logmel(audio_path, sample_rate, feature_extractor, n=-1):
# Read audio (first 4 seconds only).
(audio, fs) = read_audio(audio_path, target_fs=sample_rate)
# Extract feature
feature = feature_extractor.transform(audio)
return feature, n
def calculate_features(args):
n_jobs = cpu_count()
executor = ProcessPoolExecutor(max_workers=n_jobs)
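    # Feature extraction is CPU-bound, so it is parallelised over one worker per core;
    # each future returns (feature, n) so results map back to metadata row n.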
futures = []
print("Using {} workers in parallel.".format(n_jobs))
# Arguments.
dataset_dir = args.dataset_dir
workspace = args.workspace
features_type = args.mode
features_file_name = args.features_file_name
# Parameters for feature extraction.
metadata_delimiter = config.metadata_delimiter
sample_rate = config.sample_rate
window_size = config.window_size
overlap = config.overlap
seq_len = config.seq_len
mel_bins = config.mel_bins
    # Displaying arguments and parameters.
print("Arguments and Parameters:")
print("Dataset Directory: {}".format(dataset_dir))
print("Workspace: {}".format(workspace))
print("Sample Rate: {}".format(sample_rate))
print("Window Size: {}".format(window_size))
print("Overlapping Frames: {}".format(overlap))
print("Sequence Length: {}".format(seq_len)) # Dimension of feature corresponding to each audio file: (seq_len, mel_bins)
print("Mel Bins: {}".format(mel_bins))
# Paths
audio_dir = os.path.join(dataset_dir, 'audio')
meta_csv = os.path.join(dataset_dir, 'metadata', 'UrbanSound8K.csv')
hdf5_path = os.path.join(workspace, 'features', features_type, features_file_name)
# Displaying paths.
print("Reading audio from: {}".format(audio_dir))
print("Reading meatadata file form: {}".format(meta_csv))
print("Saving the extracted features at: {}".format(hdf5_path))
create_folder(os.path.dirname(hdf5_path))
# Feature extractor
feature_extractor = LogMelExtractor(sample_rate=sample_rate,
window_size=window_size,
overlap=overlap,
mel_bins=mel_bins)
audio_names, fs_IDs, start_times, end_times, saliences, folds, class_IDs, classes = read_meta(meta_csv, metadata_delimiter)
# Create hdf5 file
hf = h5py.File(hdf5_path, 'w')
# Intialising hdf5 file to store audios/labels of all folds.
for fold_id in range(1, 11):
hf.create_dataset(
name='features_fold{}'.format(fold_id),
shape=(0, seq_len, mel_bins),
maxshape=(None, seq_len, mel_bins),
dtype=np.float32)
hf.create_dataset(
name='labels_fold{}'.format(fold_id),
shape=(0, 1),
maxshape=(None, 1),
dtype=np.float32)
# To remember number of audio files processed in each fold.
fold_count = [0] * 11
for (n, audio_name) in enumerate(audio_names):
# Calculate feature.
audio_path = os.path.join(audio_dir, 'fold{}'.format(folds[n]), audio_name)
futures.append(executor.submit(partial(calculate_logmel, audio_path, sample_rate, feature_extractor, n)))
for future in tqdm(futures):
if future.result() is not None:
feature, n = future.result()
hf['features_fold{}'.format(folds[n])].resize((fold_count[folds[n]] + 1, seq_len, mel_bins))
hf['features_fold{}'.format(folds[n])][fold_count[folds[n]]] = feature
hf['labels_fold{}'.format(folds[n])].resize((fold_count[folds[n]] + 1, 1))
hf['labels_fold{}'.format(folds[n])][fold_count[folds[n]]] = class_IDs[n]
fold_count[folds[n]] += 1
# Plot log-Mel for debug.
if PLOT_FEATURES:
plt.matshow(feature.T, origin='lower', aspect='auto', cmap='jet')
plt.show()
hf.close()
# Displaying total files processed from each fold.
print("Files Processed from each fold:")
for fold_id in range(1, 11):
print("Fold {}: {} files.".format(fold_id, fold_count[fold_id]))
# USAGE: python features.py logmel --dataset_dir=$DATASET_DIR --workspace=$WORKSPACE
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='mode') # Different modes can be added to extract different type of features.
parser_logmel = subparsers.add_parser('logmel')
parser_logmel.add_argument('--dataset_dir', type=str, required=True) # Path to the UrbanSound8K folder.
parser_logmel.add_argument('--workspace', type=str, required=True) # Directory where extracted features, model and logs of experiments are stored.
parser_logmel.add_argument('--features_file_name', type=str, required=True) # logmel-features.h5
args = parser.parse_args()
if args.mode == 'logmel':
calculate_features(args)
else:
raise Exception('Incorrect arguments!')
|
iamjanvijay/Background-Sound-Classification-in-Speech-Audio-Segments
|
utils/features.py
|
features.py
|
py
| 7,026 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "numpy.hamming",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "librosa.filters.mel",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "librosa.filters",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal.spectral.spectrogram",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "scipy.signal.spectral",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "numpy.dot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "utilities.read_audio",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ProcessPoolExecutor",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "config.metadata_delimiter",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "config.sample_rate",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "config.window_size",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "config.overlap",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "config.seq_len",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "config.mel_bins",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "utilities.create_folder",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "utilities.read_meta",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.matshow",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 184,
"usage_type": "call"
}
] |
16601473639
|
import socket
import serial
import sqlite3
import select
import time
import datetime
HEADERSIZE = 10
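# Wire protocol: every message is prefixed with a fixed 10-character, space-padded
# ASCII header carrying the payload length (see receive_message below).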
running_on_pie = False # pie or windows
if running_on_pie:
host = '192.168.1.10'
pos = '192.168.1.10'
win1 = '192.168.1.11'
win2 = '192.168.1.12'
conn = sqlite3.connect('/home/sysop/pos/order.db')
robot = serial.Serial('/dev/ttyUSB0', 19200)
else:
host = '192.168.86.26'
pos = '192.168.86.26'
win1 = '192.168.86.26'
win2 = '192.168.86.11'
conn = sqlite3.connect('order.db')
robot = serial.Serial('COM9', 19200)
port = 12345
c = conn.cursor()
send_to_bot = False
send_to_w1 = False
send_to_w2 = False
send_to_pos = False
bot_data = ''
bot_hold = ''
old_data = ''
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind((host, port))
serverSocket.listen(5)
sockets_list = [serverSocket]
clients = {}
print('Listening for connections on {}:{}...'.format(host, port))
def un_start():
command = "/usr/bin/sudo /sbin/shutdown -r now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print(output)
def log_it(note):
if running_on_pie:
f = open("/home/sysop/pos/log.txt", "a+")
else:
f = open("log.txt", "a+")
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y %H:%M:%S')
info = st + ' > '
info += note
info += '\n\r'
f.write(info)
def create_table():
c.execute('CREATE TABLE IF NOT EXISTS donut(donutID int, drink int, topping int, orderNUM int,'
' pay int)')
def data_test(a1):
c.execute("SELECT * FROM donut WHERE orderNUM=:a1", {"a1": str(a1)})
data1 = c.fetchall()
if data1:
return data1
def data_delete(a1):
c.execute("DELETE FROM donut WHERE orderNUM=:a1", {"a1": str(a1)})
conn.commit()
# Handles message receiving
def receive_message(client_socket1):
try:
message_header = client_socket1.recv(HEADERSIZE)
if not len(message_header):
return False
message_length = int(message_header.decode('utf-8').strip())
return {'header': message_header, 'data': client_socket1.recv(message_length)}
except Exception as e:
print(e)
return False
def order_process(order):
if len(order) == 4:
xa = str(data_test(order))
if xa != 'None':
xa = xa.strip('[]()')
xa = xa.replace(" ", "")
xa = xa.split(",")
dm = '$A' + str(xa[0])
if str(xa[1]) == '0':
dm += '1000#'
else:
dm += str(xa[1])
dm += '#'
else:
dm = xa
return dm
create_table()
while True:
read_sockets, _, exception_sockets = select.select(sockets_list, [], sockets_list)
for notified_socket in read_sockets:
if notified_socket == serverSocket:
client_socket, client_address = serverSocket.accept()
# Client should send his name right away, receive it
user = receive_message(client_socket)
# If False - client disconnected before he sent his name
if user is False:
continue
# Add accepted socket to select.select() list
sockets_list.append(client_socket)
# Also save username and username header
clients[client_socket] = user
print('Accepted connection from {}, username: {}'.format(client_address, user['data'].decode('utf-8')))
log_it('Accepted connection from {}, username: {}'.format(client_address, user['data'].decode('utf-8')))
else:
# Receive message
message = receive_message(notified_socket)
# If False, client disconnected, cleanup
if message is False:
print('Closed connection from: {}'.format(clients[notified_socket]['data'].decode('utf-8')))
log_it('Closed connection from: {}'.format(clients[notified_socket]['data'].decode('utf-8')))
# Remove from list for socket.socket()
sockets_list.remove(notified_socket)
# Remove from our list of users
del clients[notified_socket]
continue
# Get user by notified socket, so we will know who sent the message
user = clients[notified_socket]
data = message["data"].decode("utf-8")
line = ''
if data != 'ready':
if data != old_data:
print('Received message from {}: {}'.format(user["data"].decode("utf-8"), data))
log_it('Received message from {}: {}'.format(user["data"].decode("utf-8"), data))
old_data = data
if user["data"] == 'm1'.encode("utf-8"):
# start the order
if data == 'A order In':
message2_header = '{:10}'.format(len(data))
message['header'] = message2_header.encode("utf-8")
message['data'] = data.encode("utf-8")
send_to_w1 = True
# pos info
if user["data"] == 'pos'.encode("utf-8"):
robot.write(data.encode("utf-8"))
time.sleep(1)
line_in = robot.readline()
line = line_in.decode("utf-8")
line = line.rstrip()
                if line.startswith('$'):  # avoids IndexError when the serial read returns an empty line
message2_header = '{:10}'.format(len(line))
message['header'] = message2_header.encode("utf-8")
message['data'] = line.encode("utf-8")
print(line.encode("utf-8"))
send_to_pos = True
# window A data and processing
if user["data"] == 'w1'.encode("utf-8"):
if len(data) == 4:
if data == '0000':
un_start()
message2 = order_process(data)
bot_hold = message2
time.sleep(1)
message2_header = '{:10}'.format(len(message2))
message['header'] = message2_header.encode("utf-8")
message['data'] = message2.encode("utf-8")
send_to_w1 = True
if len(data) == 5:
if data == 'start':
robot.write(bot_hold.encode("utf-8"))
time.sleep(1)
if data == 'ready':
line_in = robot.readline()
line = line_in.decode("utf-8")
line = line.rstrip()
# if line == 'end':
message2_header = '{:10}'.format(len(line))
message['header'] = message2_header.encode("utf-8")
message['data'] = line.encode("utf-8")
send_to_w1 = True
# main com arduino
# if robot.in_waiting > 0:
# line_in = robot.readline()
# line = line_in.decode("utf-8")
# line = line.rstrip()
#
# if len(line) > 1:
#
# if line == 'end' and user["data"] == 'w1'.encode("utf-8"):
# message2_header = '{:10}'.format(len(line))
# message['header'] = message2_header.encode("utf-8")
# message['data'] = line.encode("utf-8")
# send_to_w1 = True
# if line[0] == '$' and user["data"] == 'pos'.encode("utf-8"):
# message2_header = '{:10}'.format(len(line))
# message['header'] = message2_header.encode("utf-8")
# message['data'] = line.encode("utf-8")
# send_to_pos = True
# Iterate over connected clients and broadcast message
for client_socket in clients:
# sent it
the_ip = client_socket.getpeername()[0]
if the_ip == win1 and send_to_w1:
client_socket.send(user['header'] + user['data'] + message['header'] + message['data'])
print(user['header'] + user['data'] + message['header'] + message['data'])
send_to_w1 = False
if the_ip == win2 and send_to_w2:
client_socket.send(user['header'] + user['data'] + message['header'] + message['data'])
send_to_w2 = False
if the_ip == pos and send_to_pos:
client_socket.send(user['header'] + user['data'] + message['header'] + message['data'])
print(user['header'] + user['data'] + message['header'] + message['data'])
send_to_pos = False
line = ''
# It's not really necessary to have this, but will handle some socket exceptions just in case
for notified_socket in exception_sockets:
# Remove from list for socket.socket()
sockets_list.remove(notified_socket)
# Remove from our list of users
del clients[notified_socket]
|
RG11rant/donuts
|
server.py
|
server.py
|
py
| 9,334 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_REUSEADDR",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "select.select",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 211,
"usage_type": "call"
}
] |
13442824529
|
import pygame, os
from modules.entitysets._puresensor import PureSensor
from imageload import loadImage
from button import Button
from menustate import MenuState
from staticimage import StaticImage
from gridrounding import gridRound
from selectionbox import SelectionBox
from label import Label
class RemoveSensorButton( Button ):
image = loadImage("remove.png", 2 )
def __init__( self, menu=None ):
Button.__init__( self, None, None, menu )
self.rect = self.image.get_rect()
self.rect.topleft = ( 54, 24 )
def push( self, clickKey, click ):
if "up" in clickKey:
self.parentState.toggleRemove()
class SnapToGridButton( Button ):
image = loadImage( "gridbutton.png", 2 )
def __init__( self, menu=None ):
Button.__init__( self, None, None, menu )
self.rect = self.image.get_rect()
self.rect.topleft = ( 24, 24 )
def push( self, clickKey, click ):
if "up" in clickKey:
self.parentState.toggleSnapToGrid()
class SensorEditButton( Button ):
image = loadImage( "sensoreditbutton.png", 2 )
rect = image.get_rect()
rect.topleft = ( 24, 144 )
def __init__( self, menu=None ):
Button.__init__( self, None, None, menu )
def push( self, clickKey, click ):
if "up" in clickKey:
aBoundEditState = SensorEditState( self.parentState.menu )
self.parentState.menu.loadMenuState( aBoundEditState )
class SensorEditState( MenuState ):
def __init__( self, menu, sprites=[] ):
MenuState.__init__( self, menu, sprites )
self.sprites = [self.fileNameLabel, self.miniMap]
self.buttons = []
self.panel = StaticImage(loadImage( "devmenu.png", 2 ), (10, 10))
self.addSprite( self.panel )
self.snapToGridButton = SnapToGridButton( self )
self.addButton( self.snapToGridButton )
self.removeButton = RemoveSensorButton( self )
self.addButton( self.removeButton )
self.gridButtonSelectionBox = None
self.removeButtonSelectionBox = None
self.addingMode = True
self.removingMode = False
self.curGrabbedSens = None
self.curStart = None
self.gridX = 40
self.gridY = 40
self.snapToGrid = False
self.whereEntWasGrabbed = None
def toggleSnapToGrid( self ):
self.snapToGrid = not self.snapToGrid
if self.gridButtonSelectionBox is None:
self.gridButtonSelectionBox = SelectionBox( self.snapToGridButton.rect, self )
self.addSprite( self.gridButtonSelectionBox )
else:
self.removeSprite( self.gridButtonSelectionBox )
self.gridButtonSelectionBox = None
self.menu.loadMenuState( self )
def toggleRemove( self ):
self.removingMode = not self.removingMode
if self.removeButtonSelectionBox is None:
self.removeButtonSelectionBox = SelectionBox( self.removeButton.rect, self )
self.addSprite( self.removeButtonSelectionBox )
else:
self.removeSprite( self.removeButtonSelectionBox )
self.removeButtonSelectionBox = None
self.menu.loadMenuState( self )
def getPressedSensor( self, point ):
"""See which sensor is at this point"""
for eachSpriteList in ( eachGroup.sprites() for eachGroup in self.menu.playState.groups ):
for eachSprite in [ sprite for sprite in eachSpriteList if sprite.pureSensor]:
if eachSprite.rect.collidepoint( point ):
return eachSprite
def update( self, dt, click, clickKey, curMousePos=None ):
MenuState.update( self, dt, click, clickKey, curMousePos )
playState = self.menu.playState
curMousePos = curMousePos[0]-playState.panX, curMousePos[1]-playState.panY
if self.snapToGrid:
curMousePos = gridRound( curMousePos, self.gridX, self.gridY, trueRounding=True )
else:
curMousePos = curMousePos
if self.curStart is not None:
self.menu.playState.lineVisualiser.devMenuLineGroups = [ [ self.curStart, ( self.curStart[0], curMousePos[1] ) ],
[ ( self.curStart[0], curMousePos[1] ), curMousePos ], [ curMousePos, ( curMousePos[0], self.curStart[1] ) ], [ ( curMousePos[0], self.curStart[1] ), self.curStart ] ]
self.menu.playState.lineVisualiser.devMenuLineGroups = [ [ (each[0]+playState.panX, each[1]+playState.panY) for each in eachLine ] for eachLine in self.menu.playState.lineVisualiser.devMenuLineGroups ]
self.menu.playState.lineVisualiser.flush = True
self.menu.playState.lineVisualiser.renderLines = True
self.menu.playState.lineVisualiser.renderPhysicsLines = True
self.menu.playState.lineVisualiser.forceNoRender = True
if click is not None:
            if clickKey == 'mouse1down' and self.curStart is None:  # was "is": string identity only works via CPython interning
self.curStart = curMousePos
            elif clickKey == 'mouse1up':
#ADD SENSOR HERE
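                # Normalise the drag rectangle: take the top-left corner whatever the drag
                # direction, then hand PureSensor the rectangle's centre as its position.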
destPoint = min( self.curStart[0], curMousePos[0] ), min( self.curStart[1], curMousePos[1] )
w = abs( self.curStart[0] - curMousePos[0] )
h = abs( self.curStart[1] - curMousePos[1] )
if w != 0 or h != 0:
destPoint = destPoint[0] + w/2, destPoint[1] + h/2
destGroup = getattr( self.menu.playState, PureSensor.playStateGroup )
PureSensor( pos=destPoint, group=destGroup, width=w, height=h )
self.curStart = None
            elif clickKey == 'mouse3down':
self.curGrabbedSens = self.getPressedSensor( (curMousePos[0]+playState.panX, curMousePos[1]+playState.panY) )
if self.curGrabbedSens is not None:
entPos = self.curGrabbedSens.getPosition()
self.whereEntWasGrabbed = curMousePos[0] - entPos[0], curMousePos[1] - entPos[1]
            elif clickKey == 'mouse3up':
pickedSensor = self.getPressedSensor( (curMousePos[0]+playState.panX, curMousePos[1]+playState.panY) )
if pickedSensor is not None:
if self.removingMode:
pickedSensor.kill()
self.curGrabbedSens = None
self.whereEntWasGrabbed = None
elif curMousePos is not None:
if self.curGrabbedSens is not None:
curEnt = self.curGrabbedSens
newPos = curMousePos[0]-self.whereEntWasGrabbed[0], curMousePos[1]-self.whereEntWasGrabbed[1]
curEnt.setPosition( newPos )
|
Occuliner/ThisHackishMess
|
modules/menuentries/sensoredit.py
|
sensoredit.py
|
py
| 6,709 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "button.Button",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "imageload.loadImage",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "button.Button.__init__",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "button.Button",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "button.Button",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "imageload.loadImage",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "button.Button.__init__",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "button.Button",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "button.Button",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "imageload.loadImage",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "button.Button.__init__",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "button.Button",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "menustate.MenuState",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "menustate.MenuState.__init__",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "menustate.MenuState",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "staticimage.StaticImage",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "imageload.loadImage",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "selectionbox.SelectionBox",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "selectionbox.SelectionBox",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "menustate.MenuState.update",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "menustate.MenuState",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "gridrounding.gridRound",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "modules.entitysets._puresensor.PureSensor.playStateGroup",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "modules.entitysets._puresensor.PureSensor",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "modules.entitysets._puresensor.PureSensor",
"line_number": 135,
"usage_type": "call"
}
] |
16791541041
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 23 12:32:47 2022
@author: maksi
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from keras.models import Sequential
from keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
data = load_digits()
X = data.data
y = data.target
y = pd.Categorical(y)
y = pd.get_dummies(y).values
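# One-hot encode the 10 digit classes so the softmax output layer can be trained
# with categorical_crossentropy.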
class_num = y.shape[1]
model = Sequential()
model.add(Dense(64, input_shape = (X.shape[1],), activation = 'relu'))
model.add(Dense(64, activation = 'relu'))
model.add(Dense(64, activation = 'relu'))
model.add(Dense(class_num, activation = 'softmax'))
learning_rate = 0.0001
model.compile(optimizer=Adam(learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])  # ('accuracy') is just a string, not a tuple; Keras expects a list of metrics
model.summary()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model.fit(X_train, y_train, batch_size=32, epochs=100, validation_data=(X_test, y_test), verbose=2)
historia = model.history.history
floss_train = historia['loss']
floss_test = historia['val_loss']
acc_train = historia['accuracy']
acc_test = historia['val_accuracy']
fig,ax = plt.subplots(1,2, figsize=(20,10))
epochs = np.arange(0, 100)
ax[0].plot(epochs, floss_train, label = 'floss_train')
ax[0].plot(epochs, floss_test, label = 'floss_test')
ax[0].set_title('Funkcje strat')
ax[0].legend()
ax[1].set_title('Dokladnosci')
ax[1].plot(epochs, acc_train, label = 'acc_train')
ax[1].plot(epochs, acc_test, label = 'acc_test')
ax[1].legend()
|
makspervov/Podstawy-SI-Python
|
lab5/lab5_zad2.py
|
lab5_zad2.py
|
py
| 1,785 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.datasets.load_digits",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.Categorical",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 46,
"usage_type": "call"
}
] |
7368403785
|
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
# mainframe.py
# Pomodoro
#
# Created by Roman Rader on 22.06.11.
# New BSD License 2011 Antigluk https://github.com/antigluk/Pomodoro
"""
Contains main frame of application.
"""
import wx
from state import PomodoroStateProxy as PomodoroState
from NotificationCenter.NotificationCenter import NotificationCenter
import logging
logging.getLogger('Pomodoro')
class MainFrameController(wx.Frame):
"""Main frame of Pomodoro"""
def __init__(self):
wx.Frame.__init__(
self,
None,
-1,
'Pomodoro it!',
style=wx.BORDER_DEFAULT | wx.STAY_ON_TOP,
size=(220, 120),
)
state = PomodoroState()
self.__state_dict = {
state.StateNoState: {'bs': '...'},
state.StateInPomodoro: {'bs': u"Отменить..."},
state.StateInRest: {'bs': u"Отдыхайте!"},
state.StateWaitingPomodoro: {'bs': u"Начать помидору"},
state.StateWaitingRest: {'bs': u"Начать отдых"},
state.StatePomodoroKilled: {'bs': u"Начать помидору"},
}
self.buildFrame()
self.updateUI()
self.makeMenu()
self.Show(False)
NotificationCenter().addObserver(self,self.onDBUpdate,"dbUpdated")
NotificationCenter().addObserver(self,self.onUpdateUI,"updateUI")
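        # the frame observes "dbUpdated" and "updateUI" notifications from the
        # model layer (observer pattern via NotificationCenter); onUpdateUI
        # triggers a redraw of the labels and button below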
def buildFrame(self):
self.panel = wx.Panel(self)
self.txt = wx.StaticText(self.panel, pos=(10, 10),
label='Pomodoro!')
self.times_l = wx.StaticText(self.panel, pos=(120, 10),
label=u"0 помидор")
self.timer_ctrl = wx.TextCtrl(self.panel, pos=(10, 30),
size=(200, -1), style=wx.TE_READONLY | wx.TE_CENTER)
self.start_button = wx.Button(self.panel, pos=(20, 70), label=''
, size=(170, -1))
self.start_button.Bind(wx.EVT_BUTTON, self.bClick)
def onUpdateUI(self, event):
self.updateUI()
def updateUI(self):
        #TODO: check whether the window is visible; otherwise skip the update
#TODO: remove this ugly method
state = PomodoroState()
self.timer_ctrl.SetValue(state.text)
self.start_button.SetLabel(self.__state_dict[state.active]['bs'])
self.txt.SetLabel(state.caption)
self.times_l.SetLabel(u"%d помидор" % state.GetTodayCount())
def bClick(self, m):
logging.debug("Toggle state called from menu")
self.controller.toggleState()
def onExit(self,m):
logging.debug("Quit called from menu")
self.controller.quit()
def makeMenu(self):
self.menuBar = wx.MenuBar()
self.filemenu = wx.Menu()
self.pomodmenu = wx.Menu()
item = self.filemenu.Append(wx.ID_ANY, "Hide")
self.Bind(wx.EVT_MENU, self.hideFrame, item)
item = self.filemenu.Append(wx.ID_ANY, "Toggle pomodoro")
self.Bind(wx.EVT_MENU, self.bClick, item)
self.filemenu.AppendSeparator()
item = self.filemenu.Append(wx.ID_EXIT, "&Quit", "quit")
self.Bind(wx.EVT_MENU, self.onExit, id=wx.ID_EXIT)
item = self.pomodmenu.Append(wx.ID_ANY, "All", "List of pomodoros")
self.Bind(wx.EVT_MENU, self.showListOfPomodoros, item)
item = self.pomodmenu.Append(wx.ID_ANY, "Statistics", "Statistics")
self.Bind(wx.EVT_MENU, self.showStatistics, item)
self.menuBar.Append(self.filemenu, "&File")
self.menuBar.Append(self.pomodmenu, "&Pomodors")
self.SetMenuBar(self.menuBar)
def onDBUpdate(self, obj):
pass
def hideFrame(self, m):
logging.debug("Hide frame called from menu")
self.Show(False)
def showListOfPomodoros(self, m):
logging.debug("Show list of pomodors called from menu")
self.controller.showListOfPomodoros()
def showStatistics(self, m):
logging.debug("Show statistics of pomodors called from menu")
self.controller.showStatistics()
|
rrader/Pomodoro
|
pomodoro/mainframe.py
|
mainframe.py
|
py
| 4,124 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "wx.Frame.__init__",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "wx.BORDER_DEFAULT",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "wx.STAY_ON_TOP",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "state.PomodoroStateProxy",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "state.StateNoState",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "state.StateInPomodoro",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "state.StateInRest",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "state.StateWaitingPomodoro",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "state.StateWaitingRest",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "state.StatePomodoroKilled",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "NotificationCenter.NotificationCenter.NotificationCenter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "NotificationCenter.NotificationCenter.NotificationCenter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "wx.StaticText",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "wx.StaticText",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "wx.TextCtrl",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "wx.TE_READONLY",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "wx.TE_CENTER",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "wx.Button",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "state.PomodoroStateProxy",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "state.text",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "state.active",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "state.caption",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "state.GetTodayCount",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "wx.MenuBar",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "wx.Menu",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "wx.Menu",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "wx.ID_ANY",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_MENU",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_MENU",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_EXIT",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_MENU",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_EXIT",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_MENU",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_MENU",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 118,
"usage_type": "call"
}
] |
17600865196
|
#encoding:UTF-8
import urllib
import urllib.request
import json
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
import matplotlib.lines as mlines
import numpy as np
import time
data = urllib.request.urlopen('https://stationdata.wunderground.com/cgi-bin/stationdata?iconsize=3&width=2048&height=2048&maxage=3600&format=json&maxstations=100000&rf_filter=1&minlat=38&minlon=-85&maxlat=48&maxlon=-73').read()
record = data.decode('UTF-8')
record = record.replace('},}', '}}')
a = open("/Users/hsw/Desktop/CA_region_rawdata.txt", "w+")
a.write(record)
a.close()
data = json.loads(record)
#print(data)
print(data['conds'])
preprocess = data['conds']
station = []
T = []
lats = []
lons = []
for o in preprocess:
id = str(o)
station.append(preprocess[id]['id'])
T.append(5/9*(float(preprocess[id]['tempf'])-32))
lats.append(float(preprocess[id]['lat']))
lons.append(float(preprocess[id]['lon']))
# ============================================ # plot
# ============================================initialize the plot
plt.figure(figsize=(11, 8), dpi=120)
axes = plt.subplot(111)
# set up the map projection and
# use intermediate ('i') resolution coastlines.
map = Basemap(llcrnrlon=-87, llcrnrlat=38, urcrnrlon=-73, urcrnrlat=48, \
rsphere=(6378137.00, 6356752.3142), \
resolution='i', projection='merc', \
lat_0=40., lon_0=-20., lat_ts=20.)
# draw coastlines, country boundaries, fill continents.
map.drawcoastlines(linewidth=0.25)
map.drawcountries(linewidth=0.25)
# draw the edge of the map projection region (the projection limb)
map.drawmapboundary(fill_color='#87CEFA')#689CD2
# draw lat/lon grid lines every 30 degrees.
#map.drawmeridians(np.arange(0, 360, 10))
map.drawmeridians(np.arange(0, 360, 10),labels=[0,0,0,1],fontsize=10)
#map.drawparallels(np.arange(-90, 90, 10))
map.drawparallels(np.arange(-90, 90, 10),labels=[1,0,0,0],fontsize=10)
# Fill continents with a different color
map.fillcontinents(color='#FFFFFF', lake_color='#EEEEEE', zorder=0)
# ============================================draw the stations and data
# compute native map projection coordinates of lat/lon grid.
x, y = map(lons, lats)
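# calling the Basemap instance (which shadows the builtin "map" here)
# projects the lon/lat arrays into map x/y coordinates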
max_T = max(T)
# Plot each city in a loop.
# Set some parameters
size_factor = 100.0
x_offset = 20.0
y_offset = -20.0
rotation = 0
temp=0
f = open("/Users/hsw/Desktop/CA_region_Tdata.txt", "w+")
f.close()
#draw station point
analyze = ''
for i, j, k, l in zip(x, y, T, station):
temp = temp+1
size = size_factor * k / max_T
    # pick one colour bucket per temperature and draw a single marker;
    # readings outside [-100, 100] (sentinels such as 9999) are skipped,
    # matching the bounds the original chain of ifs enforced, and the
    # overlapping bin boundaries are made consistent
    if -100.0 <= k <= 100.0:
        if k <= -10:
            color = '#00008F'
        elif k <= -5:
            color = '#00009F'
        elif k <= -2:
            color = '#0000FF'
        elif k <= 2:
            color = '#006FFF'
        elif k <= 6:
            color = '#00BFFF'
        elif k <= 10:
            color = '#00FFFF'
        elif k <= 14:
            color = '#4FFFAF'
        elif k <= 18:
            color = '#7FF77F'
        elif k <= 22:
            color = '#FFFF00'
        elif k <= 26:
            color = '#FFBF00'
        elif k <= 30:
            color = '#FF6F00'
        elif k <= 35:
            color = '#FF0000'
        else:
            color = '#7F0000'
        map.scatter(i, j, s=15, marker='o', color=color)
#if k != 9999:
# plt.text(i, j, str(k) + '°C', rotation=rotation, fontsize=10)
f = open("/Users/hsw/Desktop/CA_region_Tdata.txt", "a+")
f.write(' Station:'+ l + ' Temperature:' + str(k) + '\n')
f.close()
title = '多伦多及附近地区气温分布图\n' + '数据更新时间:' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 8 * 60 * 60)) + 'UTC\n数据来自:wunderground weather, 绘制@Louis_He'  # offset is 8 hours in seconds; the original multiplied by 1000 as if milliseconds
# ============================================#define legends
a = mlines.Line2D([], [], color='#7F0000', marker='o',
markersize=5, label='>35°C',ls='')
b = mlines.Line2D([], [], color='#FF0000', marker='o',
                  markersize=5, label='30~35°C', ls='')  # was '>30°C'; '>35°C' has its own entry
c = mlines.Line2D([], [], color='#FF6F00', marker='o',
markersize=5, label='26~30°C',ls='')
d = mlines.Line2D([], [], color='#FFBF00', marker='o',
markersize=5, label='22~26°C',ls='')
e = mlines.Line2D([], [], color='#FFFF00', marker='o',
markersize=5, label='18~22°C',ls='')
f = mlines.Line2D([], [], color='#7FF77F', marker='o',
markersize=5, label='14~18°C',ls='')
g = mlines.Line2D([], [], color='#4FFFAF', marker='o',
markersize=5, label='10~14°C',ls='')
h = mlines.Line2D([], [], color='#00FFFF', marker='o',
markersize=5, label='6~10°C',ls='')
i = mlines.Line2D([], [], color='#00BFFF', marker='o',
markersize=5, label='2~6°C',ls='')
j = mlines.Line2D([], [], color='#006FFF', marker='o',
markersize=5, label='-2~2°C',ls='')
k = mlines.Line2D([], [], color='#0000FF', marker='o',
markersize=5, label='-5~-2°C',ls='')
l = mlines.Line2D([], [], color='#00009F', marker='o',
markersize=5, label='-10~-5°C',ls='')
m = mlines.Line2D([], [], color='#00008F', marker='o',
markersize=5, label='<-10°C',ls='')
plt.legend(handles=[a, b, c, d, e, f, g, h, i, j, k, l, m])  # 'a' (>35°C) was defined but missing from the legend
plt.title(title)
save = '/Users/hsw/Desktop/CA_region_Tsample.png'
plt.savefig(save, dpi=120)
|
Louis-He/weather_map
|
wunderground_weather.py
|
wunderground_weather.py
|
py
| 6,022 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.basemap.Basemap",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
}
] |
17282581755
|
from pynput.keyboard import Listener, Key
import time
from threading import Thread
from canvas import Canvas
from pedal import Pedal
from snake import Snake
from ball import Ball
import os
def on_press(key):
if hasattr(key, 'char'): # Write the character pressed if available
print(key.char)
    elif key == Key.up:  # arrow keys steer the snake
        print('up')
        snake.up()
    elif key == Key.down:
        print('down')
        snake.down()
    elif key == Key.left:
        print('left')
        snake.left()
    elif key == Key.right:
        print('right')
        snake.right()
canvas = Canvas(15, 50)
snake = Snake(10, 5)
snake.left()
pedal = Pedal(1, 7)
ball = Ball(8,7)
gameover = False
while(not gameover):
with Listener(on_press=on_press) as ls:
        def time_out(period_sec: float):  # was annotated int, but it is called with 0.5
            time.sleep(period_sec)  # listen to the keyboard for period_sec seconds
ls.stop()
os.system('cls' if os.name == 'nt' else 'clear')
Thread(target=time_out, args=(0.5,)).start()
ls.join()
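    # the timer thread stops the listener after ~0.5 s, so ls.join() returns
    # and the loop advances one frame even when no key was pressed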
#move entities
ball.collision(canvas)
ball.move()
snake.move()
pedal.move(canvas)
canvas.clear()
canvas.createBorder()
if(snake.detectColission(canvas) or ball.getgameOver()):
gameover = True
#print entities
canvas = snake.drawSnake(canvas)
canvas = pedal.print(canvas)
canvas = ball.print(canvas)
canvas.print()
print("your score: ",ball.score)
os.system('cls' if os.name == 'nt' else 'clear')
print("Loser your score was: ",ball.score)
|
devbit-algorithms/snakepong-snaka69
|
gameLoop.py
|
gameLoop.py
|
py
| 1,665 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pynput.keyboard.Key.up",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "snake.up",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Key.down",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "snake.down",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Key.left",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "snake.left",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Key.right",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "snake.right",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "canvas.Canvas",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "snake.Snake",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "snake.left",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pedal.Pedal",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ball.Ball",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard.Listener",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ball.collision",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "ball.move",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "snake.move",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pedal.move",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "canvas.clear",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "canvas.createBorder",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "snake.detectColission",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "ball.getgameOver",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "snake.drawSnake",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pedal.print",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "ball.print",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "canvas.print",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "ball.score",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "ball.score",
"line_number": 64,
"usage_type": "attribute"
}
] |
25849126163
|
import random
import time
from colorama import Back, Fore, init
from SudokuF import *
from SudokuT import *
from Menus import *
from Ahorcado import *
lop = 0
while lop == 0:
menuprincipal()
opcionprincipal = input(Fore.BLUE + "[4] Finalizar: " + Fore.RESET)
if opcionprincipal == "Fernando": #Easter egg
newgame()
if opcionprincipal == "Endgame": #Easter egg
creditoss()
exit()
if opcionprincipal == "Ayuda": #Easter egg
jueguito()
if opcionprincipal == "Mario":
from mario_level_1 import *
if __name__ == '__main__':
main()
pg.quit()
#sys.exit()
numbers2 = opcionprincipal.split()
while len(numbers2) != 1 or not numbers2[0].isdigit() or int(numbers2[0]) > 4 or int(numbers2[0]) < 1:
print(Fore.RED + "...Opción incorrecta. Intente nuevamente." + Fore.RESET)
opcionprincipal = input(Fore.BLUE + "[4] Finalizar: " + Fore.RESET)
numbers2 = opcionprincipal.split()
opcionprincipal = int(numbers2[0])
loop = True
while loop == True:
if opcionprincipal == 1:
name = input(Fore.MAGENTA + "Nombre del jugador: " + Fore.RESET)
name_tablero = input(Fore.LIGHTMAGENTA_EX +"Nombre del tablero: " + Fore.RESET)
tablero = tableros() #[[1,3,4,5,8,9,2,6,7],[8,5,7,2,6,3,1,9,4],[9,6,2,1,4,7,8,3,5],[2,9,3,7,1,8,4,5,6],[5,4,1,3,2,6,7,8,9],[7,8,6,9,5,4,3,2,1],[4,7,9,8,3,5,6,1,2],[6,2,8,4,9,1,5,7,3],[3,1,5,6,7,2,9,4,0]]
imprimeTablero(tablero, name_tablero)
tablerverificar = []
for i in range(9):
tablerverificar.append([])
for j in range(9):
tablerverificar[i].append(tablero[i][j])
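            # tablerverificar copies the starting board, presumably so the
            # original clues cannot be overwritten by later moves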
while not tableroCompleto(tablero):
numbers = input("Ingrese fila columna cifra: ")
numbers = numbers.split()
while verificardigitosvalidos(numbers) == False:
numbers = input("Ingrese fila columna cifra: ")
numbers = numbers.split()
x = int(numbers[0])
y = int(numbers[1])
d = int(numbers[2])
if verificarjugadavalida(tablero, x, y, d, tablerverificar) == True:
tablero[x-1][y-1] = d
imprimeTablero(tablero, name_tablero)
if final(tablero, name, name_tablero) == True:
loop = False
elif opcionprincipal == 2:
print("Desea cargar un tablero propio?")
print("[1] Si")
opcion = (input("[2] Cargar Tableros predefinidos: "))
while opcion != "1" and opcion != "2":
print(Fore.RED + "Opción incorrecta. Intente nuevamente." + Fore.RESET)
opcion = (input("[2] Cargar Tableros predefinidos: "))
opcion = int(opcion)
if opcion == 1:
vertablerosguardados()
if opcion == 2:
print()
alltableros()
numero = input("Ingrese el número del tablero que desea ver/jugar: ")
while not numero.isdigit() or int(numero) > 10 or int(numero) < 1:
print(Fore.RED + "Opción incorrecta. Intente nuevamente." + Fore.RESET)
numero = input("Ingrese el número del tablero que desea ver/jugar: ")
numero = int(numero)
tablero = selecionartablero(numero)
imprimeTablero(tablero, " ")
print("Desea jugar este tablero?:")
print("[1] Si")
opcion = (input("[2] Regresar: "))
opcion = opcion.split()
while len(opcion) != 1 or not opcion[0].isdigit() or int(opcion[0]) > 2 or int(opcion[0]) < 1:
print(Fore.RED + "...Opción incorrecta. Intente nuevamente." + Fore.RESET)
opcion = (input("[2] Regresar: "))
opcion = opcion.split()
opcion = int(opcion[0])
            # check that the entered option is valid
if opcion == 1:
tablerverificar = []
for i in range(9):
tablerverificar.append([])
for j in range(9):
tablerverificar[i].append(tablero[i][j])
imprimeTablero(tablero, name_tablero = "")
while not tableroCompleto(tablero):
numbers = input("Ingrese fila columna cifra: ")
numbers = numbers.split()
while verificardigitosvalidos(numbers) == False:
numbers = input("Ingrese fila columna cifra: ")
numbers = numbers.split()
x = int(numbers[0])
y = int(numbers[1])
d = int(numbers[2])
if verificarjugadavalida(tablero, x, y, d, tablerverificar) == True:
tablero[x-1][y-1] = d
imprimeTablero(tablero, name_tablero = "")
creditosfinales()
print(Fore.MAGENTA + "Felicidades, has ganado el juego" + Fore.RESET)
loop = False
elif opcion == 2:
print("volviendo al menu principal")
loop = False
elif opcionprincipal == 3:
print("Selecione la dificultad del tablero")
print(Fore.GREEN + "[1] Fácil" + Fore.RESET)
print(Fore.YELLOW+ "[2] Medio" + Fore.RESET)
print(Fore.RED + "[3] Difícil"+ Fore.RESET)
opcion = (input("[4] Regresar: "))
while opcion != "1" and opcion != "2" and opcion != "3" and opcion != "4":
print(Fore.RED + "Opción incorrecta. Intente nuevamente." + Fore.RESET)
opcion = (input("[4] Regresar: "))
opcion = int(opcion)
tablero = tablerodificultad(opcion)
name = input(Fore.MAGENTA + "Nombre del jugador: " + Fore.RESET)
name_tablero = input(Fore.LIGHTMAGENTA_EX +"Nombre del tablero: " + Fore.RESET)
imprimeTablero(tablero, name_tablero)
tablerverificar = []
for i in range(9):
tablerverificar.append([])
for j in range(9):
tablerverificar[i].append(tablero[i][j])
while not tableroCompleto(tablero):
numbers = input("Ingrese fila columna cifra: ")
numbers = numbers.split()
while verificardigitosvalidos(numbers) == False:
numbers = input("Ingrese fila columna cifra: ")
numbers = numbers.split()
x = int(numbers[0])
y = int(numbers[1])
d = int(numbers[2])
if verificarjugadavalida(tablero, x, y, d, tablerverificar) == True:
tablero[x-1][y-1] = d
imprimeTablero(tablero, name_tablero)
if final(tablero, name, name_tablero) == True:
loop = False
elif opcionprincipal == 4:
print(Fore.RED + "Gracias por jugar."+ Fore.RESET)
exit()
|
K23NO/Soduko
|
Sudoku.py
|
Sudoku.py
|
py
| 7,562 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "colorama.Fore.BLUE",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.BLUE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.MAGENTA",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.LIGHTMAGENTA_EX",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.MAGENTA",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.GREEN",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.MAGENTA",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.LIGHTMAGENTA_EX",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 147,
"usage_type": "attribute"
}
] |
11278711462
|
"""
Here we test a basic strategy that includes an indicator and FX rate movements.
We'll start with a $100K AUD-denominated portfolio and buy 100 shares of SPY
only if the VIX < 26.
Also, buying in SPY will make us short USD.
Generate funding trades, to be executed the day after we buy SPY, so that
we aren't short USD.
For the sake of testing we'll focus on the dates 1st Sep -> 1st Oct.
Points to note:
- We'll buy 100 shares of SPY @337.11 on Sep 14th VIX=25.85
- Hold until Oct 1st when SPY=337.04, AUDUSD=0.7167
- Because our portfolio is denominated in AUD we need to calculate AUD prices.
- So buying SPY at 337.11 (Sep 14th price) / 0.729682 (fx 15th) = AUD 462
- And holding to a value of 337.04 / 0.716651 = AUD 470.30
- PNL will be $8.30 (= 470.30 - 462.00) for each of 100 shares purchased.
"""
from datetime import date
import pandas as pd
from pxtrade import Trade
from pxtrade.assets import reset, Stock, Cash, FxRate, Portfolio
from pxtrade.backtest import Backtest
from pxtrade.strategy import Strategy
from pxtrade.events.yahoo import load_yahoo_prices
from pxtrade.compliance import Compliance, UnitLimit
from pxtrade.history import History
def test_buy_spy_with_indicator():
# create your stock and portfolio
reset()
spy = Stock("SPY", currency_code="USD")
aud = Cash("AUD")
usd = Cash("USD")
audusd = FxRate("AUDUSD")
portfolio = Portfolio("AUD")
starting_value = 1e5 # start with $100K AUD
portfolio.transfer(aud, starting_value)
# impose a compliance rule so we are unable to
# hold more than 100 shares.
portfolio.compliance = Compliance().add_rule(UnitLimit(spy, 100))
# define a strategy to buy 100 shares of SPY
# if we are short USD then also fund this shortfall with AUD
class BuySpyWithIndicator(Strategy):
def show(self, trades):
if len(trades) == 0:
return
print(backtest.datetime)
print("^VIX: ", backtest.get_indicator("^VIX"))
print("AUDUSD: ", audusd.rate)
print("SPY: ", spy.price)
for trade in trades:
print(trade)
print("-------")
def generate_trades(self):
trades = list()
usd_holding = portfolio.get_holding_units("USD")
if usd_holding < 0:
trades.append(Trade(portfolio, usd, int(-usd_holding) + 1))
if backtest.get_indicator("^VIX") >= 26:
# don't buy any spy, just fund usd (if required)
self.show(trades)
return trades
trades.append(Trade(portfolio, spy, 100))
self.show(trades)
return trades
# create your backtest instance
backtest = Backtest(BuySpyWithIndicator())
history = History(
portfolios=portfolio,
backtest=backtest,
)
# load price events from yahoo for spy, audusd, vix
start_date = date(2020, 9, 1)
end_date = date(2020, 10, 1)
load_yahoo_prices(
[spy, audusd, "^VIX"],
backtest,
start_date=start_date,
end_date=end_date,
)
# run the backtest and check pnl
backtest.run()
df = history.get()
# print(portfolio)
# print(audusd.rate)
print(backtest.datetime)
print(df)
# Note that when running on windows the last FX rate we get from yahoo
    # is on 30 Sep, AUDUSD = 0.716651. However, running on linux we get
# a price from 1 Oct of AUDUSD = 0.718288.
# This looks to be an issue with the yahoo api, but it has implications
# for our assertions around portfolio value.
starting_aud_price = 462 # this has not changed
ending_aud_price = 337.04 / audusd.rate
expected_pnl = (ending_aud_price - starting_aud_price) * 100
expected_value = starting_value + expected_pnl
assert round(portfolio.value, -1) == round(expected_value, -1)
start_date = pd.Timestamp(start_date)
end_date = pd.Timestamp(end_date)
assert int(df.at[start_date, "Portfolio"]) == int(starting_value)
assert round(df.at[end_date, "Portfolio"], -1) == round(expected_value, -1)
assert round(df.at[pd.Timestamp(date(2020, 9, 14)), "^VIX"], 2) == 25.85
|
simongarisch/pxtrade
|
tests/test_strategy2.py
|
test_strategy2.py
|
py
| 4,212 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pxtrade.assets.reset",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pxtrade.assets.Stock",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pxtrade.assets.Cash",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pxtrade.assets.Cash",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pxtrade.assets.FxRate",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pxtrade.assets.Portfolio",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pxtrade.compliance.Compliance",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pxtrade.compliance.UnitLimit",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pxtrade.strategy.Strategy",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "pxtrade.Trade",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pxtrade.Trade",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pxtrade.backtest.Backtest",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pxtrade.history.History",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pxtrade.events.yahoo.load_yahoo_prices",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 115,
"usage_type": "call"
}
] |
41457469135
|
"""
flaskr.utils.db
~~~~~~~~~~~~~~~
Utilities for database operations.
"""
import sqlite3
from typing import List, Optional
from datetime import datetime, timezone
from flask import g
from flask import current_app
from flaskr.utils.node import Node
def convert_timestamp(t):
return datetime.fromisoformat(t.decode()).replace(tzinfo=timezone.utc).timestamp()
# Register the converter
sqlite3.register_converter("timestamp", convert_timestamp)
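# Illustrative effect of the converter (assuming ISO-format UTC values are
# stored): a column declared "timestamp" holding b"2021-01-01 00:00:00" now
# comes back as the float epoch 1609459200.0 rather than a datetime object.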
def get_db():
"""Connect to the application's configured database. The connection
is unique for each request and will be reused if this is called
again.
"""
if "db" not in g:
g.db = sqlite3.connect(
current_app.config["DATABASE"], detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
"""If this request connected to the database, close the
connection.
"""
db = g.pop("db", None)
if db is not None:
db.close()
def init_db(app):
"""Clear existing data and create new tables."""
with app.app_context():
db = get_db()
with app.open_resource("schema.sql") as f:
db.executescript(f.read().decode("utf8"))
def init_app(app):
"""Register database functions with the Flask app. This is called by
the application factory.
"""
app.teardown_appcontext(close_db)
# modified from https://github.com/matthiask/django-tree-queries/blob/8863c5237f32585cc5ddc21041231155cb806149/tree_queries/compiler.py#L120
CTE = """WITH RECURSIVE __tree(tree_depth,
tree_path,
tree_ordering,
tree_pk) AS (
SELECT
0 tree_depth,
printf("%s ", id) tree_path,
printf(" %020s ", id) tree_ordering,
T.id tree_pk
FROM comment T
WHERE T.parent_id IS NULL
UNION ALL
SELECT
__tree.tree_depth + 1,
__tree.tree_path || printf("%s ", T.id),
__tree.tree_ordering || printf("%020s ", T.id),
T.id
FROM comment T
JOIN __tree ON T.parent_id = __tree.tree_pk
)
SELECT
comment.id,
comment.parent_id,
comment.post_id,
comment.body, comment.created,
comment.author_id, user.username as author_name,
-- __tree.tree_depth
__tree.tree_path
FROM __tree
JOIN comment ON comment.id=__tree.tree_pk
JOIN user ON user.id=comment.author_id
WHERE comment.post_id=?
-- AND instr(__tree.tree_path, '3') !=0
ORDER BY __tree.tree_ordering;
"""
def get_all_comments(post_id: int) -> List[Node]:
root_nodes = Node.build_from_cte_rows(get_db().execute(CTE, (post_id, )))
return list(root_nodes)
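# Illustrative only: for a thread 1 -> (2, 3) the CTE emits rows depth-first,
# each row carrying its tree_path (e.g. "1 2 " for comment 2), which is
# presumably what Node.build_from_cte_rows uses to rebuild the nesting.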
def put_in_child(parent: dict, child: dict):
if 'children' in parent:
parent['children'].append(child)
else:
parent['children'] = [child]
def get_post(post_id, with_comments=True) -> Optional[dict]:
"""Get a post and its author by id.
:param post_id: id of post to get
:param with_comments: if return with comments
:return: the post with author information
"""
post = (
get_db()
.execute(
"SELECT p.id, title, body, created, author_id, username"
" FROM post p JOIN user u ON p.author_id = u.id"
" WHERE p.id = ?",
(post_id,),
)
.fetchone()
)
if post is None:
return None
if with_comments:
comments = get_all_comments(post_id=post['id'])
return dict(post) | dict(comments=comments)
else:
return dict(post)
def get_all_posts():
db = get_db()
rst = []
for post in db.execute(
"SELECT p.id, title, body, created, author_id, username as author_name"
" FROM post p JOIN user u ON p.author_id = u.id"
" ORDER BY p.created DESC, p.id DESC"
):
rst.append(dict(post) | dict(comments=get_all_comments(post["id"])))
return rst
|
MioYvo/unlimited-level-messages
|
backend/flaskr/utils/db.py
|
db.py
|
py
| 3,870 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.fromisoformat",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sqlite3.register_converter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.g.db",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sqlite3.PARSE_DECLTYPES",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.g.db",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sqlite3.Row",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.g.db",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.g.pop",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.node.Node.build_from_cte_rows",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "flaskr.utils.node.Node",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.node.Node",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 111,
"usage_type": "name"
}
] |
13918838862
|
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
import time
from collections import defaultdict
from datetime import datetime
from odoo import api, models
class ManagementDashboard(models.Model):
_name = 'management.dashboard'
_description = "Project Management Dashboard"
@api.model
def get_task_chart_data(self):
"""
        Group tasks by stage, mapping each stage name to its colour and count.
        Returns:
            dictionary -- stage name as key and {count, color, name} as value
"""
task_obj = self.env['project.task']
task_rec = task_obj.search([])
groups = defaultdict(list)
for obj in task_rec:
groups[obj.stage_id].append(obj)
result = {}
for rec in groups.items():
result.update({
rec[0].name: {
'count': len(rec[1]),
'color': rec[0].color,
'name': rec[0].name,
}})
return result
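        # example of the returned shape (illustrative values):
        #   {"In Progress": {"count": 4, "color": 3, "name": "In Progress"}}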
@api.model
def get_config_values(self):
"""
To get the Colour value for dashboard.
Using sudo env to bypass the access right.
Returns:
dictionary -- Dictionary of config colours.
"""
result = self.env['res.config.settings'].sudo().get_values()
return {
'color_close_state': result.get('color_close_state', False),
'color_control_state': result.get('color_control_state', False),
'color_execute_state': result.get('color_execute_state', False),
'color_init_state': result.get('color_init_state', False),
'color_plan_state': result.get('color_plan_state', False),
'card_header_color': result.get('card_header_color', False),
}
@api.model
def get_color_code(self, project):
# 0:Green, 1:Orange, 2:Red
open_task = 0
open_issue = 0
spent_budget = 0
pending_invoice = 0
# ('date_start', '>=', self._context.get('start_date')),
# ('date_start', '<=', self._context.get('end_date')),
project_tasks = self.env['project.task'].search(
[('project_id', '=', project['id']),
('stage_id.name',
'not in',
("Done", "Completed", "Approval",
"Canceled", "Closure", "Release",
"Implementation")),
('date_end', '=', False)])
today_date = datetime.strptime(
time.strftime('%Y-%m-%d'), '%Y-%m-%d').date()
for task in project_tasks:
if task.schedule_date:
schedule_date = datetime.strptime(
str(task.schedule_date), '%Y-%m-%d %H:%M:%S').date()
daysdiff = (schedule_date - today_date).days
if daysdiff <= 1:
open_task = 2
if daysdiff <= 7 and daysdiff > 1 and open_task != 2:
open_task = 1
if daysdiff > 7 and open_task not in (2, 1):
open_task = 0
project_issues = self.env['helpdesk.ticket'].with_context(
view_project_issues=1).search([
('stage_id.closed', '!=', True),
('project_id.id', '=', project['id']),
('closed_date', '=', False)])
        for issue in project_issues or []:
            aging = int(issue.ticket_aging)
            # escalate by ticket age and priority without downgrading a level
            # set by an earlier ticket; the original chained or/and without
            # parentheses, so its open_issue guards bound only to the last clause
            if ((aging > 30 and issue.priority == '0') or
                    (aging > 10 and issue.priority == '1') or
                    (aging > 2 and issue.priority in ('2', '3'))):
                open_issue = 2
            elif ((10 < aging <= 30 and issue.priority == '0') or
                    (2 < aging <= 10 and issue.priority == '1') or
                    (0 < aging <= 2 and issue.priority in ('2', '3'))):
                open_issue = max(open_issue, 1)
            # younger tickets map to severity 0, which is already the default
budget = 0
if project['spent_budget'] > 0 and project['actual_budget'] > 0:
budget = ((project['spent_budget'] - project['actual_budget'])/project['actual_budget']) * 100
if budget > 30:
spent_budget = 2
elif budget > 10 and budget <= 30:
spent_budget = 1
elif budget <= 10:
spent_budget = 0
invoices = self.env['timesheet.invoice'].search([
('project_id', '=', project['id']),
('state', 'in', ('draft', 'confirm', 'pre-approved'))])
        for inv in invoices or []:
            inv_age = int(inv.timesheet_inv_age)
            # boundaries made inclusive: ages of exactly 10 or 30 previously
            # matched no branch at all
            if inv_age > 30:
                pending_invoice = 2
            elif 10 < inv_age <= 30 and pending_invoice != 2:
                pending_invoice = 1
            elif inv_age <= 10 and pending_invoice not in (2, 1):
                pending_invoice = 0
return {
'spent_budget': spent_budget,
'pending_invoice': pending_invoice,
'open_task': open_task,
'open_issue': open_issue}
@api.model
def get_treeview_id(self, view):
        # note: the trailing comma (kept from the original) makes this return a 1-tuple
        return self.env.ref(view).id,
|
onesteinbv/ProjectManagement
|
management_dashboard/models/management_dashboard.py
|
management_dashboard.py
|
py
| 5,708 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "odoo.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "odoo.api.model",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 143,
"usage_type": "name"
}
] |
29542340141
|
import re
from collections import defaultdict
from string import Template
from odoo import _
from odoo.exceptions import MissingError
DEFAULT_REFERENCE_SEPARATOR = ""
PLACE_HOLDER_4_MISSING_VALUE = "/"
class ReferenceMask(Template):
pattern = r"""\[(?:
(?P<escaped>\[) |
(?P<named>[^\]]+?)\] |
(?P<braced>[^\]]+?)\] |
(?P<invalid>)
)"""
def extract_token(s):
pattern = re.compile(r"\[([^\]]+?)\]")
return set(pattern.findall(s))
def sanitize_reference_mask(product, mask):
tokens = extract_token(mask)
attribute_names = set()
for line in product.attribute_line_ids:
attribute_names.add(line.attribute_id.name)
if not tokens.issubset(attribute_names):
raise MissingError(
_("Found unrecognized attribute name in " '"Partcode Template"')
)
def get_rendered_default_code(product, mask):
product_attrs = defaultdict(str)
reference_mask = ReferenceMask(mask)
for value in product.product_template_attribute_value_ids:
if value.attribute_id.code:
product_attrs[value.attribute_id.name] += value.attribute_id.code
if value.product_attribute_value_id.code:
product_attrs[
value.attribute_id.name
] += value.product_attribute_value_id.code
all_attrs = extract_token(mask)
missing_attrs = all_attrs - set(product_attrs.keys())
missing = dict.fromkeys(missing_attrs, PLACE_HOLDER_4_MISSING_VALUE)
product_attrs.update(missing)
default_code = reference_mask.safe_substitute(product_attrs)
return default_code
def render_default_code(product, mask):
sanitize_reference_mask(product, mask)
product.default_code = get_rendered_default_code(product, mask)
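# Illustrative usage (hypothetical attribute names, not from this module):
# given a mask "[Colour][Size]" where the Colour attribute's code is "RD" and
# the selected Size value's code is "L", render_default_code sets
# product.default_code to "RDL"; tokens with no resolvable code fall back to "/".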
|
odoonz/odoonz-addons
|
product_code_builder/models/helper_methods.py
|
helper_methods.py
|
py
| 1,828 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "string.Template",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "odoo.exceptions.MissingError",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 38,
"usage_type": "call"
}
] |
42012547996
|
# python 3 has different package names
try: from urlparse import urlparse
except ImportError: from urllib.parse import urlparse
from collections import defaultdict
def _new_collection():
""" Collection data type is
{path: {method: (ResponseClass,) }}
So e.g. a POST request to http://venmo.com/feed is stored as
{'/feed': {'POST': (ResponseClass,)}}
    the ResponseClass will have had its constructor partially applied
    with the specified stubbed data, so after finding it we finish
    instantiating it with the request we received and return it.
Why? So the request attribute on the response is the request that
was made, not just the matching criteria in the stub
"""
return defaultdict(lambda: defaultdict(lambda: ()))
class RequestCollection(object):
_requests = _new_collection()
@classmethod
def add(cls, request, response):
parsed = urlparse(request.url)
cls._requests[parsed.path][request.method] = \
cls._requests[parsed.path][request.method] + (response,)
@classmethod
def find(cls, request):
parsed = urlparse(request.url)
responses = cls._requests[parsed.path][request.method]
if len(responses) > 0:
head = responses[0]
cls._requests[parsed.path][request.method] = \
cls._requests[parsed.path][request.method][1:] + (head,)
else:
head = None
return head
@classmethod
def remove(cls, request):
parsed = urlparse(request.url)
del cls._requests[parsed.path][request.method]
@classmethod
def reset(cls):
cls._requests = _new_collection()
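# Minimal usage sketch (the request/response doubles are hypothetical; real
# ones come from the surrounding stub layer):
#   RequestCollection.add(request, stubbed_response)  # register a stub
#   RequestCollection.find(request)    # returns matching stubs round-robin
#   RequestCollection.remove(request)  # drop all stubs for that (path, method)
#   RequestCollection.reset()          # clear everything between tests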
|
venmo/tornado-stub-client
|
tornado_stub_client/collection.py
|
collection.py
|
py
| 1,717 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 45,
"usage_type": "call"
}
] |
71836229628
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_insertar(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(570, 518)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Form)
self.groupBox.setFlat(True)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.frame_11 = QtWidgets.QFrame(self.groupBox)
self.frame_11.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_11.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_11.setObjectName("frame_11")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.frame_11)
self.horizontalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.verticalLayout_9.addWidget(self.frame_11)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_9.addItem(spacerItem)
self.frame_12 = QtWidgets.QFrame(self.groupBox)
self.frame_12.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_12.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_12.setObjectName("frame_12")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.frame_12)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.label_12 = QtWidgets.QLabel(self.frame_12)
self.label_12.setFrameShadow(QtWidgets.QFrame.Plain)
self.label_12.setAlignment(QtCore.Qt.AlignCenter)
self.label_12.setObjectName("label_12")
self.verticalLayout_10.addWidget(self.label_12)
self.verticalLayout_9.addWidget(self.frame_12)
self.frame_13 = QtWidgets.QFrame(self.groupBox)
self.frame_13.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_13.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_13.setObjectName("frame_13")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.frame_13)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.frame_14 = QtWidgets.QFrame(self.frame_13)
self.frame_14.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_14.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_14.setObjectName("frame_14")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.frame_14)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.label_13 = QtWidgets.QLabel(self.frame_14)
self.label_13.setObjectName("label_13")
self.verticalLayout_11.addWidget(self.label_13)
self.label_14 = QtWidgets.QLabel(self.frame_14)
self.label_14.setObjectName("label_14")
self.verticalLayout_11.addWidget(self.label_14)
self.label_15 = QtWidgets.QLabel(self.frame_14)
self.label_15.setObjectName("label_15")
self.verticalLayout_11.addWidget(self.label_15)
self.horizontalLayout_6.addWidget(self.frame_14)
self.frame_15 = QtWidgets.QFrame(self.frame_13)
self.frame_15.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_15.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_15.setObjectName("frame_15")
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.frame_15)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.text_act1 = QtWidgets.QLineEdit(self.frame_15)
self.text_act1.setObjectName("text_act1")
self.verticalLayout_12.addWidget(self.text_act1)
self.text_act2 = QtWidgets.QLineEdit(self.frame_15)
self.text_act2.setObjectName("text_act2")
self.verticalLayout_12.addWidget(self.text_act2)
self.text_act3 = QtWidgets.QLineEdit(self.frame_15)
self.text_act3.setObjectName("text_act3")
self.verticalLayout_12.addWidget(self.text_act3)
self.horizontalLayout_6.addWidget(self.frame_15)
self.frame_16 = QtWidgets.QFrame(self.frame_13)
self.frame_16.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_16.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_16.setObjectName("frame_16")
self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.frame_16)
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.label_16 = QtWidgets.QLabel(self.frame_16)
self.label_16.setObjectName("label_16")
self.verticalLayout_13.addWidget(self.label_16)
self.label_17 = QtWidgets.QLabel(self.frame_16)
self.label_17.setObjectName("label_17")
self.verticalLayout_13.addWidget(self.label_17)
self.label_18 = QtWidgets.QLabel(self.frame_16)
self.label_18.setObjectName("label_18")
self.verticalLayout_13.addWidget(self.label_18)
self.horizontalLayout_6.addWidget(self.frame_16)
self.frame_17 = QtWidgets.QFrame(self.frame_13)
self.frame_17.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_17.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_17.setObjectName("frame_17")
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.frame_17)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.text_act4 = QtWidgets.QLineEdit(self.frame_17)
self.text_act4.setObjectName("text_act4")
self.verticalLayout_14.addWidget(self.text_act4)
self.text_act5 = QtWidgets.QLineEdit(self.frame_17)
self.text_act5.setObjectName("text_act5")
self.verticalLayout_14.addWidget(self.text_act5)
self.text_act6 = QtWidgets.QLineEdit(self.frame_17)
self.text_act6.setObjectName("text_act6")
self.verticalLayout_14.addWidget(self.text_act6)
self.horizontalLayout_6.addWidget(self.frame_17)
self.verticalLayout_9.addWidget(self.frame_13)
self.frame_18 = QtWidgets.QFrame(self.groupBox)
self.frame_18.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_18.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_18.setObjectName("frame_18")
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.frame_18)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.label_19 = QtWidgets.QLabel(self.frame_18)
self.label_19.setAlignment(QtCore.Qt.AlignCenter)
self.label_19.setObjectName("label_19")
self.verticalLayout_15.addWidget(self.label_19)
self.verticalLayout_9.addWidget(self.frame_18)
self.frame_19 = QtWidgets.QFrame(self.groupBox)
self.frame_19.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_19.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_19.setObjectName("frame_19")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.frame_19)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_20 = QtWidgets.QLabel(self.frame_19)
self.label_20.setObjectName("label_20")
self.horizontalLayout_7.addWidget(self.label_20)
self.text_estado = QtWidgets.QLineEdit(self.frame_19)
self.text_estado.setObjectName("text_estado")
self.horizontalLayout_7.addWidget(self.text_estado)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.verticalLayout_9.addWidget(self.frame_19)
self.verticalLayout.addWidget(self.groupBox)
self.frame_10 = QtWidgets.QFrame(Form)
self.frame_10.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_10.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_10.setObjectName("frame_10")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.frame_10)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem2)
self.btn_guardar = QtWidgets.QPushButton(self.frame_10)
self.btn_guardar.setObjectName("btn_guardar")
self.horizontalLayout_4.addWidget(self.btn_guardar)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.verticalLayout.addWidget(self.frame_10)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Insetar datos"))
self.groupBox.setTitle(_translate("Form", "Insertar datos de entrenamiento"))
self.label_12.setText(_translate("Form", "Para cada actividad, introduce \"A\" si aprobo, \"R\" si reprobo o \"NP\" si no presento."))
self.label_13.setText(_translate("Form", "Actividad 1:"))
self.label_14.setText(_translate("Form", "Actividad 2:"))
self.label_15.setText(_translate("Form", "Actividad 3:"))
self.label_16.setText(_translate("Form", "Actividad 4:"))
self.label_17.setText(_translate("Form", "Actividad 5:"))
self.label_18.setText(_translate("Form", "Actividad 6:"))
self.label_19.setText(_translate("Form", "Introduce \"SI\" o \"NO\""))
self.label_20.setText(_translate("Form", "Aprobo: "))
self.text_estado.setPlaceholderText(_translate("Form", "Ingrese \"SI\" o \"NO\""))
self.btn_guardar.setText(_translate("Form", "Guardar datos"))
|
JoseVale99/simulador_prediccion_desemepe-o
|
view/insertar.py
|
insertar.py
|
py
| 9,728 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGroupBox",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLayout",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSpacerItem",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSpacerItem",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSpacerItem",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSpacerItem",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QMetaObject",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 154,
"usage_type": "name"
}
] |
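A minimal sketch of how a pyuic5-generated class like the one ending above is typically wired up; the module path and the Ui_Form class name are assumptions, since only the tail of the generated file appears in this record.
import sys
from PyQt5 import QtWidgets
from view.insertar import Ui_Form  # assumed module/class name; only the file tail is shown above

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(window)  # builds the frames, labels and QLineEdits, then calls retranslateUi
ui.btn_guardar.clicked.connect(lambda: print(ui.text_estado.text()))  # hypothetical slot, for illustration only
window.show()
sys.exit(app.exec_())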
43229119277
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("shop", "0005_auto_20150527_1127"),
]
operations = [
migrations.AlterField(
model_name="order",
name="key",
field=models.CharField(max_length=40, db_index=True),
),
]
|
stephenmcd/cartridge
|
cartridge/shop/migrations/0006_auto_20150916_0459.py
|
0006_auto_20150916_0459.py
|
py
| 345 |
python
|
en
|
code
| 696 |
github-code
|
6
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
}
] |
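For context, a sketch of the model state this AlterField migration moves toward: after 0006 the Order.key column carries a database index. The surrounding model definition is an assumption; only the field itself is taken from the migration above.
from django.db import models

class Order(models.Model):
    # db_index=True means lookups such as Order.objects.get(key=...)
    # hit an index instead of scanning the whole table.
    key = models.CharField(max_length=40, db_index=True)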
33359783594
|
from unittest import TestCase
import unittest
# from unittest.mock import patch, Mock
# import csv
# from flask import request, jsonify
import requests
# import sys
#
# sys.path.insert(0, '../../src')
class TestLoadDailyReports(TestCase):
# def setUp(self):
# self.app = app.
def test_load_data_success(self):
f = open("tests/routes/01-01-2021.csv", "rb")
file = f.read()
url = 'https://covid-monitor-61.herokuapp.com/daily_reports/data'
r = requests.post(url, data=file, headers={"Content-Type": "text/csv"})
f.close()
self.assertEqual(r.status_code, 200)
def test_query_data_success(self):
url = 'https://covid-monitor-61.herokuapp.com/daily_reports/cases'
body = {"return_type": "json",
"types": ["Confirmed", "Deaths", "Active"],
"locations":
[
{"Country/Region": "Belgium"},
{"Country/Region": "Canada", "Province/State": "Ontario"},
{"Country/Region": "Australia",
"Province/State": "Queensland",
"Combined_Key": "Australian Capital Territory, Australia"}
]
}
r = requests.post(url, json=body, headers={"Content-Type": "application/json"})
print(r.json())
self.assertEqual(r.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
shin19991207/CSC301-A2
|
tests/routes/test_daily_reports.py
|
test_daily_reports.py
|
py
| 1,472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 45,
"usage_type": "call"
}
] |
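The commented-out unittest.mock import above hints at an offline variant of these tests; a sketch of the same check with requests.post patched so it no longer depends on the live Heroku deployment (the payload and test class name are stand-ins).
import requests
from unittest import TestCase
from unittest.mock import patch, Mock

class TestLoadDailyReportsOffline(TestCase):
    @patch("requests.post")
    def test_load_data_success(self, mock_post):
        mock_post.return_value = Mock(status_code=200)  # canned response instead of a network call
        r = requests.post(
            "https://covid-monitor-61.herokuapp.com/daily_reports/data",
            data=b"Province/State,Country/Region\n",  # stand-in payload
            headers={"Content-Type": "text/csv"},
        )
        self.assertEqual(r.status_code, 200)
        mock_post.assert_called_once()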
30059046061
|
from pyswarms.base.base_discrete import DiscreteSwarmBase
import numpy as np
from scipy.spatial import cKDTree
class PerezPSO(DiscreteSwarmBase):
def assertions(self):
"""Assertion method to check various inputs.
Raises
------
KeyError
When one of the required dictionary keys is missing.
ValueError
When the number of neighbors is not within the range
:code:`[0, n_particles]`.
When the p-value is not in the list of values :code:`[1,2]`.
"""
super(PerezPSO, self).assertions()
if not all(key in self.options for key in ('k', 'p')):
raise KeyError('Missing either k or p in options')
if not 0 <= self.k <= self.n_particles:
            raise ValueError('No. of neighbors must be between 0 and no. of '
                             'particles.')
if self.p not in [1, 2]:
            raise ValueError('p-value should either be 1 (for L1/Minkowski) '
                             'or 2 (for L2/Euclidean).')
def __init__(self, n_particles, dimensions, alpha, options, velocity_clamp=None):
"""Initializes the swarm.
Attributes
----------
n_particles : int
number of particles in the swarm.
dimensions : int
number of dimensions in the space.
velocity_clamp : tuple (default is :code:`None`)
a tuple of size 2 where the first entry is the minimum velocity
and the second entry is the maximum velocity. It
sets the limits for velocity clamping.
options : dict with keys :code:`{'c1', 'c2', 'k', 'p'}`
a dictionary containing the parameters for the specific
optimization technique
* c1 : float
cognitive parameter
* c2 : float
social parameter
* w : float
inertia parameter
* k : int
number of neighbors to be considered. Must be a
positive integer less than :code:`n_particles`
* p: int {1,2}
the Minkowski p-norm to use. 1 is the
sum-of-absolute values (or L1 distance) while 2 is
the Euclidean (or L2) distance.
"""
# Initialize logger
# self.logger = logging.getLogger(__name__)
binary = False
# Assign k-neighbors and p-value as attributes
self.k, self.p = options['k'], options['p']
# Initialize parent class
super(PerezPSO, self).__init__(n_particles, dimensions, binary,
options, velocity_clamp)
# Invoke assertions
self.assertions()
# Initialize the resettable attributes
self.reset()
# Set initial glo
self.glo = np.full((1, self.dimensions), np.inf)
self.glo_cost = np.inf
self.y = np.full(self.n_particles, 0)
self.alpha = alpha
self.loc_pos = 0
def optimize(self, objective_func, iters, print_step=1, verbose=1):
"""Optimizes the swarm for a number of iterations.
Performs the optimization to evaluate the objective
function :code:`f` for a number of iterations :code:`iter.`
Parameters
----------
objective_func : function
objective function to be evaluated
iters : int
number of iterations
print_step : int (the default is 1)
amount of steps for printing into console.
verbose : int (the default is 1)
verbosity setting.
Returns
-------
tuple
the local best cost and the local best position among the
swarm.
"""
for i in range(iters):
# Compute cost for current position and personal best
current_cost = objective_func(self.pos)
# Obtain the indices of the best position for each
# neighbour-space, and get the local best cost and
# local best positions from it.
nmin_idx = self._get_neighbors(current_cost) # get index of loc for each neighborhood of the cur position
self.best_cost = current_cost[nmin_idx] # the loc optimum cost for each particle
cost_abs = np.abs(current_cost)
loc_min = cost_abs.min()
if loc_min < np.abs(self.glo_cost):
pos_min_index = np.where(cost_abs == loc_min)[0][0] # index of pos min
self.glo = self.pos[pos_min_index]
self.glo_cost = current_cost[pos_min_index]
del loc_min, cost_abs
            # Get the local min relative to each point
self.loc_pos = self.pos[nmin_idx]
self.y = self._get_y(self.loc_pos)
# Perform position velocity update
self._update_velocity() # must be called first
self._update_position()
care = r"""
Iter: {}
glo: {}, {}
Cur_cost: {}
loc_pos: {}
nmin_idx: {}
y: {}
velocity: {}
position: {}
""".format(i, self.glo, self.glo_cost, current_cost, self.loc_pos,
nmin_idx, self.y, self.velocity, self.pos)
if i % print_step == 0:
print(care + "\n\n")
if all_eq(self.pos):
break
if self.glo_cost == 0:
break
# Obtain the final best_cost and the final best_position
# final_best_cost_arg = np.argmin(self.best_cost)
# final_best_cost = np.min(self.best_cost)
# final_best_pos = self.best_pos[final_best_cost_arg]
return self.glo_cost, self.glo
def _get_neighbors(self, pbest_cost):
"""Helper function to obtain the best position found in the
neighborhood. This uses the cKDTree method from :code:`scipy`
to obtain the nearest neighbours
Parameters
----------
pbest_cost : numpy.ndarray of size (n_particles, )
the cost incurred at the historically best position. Will be used
for mapping the obtained indices to its actual cost.
Returns
-------
array of size (n_particles, ) dtype=int64
indices containing the best particles for each particle's
neighbour-space that have the lowest cost
"""
# Use cKDTree to get the indices of the nearest neighbors
tree = cKDTree(self.pos)
_, idx = tree.query(self.pos, p=self.p, k=self.k)
# Map the computed costs to the neighbour indices and take the
# argmin. If k-neighbors is equal to 1, then the swarm acts
# independently of each other.
if self.k == 1:
# The minimum index is itself, no mapping needed.
best_neighbor = pbest_cost[idx][:, np.newaxis].argmin(axis=1)
else:
idx_min = pbest_cost[idx].argmin(axis=1)
best_neighbor = idx[np.arange(len(idx)), idx_min]
return best_neighbor
def _update_velocity(self):
"""Updates the velocity matrix of the swarm.
This method updates the attribute :code:`self.velocity` of
the instantiated object. It is called by the
:code:`self.optimize()` method.
"""
# Define the hyperparameters from options dictionary
c1, c2, w = self.options['c1'], self.options['c2'], self.options['w']
# Compute for cognitive and social terms
cognitive = (c1 * np.random.uniform(0, 1) * (-1 - self.y))
social = (c2 * np.random.uniform(0, 1)
* (1 - self.y))
temp_velocity = (w * self.velocity) + cognitive + social
# Create a mask to clamp the velocities
if self.velocity_clamp is not None:
# Create a mask depending on the set boundaries
min_velocity, max_velocity = self.velocity_clamp[0], \
self.velocity_clamp[1]
_b = np.logical_and(temp_velocity >= min_velocity,
temp_velocity <= max_velocity)
# Use the mask to finally clamp the velocities
self.velocity = np.where(~_b, self.velocity, temp_velocity)
else:
self.velocity = temp_velocity
def _update_position(self):
"""Updates the position matrix of the swarm.
This method updates the attribute :code:`self.pos` of
the instantiated object. It is called by the
:code:`self.optimize()` method.
"""
del self.pos
next_pos = np.random.randint(-2000, 2000, size=self.swarm_size)
_decision = self.y + self.velocity
# print("des: {}".format(_decision))
# mext_pos = np.where(_decision > self.alpha, self.glo, next_pos)
# next_pos = np.where(_decision < self.alpha, self.loc_pos, next_pos)
for i in range(self.n_particles):
if _decision[i] > self.alpha:
next_pos[i] = self.glo
elif _decision[i] < -self.alpha:
next_pos[i] = self.loc_pos[i]
self.pos = next_pos
def _get_y(self, loc):
_y = np.array([])
for i in range(self.n_particles):
if np.array_equal(self.glo, self.pos[i]):
_y = np.concatenate((_y, [1]))
elif np.array_equal(loc[i], self.pos[i]):
_y = np.concatenate((_y, [-1]))
else:
_y = np.concatenate((_y, [0]))
return _y
def _sigmoid(self, x):
"""Helper sigmoid function.
Inputs
------
x : numpy.ndarray
Input vector to compute the sigmoid from
Returns
-------
numpy.ndarray
Output sigmoid computation
"""
        return 1 / (1 + np.exp(-x))
def all_eq(position):
first = position[0]
for x in position:
if not np.array_equal(x, first):
return False
return True
if __name__ == "__main__":
    record_holder = np.fromstring("-1251 -555 -1024 1119 273 -1101 1728 -1835 3 1968 1375 139 -1051 -547 -1531 298", dtype=int, sep=" ")  # -16047022661760
# print(record_holder)
from Particle import majic_func as obj_func
from pyswarms.utils.environments import PlotEnvironment
file = open("best_record.txt", "a")
for loop in range(10):
test = PerezPSO(12345, 16, 0.3, {"k": 10, 'c1': 0.8, 'c2': 0.2, 'w': 0.75, 'p': 2})
test.pos = np.random.randint(-2000, 2000, size=test.swarm_size)
test.velocity = np.full(test.n_particles, 0)
# test.pos[0] = record_holder
# print(test.pos)
proposed = test.optimize(obj_func, iters=200, print_step=50)
file.write("{}: {}\n\n".format(proposed[0], proposed[1]))
file.close()
|
Ninalgad/PerezSwarm
|
base_discrete.py
|
base_discrete.py
|
py
| 10,842 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pyswarms.base.base_discrete.DiscreteSwarmBase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.full",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "numpy.full",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.cKDTree",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "numpy.logical_and",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "numpy.full",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "Particle.majic_func",
"line_number": 284,
"usage_type": "argument"
}
] |
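A small self-contained sketch of the cKDTree lookup that _get_neighbors builds on: each particle queries its k nearest positions, maps the costs onto those indices, and adopts the cheapest neighbour.
import numpy as np
from scipy.spatial import cKDTree

pos = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 5.0], [5.5, 5.0]])
cost = np.array([3.0, 1.0, 4.0, 0.5])

tree = cKDTree(pos)
_, idx = tree.query(pos, k=2, p=2)        # idx[i] holds the 2 nearest particles to particle i (itself included)
idx_min = cost[idx].argmin(axis=1)        # column of the cheapest neighbour within each row
best_neighbor = idx[np.arange(len(idx)), idx_min]
print(best_neighbor)                      # [1 1 3 3]: each particle's local best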
13138241281
|
from django.contrib import admin
from django.urls import path, include
from django.http import HttpResponse
def homepage(request):
return HttpResponse("you're in the home page, goto polls.")
urlpatterns = [
path('admin/', admin.site.urls),
path('', homepage),
path('polls/', include('polls.urls')),
]
|
callmebhawesh/100-Days-Of-Code
|
Day 31/mysite/mysite/urls.py
|
urls.py
|
py
| 321 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "django.http.HttpResponse",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 13,
"usage_type": "call"
}
] |
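The include('polls.urls') call above assumes a polls/urls.py module; a minimal sketch of what that counterpart usually looks like (the view name is an assumption, since the polls app is not shown in this record).
# polls/urls.py -- hypothetical counterpart to the include() above
from django.urls import path
from . import views

app_name = "polls"
urlpatterns = [
    path("", views.index, name="index"),  # assumed view
]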
39993181023
|
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
import open3d as o3d
def mkdirs(path):
try:
os.makedirs(path)
except:
pass
class Saver(object):
def __init__(self, save_dir):
self.idx = 0
self.save_dir = os.path.join(save_dir, "results")
if not os.path.exists(self.save_dir):
mkdirs(self.save_dir)
def save_as_point_cloud(self, depth, rgb, path, mask=None):
h, w = depth.shape
Theta = np.arange(h).reshape(h, 1) * np.pi / h + np.pi / h / 2
Theta = np.repeat(Theta, w, axis=1)
Phi = np.arange(w).reshape(1, w) * 2 * np.pi / w + np.pi / w - np.pi
Phi = -np.repeat(Phi, h, axis=0)
X = depth * np.sin(Theta) * np.sin(Phi)
Y = depth * np.cos(Theta)
Z = depth * np.sin(Theta) * np.cos(Phi)
if mask is None:
X = X.flatten()
Y = Y.flatten()
Z = Z.flatten()
R = rgb[:, :, 0].flatten()
G = rgb[:, :, 1].flatten()
B = rgb[:, :, 2].flatten()
else:
X = X[mask]
Y = Y[mask]
Z = Z[mask]
R = rgb[:, :, 0][mask]
G = rgb[:, :, 1][mask]
B = rgb[:, :, 2][mask]
XYZ = np.stack([X, Y, Z], axis=1)
RGB = np.stack([R, G, B], axis=1)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(XYZ)
pcd.colors = o3d.utility.Vector3dVector(RGB)
o3d.io.write_point_cloud(path, pcd)
def save_samples(self, rgbs, gt_depths, pred_depths, depth_masks=None):
"""
Saves samples
"""
rgbs = rgbs.cpu().numpy().transpose(0, 2, 3, 1)
depth_preds = pred_depths.cpu().numpy()
gt_depths = gt_depths.cpu().numpy()
if depth_masks is None:
depth_masks = gt_depths != 0
else:
depth_masks = depth_masks.cpu().numpy()
for i in range(rgbs.shape[0]):
self.idx = self.idx+1
mkdirs(os.path.join(self.save_dir, '%04d'%(self.idx)))
cmap = plt.get_cmap("rainbow_r")
depth_pred = cmap(depth_preds[i][0].astype(np.float32)/10)
depth_pred = np.delete(depth_pred, 3, 2)
path = os.path.join(self.save_dir, '%04d' % (self.idx) ,'_depth_pred.jpg')
cv2.imwrite(path, (depth_pred * 255).astype(np.uint8))
depth_gt = cmap(gt_depths[i][0].astype(np.float32)/10)
depth_gt = np.delete(depth_gt, 3, 2)
depth_gt[..., 0][~depth_masks[i][0]] = 0
depth_gt[..., 1][~depth_masks[i][0]] = 0
depth_gt[..., 2][~depth_masks[i][0]] = 0
path = os.path.join(self.save_dir, '%04d' % (self.idx), '_depth_gt.jpg')
cv2.imwrite(path, (depth_gt * 255).astype(np.uint8))
path = os.path.join(self.save_dir, '%04d'%(self.idx) , '_pc_pred.ply')
self.save_as_point_cloud(depth_preds[i][0], rgbs[i], path)
path = os.path.join(self.save_dir, '%04d'%(self.idx) , '_pc_gt.ply')
self.save_as_point_cloud(gt_depths[i][0], rgbs[i], path, depth_masks[i][0])
rgb = (rgbs[i] * 255).astype(np.uint8)
path = os.path.join(self.save_dir, '%04d'%(self.idx) , '_rgb.jpg')
cv2.imwrite(path, rgb[:,:,::-1])
|
zhijieshen-bjtu/PanoFormer
|
PanoFormer/saver.py
|
saver.py
|
py
| 3,346 |
python
|
en
|
code
| 79 |
github-code
|
6
|
[
{
"api_name": "os.makedirs",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.repeat",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.repeat",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "open3d.geometry.PointCloud",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.Vector3dVector",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.Vector3dVector",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "open3d.io.write_point_cloud",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "open3d.io",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 98,
"usage_type": "call"
}
] |
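A tiny numeric check of the equirectangular back-projection used in save_as_point_cloud: each pixel maps to spherical angles (Theta, Phi) and then to XYZ, so a constant depth of 1 should place every point on the unit sphere.
import numpy as np

h, w = 4, 8
depth = np.ones((h, w))
Theta = np.repeat(np.arange(h).reshape(h, 1) * np.pi / h + np.pi / h / 2, w, axis=1)
Phi = -np.repeat(np.arange(w).reshape(1, w) * 2 * np.pi / w + np.pi / w - np.pi, h, axis=0)

X = depth * np.sin(Theta) * np.sin(Phi)
Y = depth * np.cos(Theta)
Z = depth * np.sin(Theta) * np.cos(Phi)
print(np.allclose(np.sqrt(X**2 + Y**2 + Z**2), 1.0))  # True: radii equal the input depth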
8352946523
|
from setuptools import find_packages, setup
with open("./README.md") as fp:
description = fp.read()
setup(
name="pyC8",
version="1.1.1",
description="Python SDK for Macrometa Global Data Mesh",
long_description=description,
long_description_content_type="text/markdown",
author="Macrometa",
author_email="[email protected]",
url="https://www.macrometa.com",
packages=find_packages(exclude=["tests"]),
include_package_data=True,
install_requires=["requests==2.25.1", "six", "websocket-client==0.57.0"],
tests_require=["pytest", "mock", "flake8"],
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Documentation :: Sphinx",
],
)
|
Macrometacorp/pyC8
|
setup.py
|
setup.py
|
py
| 1,074 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 15,
"usage_type": "call"
}
] |
29528131446
|
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def home():
return "TP Florian Marques"
@app.route('/means', methods=['GET'])
def meanOfList():
    values = request.args.getlist('int', type=int)
    if len(values) == 0:
        return "Given list is null"
    else:
        return "Mean of the list is : {}".format(sum(values) / len(values))
|
MarquesFlorian/python_server_testing_florian_marques
|
app.py
|
app.py
|
py
| 362 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.request.args.getlist",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
}
] |
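A quick sketch exercising the /means endpoint through Flask's built-in test client, assuming the module above is importable as app.
from app import app  # assumes the file above is saved as app.py

client = app.test_client()
resp = client.get("/means?int=2&int=4&int=6")
print(resp.get_data(as_text=True))  # Mean of the list is : 4.0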
5694314611
|
import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.reduce_dim_5 = nn.Conv2d(2048, 256, kernel_size=(1, 1), stride=1, padding=0)
self.reduce_dim_4 = nn.Conv2d(1024, 256, kernel_size=(1, 1), stride=1, padding=0)
self.reduce_dim_3 = nn.Conv2d(512, 256, kernel_size=(1, 1), stride=1, padding=0)
self.reduce_dim_2 = nn.Conv2d(256, 256, kernel_size=(1, 1), stride=1, padding=0)
self.double_conv_5 = self._make_dobule_conv(256, 128)
self.double_conv_4 = self._make_dobule_conv(256, 128)
self.double_conv_3 = self._make_dobule_conv(256, 128)
self.double_conv_2 = self._make_dobule_conv(256, 128)
def _up_add(self, x, y):
(_, _, H, W) = y.size()
        x_up = nn.functional.upsample(x, size=(H, W), mode='nearest')  # deprecated alias; nn.functional.interpolate is the modern equivalent
return x_up + y
def _make_dobule_conv(self, in_dims, out_dims):
conv_layer = nn.Sequential(
nn.Conv2d(in_dims, out_dims, kernel_size=(3, 3), stride=1, padding=1),
nn.BatchNorm2d(out_dims),
nn.ReLU(),
nn.Conv2d(out_dims, out_dims, kernel_size=(3, 3), stride=1, padding=1),
nn.BatchNorm2d(out_dims),
nn.ReLU()
)
return conv_layer
def forward(self, c2, c3, c4, c5):
m5 = self.reduce_dim_5(c5)
m4 = self._up_add(m5, self.reduce_dim_4(c4))
m3 = self._up_add(m4, self.reduce_dim_3(c3))
m2 = self._up_add(m3, self.reduce_dim_2(c2))
m5 = self.double_conv_5(m5)
m4 = self.double_conv_4(m4)
m3 = self.double_conv_3(m3)
m2 = self.double_conv_2(m2)
return m5, m4, m3, m2
|
dmdm2002/FPN
|
Model/TopDown.py
|
TopDown.py
|
py
| 1,762 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.upsample",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
}
] |
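A shape-check sketch for the top-down pathway above, feeding ResNet-50-style feature maps (strides 4/8/16/32, widths 256/512/1024/2048) through the Decoder; the import path follows the repo layout shown and is an assumption.
import torch
from Model.TopDown import Decoder  # assumed import path, per the sub_path above

c2 = torch.randn(1, 256, 56, 56)
c3 = torch.randn(1, 512, 28, 28)
c4 = torch.randn(1, 1024, 14, 14)
c5 = torch.randn(1, 2048, 7, 7)

m5, m4, m3, m2 = Decoder()(c2, c3, c4, c5)
print(m5.shape, m2.shape)  # torch.Size([1, 128, 7, 7]) torch.Size([1, 128, 56, 56])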
19601192171
|
#-*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from blog.models import Mypost, MainPage
from blog.forms import CreateForms
# Create your views here.
def index(request):
all_posts = Mypost.objects.all()
maintext = MainPage.objects.all()
# print('all_posts_all')
# print(all_posts)
context = {'maintext': maintext}
return render(request, 'index.html', context)
def BlogList(request):
all_posts = Mypost.objects.all()
# print('all_posts_all')
# print(all_posts)
context = {'all_posts': all_posts}
return render(request, 'blogs_list.html', context)
def PostDitail(request, pk):
post_ditail = Mypost.objects.all().filter(pk=pk)
print('post_ditail')
print(post_ditail[0])
return render(request, 'blog_ditail.html', {'post_ditail': post_ditail[0]})
def CreatePost(request):
if request.method == 'POST':
form = CreateForms(request.POST, request.FILES)
if form.is_valid():
create_post = form.save(commit=False)
create_post.author = request.user
create_post.save()
return redirect('blog:ditail', pk=create_post.pk)
else:
        form = CreateForms()
return render(request, 'create.html', {'form': form})
def EditPost(request, pk):
# edit_post1 = get_object_or_404(Mypost, pk=pk)
edit_post = Mypost.objects.filter(pk=pk).get()
if request.method == 'POST':
form = CreateForms(request.POST, request.FILES, instance=edit_post)
# print('form')
# print(form)
if form.is_valid():
edit = form.save(commit=False)
# edit.author = request.user
# print('edit.author')
# print(edit.author)
edit.save()
return redirect('blog:ditail', pk=edit.pk)
else:
form = CreateForms(instance=edit_post)
return render(request, 'edit.html', {'form': form})
def DelPost(request, pk):
del_post = Mypost.objects.get(pk=pk)
if request.method == 'POST':
form = CreateForms(request.POST, instance=del_post)
if form.is_valid():
del_post = form.save(commit=False)
del_post.delete()
return redirect('blog:blogs_list')
else:
form = CreateForms(instance=del_post)
return render(request, 'delete.html', {'form': form})
# def PagePostApi(request):
#
# if request.method == 'POST':
# form = CreateForms(request.POST)
# if form.is_valid():
# create_post = form.save(commit=False)
# create_post.author = request.user
# create_post.save()
# return redirect('blog:ditail', pk=create_post.pk)
# else:
# form = CreateForms
# return render(request, 'index.html', {'form': form} )
def ApiBlogjs(request):
return render(request, 'apiblogjs.html', )
def ApiCreateblogjs(request):
return render(request, 'apicreateblogjs.html',)
def ApiDetailblogjs(request, pk):
post_ditail = Mypost.objects.all().filter(pk=pk)
return render(request, 'apidetailblogjs.html',{'post_ditail': post_ditail[0]})
def ApiEditblogjs(request, pk):
post_ditail = Mypost.objects.all().filter(pk=pk)
return render(request, 'apieditblogjs.html',{'post_ditail': post_ditail[0]})
### ReactJS #####
def AllPostsReact(request):
return render(request, 'react_post/allreact.html', )
def CreatePostReact(request):
return render(request, 'react_post/createpostreact.html',)
def DetailPostReact(request, pk):
post_ditail = Mypost.objects.all().filter(pk=pk)
return render(request, 'react_post/detailreact.html',{'post_ditail': post_ditail[0]})
def EditPostReact(request, pk):
post_ditail = Mypost.objects.all().filter(pk=pk)
return render(request, 'react_post/editpostreact.html',{'post_ditail': post_ditail[0]})
|
drhtka/forms_urls_drf
|
blog/views.py
|
views.py
|
py
| 3,836 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "blog.models.MainPage.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "blog.models.MainPage.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "blog.models.MainPage",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "blog.forms.CreateForms",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "blog.forms.CreateForms",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.filter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "blog.forms.CreateForms",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "blog.forms.CreateForms",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.get",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "blog.forms.CreateForms",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "blog.forms.CreateForms",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects.all",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "blog.models.Mypost.objects",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "blog.models.Mypost",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 118,
"usage_type": "call"
}
] |
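The views above redirect to names like 'blog:ditail' and 'blog:blogs_list'; a sketch of a blog/urls.py that would satisfy those reverses (the exact URL patterns are assumptions).
# blog/urls.py -- hypothetical routing matching the redirect() names used above
from django.urls import path
from . import views

app_name = "blog"
urlpatterns = [
    path("", views.BlogList, name="blogs_list"),
    path("post/<int:pk>/", views.PostDitail, name="ditail"),
    path("post/create/", views.CreatePost, name="create"),
    path("post/<int:pk>/edit/", views.EditPost, name="edit"),
]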
12486639819
|
import multiprocessing
import time
import hashlib
memory = list(range(30_000_000))
def function(name):
for i in range(10):
print("Current:", name, i)
time.sleep(1)
def slow_function(name):
for i in range(10):
print("Current:", name, i)
for j in range(300_000):
hashlib.md5(str(j).encode("utf-8")).hexdigest()
if __name__ == "__main__":
for i in range(3):
name = chr(97 + i)
multiprocessing.Process(target=slow_function, args=(name, )).start()
|
tt-n-walters/21-tuesday-python
|
core/multiple_processes.py
|
multiple_processes.py
|
py
| 524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 26,
"usage_type": "call"
}
] |
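A sketch of the same pattern with the worker processes joined and timed, which makes the CPU-bound parallel speed-up measurable instead of just interleaved prints.
import multiprocessing
import time
import hashlib

def slow_function(name):
    for j in range(300_000):
        hashlib.md5(str(j).encode("utf-8")).hexdigest()
    print(name, "done")

if __name__ == "__main__":
    start = time.perf_counter()
    procs = [multiprocessing.Process(target=slow_function, args=(chr(97 + i),)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()  # wait for every worker before reading the clock
    print(f"elapsed: {time.perf_counter() - start:.2f}s")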
38592347384
|
from .atari import Atari
from .obj3d import Obj3D
from torch.utils.data import DataLoader
from object_detector import CLIPort_Dataset
__all__ = ['get_dataset', 'get_dataloader']
def get_dataset(cfg, mode):
assert mode in ['train', 'val', 'test']
return CLIPort_Dataset(cfg.dataset_roots.TABLE, mode)
def get_dataloader(cfg, mode):
assert mode in ['train', 'val', 'test']
batch_size = getattr(cfg, mode).batch_size
    shuffle = mode == 'train'
num_workers = getattr(cfg, mode).num_workers
dataset = get_dataset(cfg, mode)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return dataloader
|
1989Ryan/paragon
|
object_detector/space/dataset/__init__.py
|
__init__.py
|
py
| 713 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "object_detector.CLIPort_Dataset",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 21,
"usage_type": "call"
}
] |
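A toy sketch of the DataLoader plumbing in get_dataloader above, with a stand-in Dataset so it runs without the CLIPort data (ToyDataset is purely illustrative).
import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    # stand-in for CLIPort_Dataset: eight (features, label) pairs
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return torch.full((3,), float(idx)), idx

loader = DataLoader(ToyDataset(), batch_size=4, shuffle=True, num_workers=0)
for xb, yb in loader:
    print(xb.shape, yb)  # torch.Size([4, 3]) plus a batch of 4 labels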
18453506476
|
from icecream import ic
from stack import Stack
from datetime import datetime
def time_format():
return f'{datetime.now().strftime("%m/%d/%Y, %I:%M:%S")}|> '
ic.configureOutput(prefix=time_format, includeContext=True)
def nextLargestElment(items):
tempStack = Stack()
returnStack = Stack()
tempStack.push(items[0])
print("Intial tempStack:", tempStack.getStack())
print("-----------------------------------------")
for current_item in items[1::]:
print("current_item:", current_item)
print("stack_top_item:", tempStack.peek())
if tempStack.isEmpty() == False:
stack_top_item = tempStack.pop()
while stack_top_item < current_item:
print(str(stack_top_item) + " -- " + str(current_item))
returnStack.push(current_item)
if tempStack.isEmpty():
break
stack_top_item = tempStack.pop()
if stack_top_item > current_item:
tempStack.push(stack_top_item)
tempStack.push(current_item)
print("tempStack:", tempStack.getStack())
print("-----------------------------------------")
while tempStack.isEmpty() == False:
element = tempStack.pop()
returnStack.push(-1)
next = -1
print(str(element) + " -- " + str(next))
return returnStack.getStack()
if __name__ == '__main__':
# ic(nextLargestElment([int(item)
# for item in input("Enter the list items : ").strip().split()]))
ic(nextLargestElment([2, 6, 5, 4, 19]))
|
beharamadhu270405/python-DS
|
stack/next_greatest_element_using_stacks.py
|
next_greatest_element_using_stacks.py
|
py
| 1,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "icecream.ic.configureOutput",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "icecream.ic",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "stack.Stack",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "stack.Stack",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "icecream.ic",
"line_number": 48,
"usage_type": "call"
}
] |
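For comparison, a compact O(n) next-greater-element pass using a plain list as the stack; unlike the version above, it returns the answers aligned with the input order.
def next_greater(items):
    result = [-1] * len(items)
    stack = []  # indices whose next greater element is still unknown
    for i, value in enumerate(items):
        while stack and items[stack[-1]] < value:
            result[stack.pop()] = value
        stack.append(i)
    return result

print(next_greater([2, 6, 5, 4, 19]))  # [6, 19, 19, 19, -1]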
34327083443
|
import pickle
import re
from pathlib import Path
from typing import List
from IPython.display import display
import os.path as op
from datetime import datetime
import pandas as pd
from tqdm.notebook import tqdm
from matplotlib import pyplot as plt
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
)
from src.helpers.mylogger import get_handler
import logging
handler = get_handler()
log = logging.getLogger(__name__)
log.handlers[:] = []
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class EntryNotFoundError(Exception):
pass
class BidirectionalEntriesFoundError(Exception):
pass
class MultipleEntryFoundError(Exception):
pass
class ClassLabels:
DISRUPTIVE = "Disruptive"
NON_DISRUPTIVE = "Non-disruptive"
NOT_AVAILABLE = "N/A"
def unzip_res_range(res_range):
"""
    Converts ranges in the form [2-210] or [3-45,47A,47B,51-67] into lists of
    strings including all numbers in these ranges, in order.
Items are of type <str>.
"""
res_ranges = res_range.strip()[1:-1].split(',')
index_list = []
for r in res_ranges:
if re.match('.+-.+', r):
a, b = r.split('-')
index_list += [str(n) for n in range(int(a), int(b) + 1)]
else:
index_list.append(r)
return index_list
# print(unzip_res_range("[95-96,98-100,102-103,122,262,266-267,270,273,294]"))
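# A quick worked example of the expansion, under the same bracketed-range format:
# unzip_res_range("[3-5,47A,51-52]") -> ['3', '4', '5', '47A', '51', '52']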
def get_mutation_position(mutation):
return mutation[1:-1]
class CancerValidation:
def __init__(self, interfaces_data_path):
self.interfaces_data = self.load_data(interfaces_data_path)
@staticmethod
def load_data(data_path):
interfaces_data = pd.read_csv(
data_path, sep="\t", usecols=["P1", "P2", "P1_IRES", "P2_IRES"]
)
interfaces_data["P1_IRES"] = interfaces_data["P1_IRES"].apply(lambda x: unzip_res_range(x))
interfaces_data["P2_IRES"] = interfaces_data["P2_IRES"].apply(lambda x: unzip_res_range(x))
return interfaces_data
def check(self, protein: str, mutation: str, interactor: str):
# print(f"Checking ..\n"
# f"> PROTEIN: {protein} \n"
# f"> MUTATION: {mutation} \n"
# f"> INTERACTOR: {interactor}")
try:
data, res = self._get_entry(protein, interactor)
except EntryNotFoundError:
return ClassLabels.NOT_AVAILABLE
mut_pos = get_mutation_position(mutation)
if mut_pos in res:
return ClassLabels.DISRUPTIVE
else:
return ClassLabels.NON_DISRUPTIVE
@staticmethod
def _handle_check_duplicated_entries(data, p_ires) -> List[int]:
"""
Checks if all entries of given data duplicated. Each cell contains list in it.
If all entries are duplicated entries, then we have no problem, just get the res.
"""
# display(data)
# In order for us to check if the entries are duplicated, we'll have to
# convert list item in the cells to tuple. Otherwise, we get the following error:
# TypeError: unhashable type: 'list'
data_tuple = data[["P1_IRES", "P2_IRES"]].applymap(lambda x: tuple(x))
# no problem, then.
if p_ires == "P1_IRES":
[p1] = data["P1"].unique()
[p2] = data["P2"].unique()
elif p_ires == "P2_IRES":
[p2] = data["P1"].unique()
[p1] = data["P2"].unique()
else:
raise ValueError(f"Illegal argument provided for parameter `p_ires`: {p_ires}")
# check if all entries are duplicated
if data_tuple.duplicated(keep=False).all():
log.warning(
f"Multiple entries but they were duplicated. PROTEIN: {p1}, INTERACTOR: {p2}"
)
[p_res] = data_tuple[p_ires].unique()
p_res = list(p_res)
return p_res
else:
log.error("MultipleEntryError with following data: ")
display(data)
p_res_list = data[p_ires].tolist()
p_res = sorted(
set([item for sublist in p_res_list for item in sublist])
)
log.error(F"Returned RES: {p_res}")
return p_res
# data.to_csv("ERROR_data.csv", index=False)
# raise MultipleEntryFoundError
def _get_entry(self, protein, interactor):
a_b_interface_data = self.interfaces_data[
(self.interfaces_data["P1"] == protein) &
(self.interfaces_data["P2"] == interactor)
]
b_a_interface_data = self.interfaces_data[
(self.interfaces_data["P1"] == interactor) &
(self.interfaces_data["P2"] == protein)
]
# Both of them contains entry -- this is an unlikely situation, unless there is problem with the text file..
if len(a_b_interface_data) != 0 and len(b_a_interface_data) != 0:
raise BidirectionalEntriesFoundError
# The first lookup has entries and the second is empty
elif len(a_b_interface_data) != 0 and len(b_a_interface_data) == 0:
if len(a_b_interface_data) != 1:
p1_res = self._handle_check_duplicated_entries(a_b_interface_data, "P1_IRES")
else:
[p1_res] = a_b_interface_data["P1_IRES"]
return a_b_interface_data, p1_res
# The first lookup is empty and the second has entries
elif len(a_b_interface_data) == 0 and len(b_a_interface_data) != 0:
if len(b_a_interface_data) != 1:
p2_res = self._handle_check_duplicated_entries(b_a_interface_data, "P2_IRES")
else:
[p2_res] = b_a_interface_data["P2_IRES"]
return b_a_interface_data, p2_res
# Both of them are empty
else:
raise EntryNotFoundError
def validate(
self,
tcga_type: str,
tcga_data: pd.DataFrame,
):
tcga_data_validation = tcga_data.copy()
validation_results = []
for index, row in tqdm(
tcga_data_validation.iterrows(),
total=len(tcga_data_validation)
):
protein = row["UniProt_ID"]
mutation = row["Mutation"]
interactor = row["Interactor_UniProt_ID"]
valid_label = self.check(
protein=protein, mutation=mutation, interactor=interactor
)
# print(f">> RESULT: {valid_label}")
validation_results.append(valid_label)
tcga_data_validation["Validation"] = validation_results
tcga_data_validation["Validation"].value_counts().plot(
kind="bar", title=f"{tcga_type} Validation Results"
)
plt.show()
tcga_data_validation_processed = process_validation_data(tcga_data_validation)
tcga_data_validation_processed["Validation"].value_counts().plot(
kind="bar", title=f"{tcga_type} Validation Processed Results"
)
plt.show()
metrics_data = get_scoring_metrics(tcga_data_validation_processed)
num_entries = len(tcga_data_validation)
counts = tcga_data_validation_processed["Validation"].value_counts().to_dict()
num_disruptive = counts.get(0, 0)  # .get with a default avoids a KeyError when a class is absent
num_non_disruptive = counts.get(1, 0)
metrics_data.insert(0, "TCGA", tcga_type)
metrics_data.insert(1, "#_Entries", num_entries)
metrics_data.insert(2, "#_Disruptive", num_disruptive)
metrics_data.insert(3, "#_Non_disruptive", num_non_disruptive)
return {
"data_validation": tcga_data_validation,
"data_validation_processed": tcga_data_validation_processed,
"metrics_data": metrics_data,
}
@staticmethod
def validate_single_class(
tcga_type: str,
output_already_calculated: dict,
single_class: int,
):
"""
Requires the positions to be already calculated.
"""
tcga_data_validation = output_already_calculated["data_validation"]
print(f"Using the class {single_class} only.")
tcga_data_validation = tcga_data_validation[
tcga_data_validation["Prediction"] == single_class
].copy()
tcga_data_validation_processed = process_validation_data(tcga_data_validation)
metrics_data = get_scoring_metrics(tcga_data_validation_processed)
num_entries = len(tcga_data_validation)
counts = tcga_data_validation_processed["Validation"].value_counts().to_dict()
num_disruptive = counts.get(0, 0)  # .get with a default avoids a KeyError when a class is absent
num_non_disruptive = counts.get(1, 0)
metrics_data.insert(0, "TCGA", tcga_type)
metrics_data.insert(1, "#_Entries", num_entries)
metrics_data.insert(2, "#_Disruptive", num_disruptive)
metrics_data.insert(3, "#_Non_disruptive", num_non_disruptive)
return {
"data_validation": tcga_data_validation,
"data_validation_processed": tcga_data_validation_processed,
"metrics_data": metrics_data,
}
@staticmethod
def extract_output_dict(name, dict_obj):
folder_path = "outputs"
Path(f"{folder_path}").mkdir(parents=True, exist_ok=True)
current_date = datetime.now().strftime("%Y-%m-%d")
file_name = f"{name}_{current_date}.pickle"
file_path = op.join(folder_path, file_name)
if op.exists(file_path):
raise FileExistsError("File already exists")
with open(file_path, "wb") as f:
    pickle.dump(dict_obj, f)
print("Object extracted successfully.")
@staticmethod
def load_output_dict(pickle_path):
with open(pickle_path, "rb") as f:
    obj_loaded = pickle.load(f)
return obj_loaded
def test_entry_not_found(df, p, i):
a_b = df[
(df["P1"] == p) &
(df["P2"] == i)
]
b_a = df[
(df["P1"] == i) &
(df["P2"] == p)
]
assert len(a_b) == len(b_a) == 0
def get_scoring_metrics(tcga_validation_data):
y_true = tcga_validation_data["Validation"]
y_pred = tcga_validation_data["Prediction"]
metrics_data = pd.DataFrame(
[
accuracy_score(y_true, y_pred),
balanced_accuracy_score(y_true, y_pred),
f1_score(y_true, y_pred),
precision_score(y_true, y_pred),
recall_score(y_true, y_pred),
matthews_corrcoef(y_true, y_pred),
],
index=["ACCURACY", "BALANCED_ACCURACY", "F1", "PRECISION", "RECALL", "MATTHEWS_COR"]
).T
return metrics_data
def process_validation_data(tcga_data: pd.DataFrame):
"""
Process the validation data.
1. Drop N/A entries
2. Convert Labels as follows:
DISRUPTIVE → 0
NON_DISRUPTIVE → 1
3. Convert its type to int.
"""
tcga_processed = tcga_data[tcga_data["Validation"] != "N/A"].copy()
tcga_processed["Validation"] = tcga_processed["Validation"].replace(
{
ClassLabels.DISRUPTIVE: 0,
ClassLabels.NON_DISRUPTIVE: 1,
}
)
tcga_processed = tcga_processed.astype({"Validation": "int"})
return tcga_processed
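# A minimal sketch of process_validation_data on a toy frame (the frame itself is hypothetical):
# df = pd.DataFrame({"Validation": [ClassLabels.DISRUPTIVE, ClassLabels.NOT_AVAILABLE, ClassLabels.NON_DISRUPTIVE]})
# process_validation_data(df)["Validation"].tolist()  # -> [0, 1]; the "N/A" row is dropped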
|
ibrahimberb/Predicting-Mutation-Effects
|
src/dev/CancerValidation/A1/utils.py
|
utils.py
|
py
| 11,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "src.helpers.mylogger.get_handler",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "tqdm.notebook.tqdm",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.balanced_accuracy_score",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.matthews_corrcoef",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 329,
"usage_type": "attribute"
}
] |
72946767548
|
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import LatentDirichletAllocation
import os
from time import strftime
# Python 3.5
def load_data(filename):
return np.loadtxt(filename, skiprows=1, delimiter=' ')
def save_predictions(X, model):
filename = 'random_forest_' + strftime('%b%d%H%M%S') + '.csv'
preds = model.predict(X).reshape((len(X), 1))
ids = (np.arange(1, len(X) + 1)).reshape((len(X), 1))
np.savetxt(
os.path.join('predictions', filename),
np.hstack((ids, preds)),
fmt='%d',
delimiter=',',
header='Id,Prediction',
comments=''
)
def decompose(X, d, args={}):
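# NOTE: despite the "pca" naming used throughout this file, the decomposition below is
# LatentDirichletAllocation, not PCA.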
pca_model = LatentDirichletAllocation(n_components=d, **args)
pca_model.fit(X)
return pca_model
def train(X, y, args={}):
model = RandomForestClassifier(**args)
model.fit(X, y)
return model
def test(X, y, model):
return np.sum(model.predict(X) == y) / len(y)
train_raw = load_data('training_data.txt')
n_train = 10000
n_val = len(train_raw) - n_train
X_train, y_train = train_raw[:, 1:][:n_train], train_raw[:, 0][:n_train]
X_val, y_val = train_raw[:, 1:][n_train:], train_raw[:, 0][n_train:]
# reduce dimensions from 1000 to 10
pca_model = decompose(X_train, 10)
X_train_red = pca_model.transform(X_train)
X_val_red = pca_model.transform(X_val)
model = train(X_train_red, y_train, args={})
print('train / val split : %d / %d' % (n_train, n_val))
print('train acc :', test(X_train_red, y_train, model))
print('val acc :', test(X_val_red, y_val, model))
test_raw = load_data('test_data.txt')
X_test = test_raw[:, :]
X_test_red = pca_model.transform(X_test)
# save_predictions(X_test_red, model)
'''
<output>
train / val split : 10000 / 10000
train acc : 0.9892
val acc : 0.6557
'''
|
bchidamb/AmazonFeels
|
shit_tier/random_forest_pca.py
|
random_forest_pca.py
|
py
| 1,890 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "numpy.loadtxt",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.hstack",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.LatentDirichletAllocation",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 49,
"usage_type": "call"
}
] |
23907002609
|
import numpy as np
import pandas as pd
import scipy.spatial.distance as spd
import scipy.stats as sps
import sklearn.model_selection as skm
import sklearn.metrics as skmetrics
import matplotlib.pyplot as plt
import seaborn as sb
from hw1_modules import *
# read data from CSV to array
data = np.array(pd.read_csv("train.csv").values)
#separate values and labels into separate arrays
values = data[:,1:]
labels = data[:,0]
#convert labels array to vertical, 2d array of one column
labels = np.expand_dims(labels, axis=1)
#initialize confusion matrix with zeros (np.ones would inflate every cell's count by 1)
cf = np.zeros((10,10), dtype=int)
#initialize cumulative accuracy
accuracy = 0
#set number of folds
number_folds = 3
#set k for k neighbors
k_neighbors = 3
#create kfold iterating object
kf = skm.KFold(n_splits=number_folds)
for train_idx, test_idx in kf.split(values, labels):
print("Dividing data")
#subset data using indexes generated by kfold object
train_data = values[train_idx]
test_data = values[test_idx]
train_labels = labels[train_idx]
test_labels = labels[test_idx]
#run one iteration of testing with knn
print("Testing data")
predicted_labels = knn_predict_class(train_data, train_labels, test_data, k_neighbors)
print("Accuracy for this run: " + str(sum(predicted_labels == test_labels)/len(test_labels)))
#cumulative accuracy
accuracy += sum(predicted_labels == test_labels)/len(test_labels)
#add this run's confusion values to cumulative confusion matrix
cf = cf + skmetrics.confusion_matrix(test_labels, predicted_labels)
#calculate average accuracy from cumulative
accuracy = accuracy/number_folds
print(accuracy)
#Create and display plot for confusion matrix
ax = sb.heatmap(cf, annot=True, fmt="d")
ax.set(xlabel="Predicted Label", ylabel="True Label")
plt.show()
|
terry99999/M_hw1
|
knn.py
|
knn.py
|
py
| 1,791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
}
] |
70047131707
|
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
import cv2 as cv
def draw_label(path):
tree = ET.parse(path)
img_out = np.zeros(shape=(1024, 1280))
img_list_x = []
img_list_y = []
for elem in tree.iterfind('object'):
mylist_x = []
mylist_y = []
# print(elem.tag, elem.attrib)
for elem_1 in elem.iterfind('polygon/pt'):
object_x = elem_1.find("x").text
object_y = elem_1.find("y").text
x = int(object_x)
y = 1024 - int(object_y)
if x < 0:
x = 0
if x > 1279:
x = 1279
if y < 0:
y = 0
if y > 1023:
y = 1023
mylist_x.append(x)
mylist_y.append(y)
img_list_x.append(x)
img_list_y.append(y)
img_out.itemset((y, x), 255)
mylist = list(zip(mylist_x, mylist_y))
pts = np.array(mylist, np.int32)
cv.polylines(img_out, [pts], True, (255, 255, 255), 2)  # draw the polygon outline
cv.fillPoly(img_out, [pts], (255, 255, 255))  # fill the polygon interior
Alllist = list(zip(img_list_x, img_list_y))  # collect all annotation points
# cv.imwrite('./picture/label.png', img_out)
return img_out
def getlabel(path):
img1 = draw_label(path)
list_out = np.zeros(shape=(1024, 1280))
for i in range(img1.shape[0]):
for j in range(img1.shape[1]):
if img1[i, j] == 255:
list_out[i, j] = 1
return list_out
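# A vectorized equivalent of the pixel loop above (same result, since img1 holds only 0/255):
# list_out = (img1 == 255).astype(float)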
|
Bagpip/-HSI-
|
label_test.py
|
label_test.py
|
py
| 1,589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "cv2.polylines",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.fillPoly",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 45,
"usage_type": "call"
}
] |
40107035382
|
version = "0.8"
import os, io
import chardet
from functools import wraps
from tempfile import mkstemp, mkdtemp
from json import JSONEncoder as _JSONEncoder
from pathlib import Path
from collections import deque
from colorama import Fore as F
markdown = None
class LabelledTree (object) :
def __init__ (self, label, children=[]) :
self.label = str(label)
self.children = list(children)
def _print (self, out, prefix=None, last=True) :
if prefix is None :
out.write(f"{self.label}\n")
elif last :
out.write(f"{prefix}{F.WHITE}└─{F.RESET} {self.label}\n")
else :
out.write(f"{prefix}{F.WHITE}├─{F.RESET} {self.label}\n")
for child in self.children :
if prefix is None :
child._print(out, "", child is self.children[-1])
elif last :
child._print(out, prefix + " ", child is self.children[-1])
else :
child._print(out, prefix + f"{F.WHITE}│{F.RESET} ", child is self.children[-1])
def __str__ (self) :
out = io.StringIO()
self._print(out)
return out.getvalue().rstrip()
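# Usage sketch (colorama color codes omitted from the expected output for readability):
# print(LabelledTree("root", [LabelledTree("a"), LabelledTree("b")]))
# root
# ├─ a
# └─ b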
class tree (dict) :
def __getattr__ (self, key) :
cls = self.__class__
val = self.get(key, None)
if isinstance(val, dict) and not isinstance(val, cls) :
val = self[key] = tree(val)
elif isinstance(val, list) :
val = self[key] = [tree(v) if isinstance(v, dict) and not isinstance(v, cls)
else v for v in val]
return val
def __setattr__ (self, key, val) :
if isinstance(val, dict) :
val = self.__class__(val)
self[key] = val
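# Usage sketch: attribute access transparently wraps nested dicts.
# t = tree({"a": {"b": 1}})
# t.a.b  # -> 1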
cwd = Path().absolute()
def new_path (type="file", **args) :
if type == "file" :
fd, path = mkstemp(**args)
os.close(fd)
elif type == "dir" :
path = mkdtemp(**args)
else :
raise ValueError(f"unsupported path type {type!r}")
return Path(path).absolute().relative_to(cwd)
encoding = tree(encoding="utf-8",
errors="replace")
class JSONEncoder (_JSONEncoder) :
def default (self, obj) :
handler = getattr(obj, "__json__", None)
if handler is None :
return super().default(obj)
else :
return handler()
def cached_property (method) :
@wraps(method)
def wrapper (self) :
name = method.__name__
if not hasattr(self, "__cache") :
self.__cache = {}
if name not in self.__cache :
self.__cache[name] = method(self)
return self.__cache[name]
@wraps(method)
def delete (self) :
self.__cache.pop(method.__name__, None)
return property(wrapper, None, delete, method.__doc__)
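# Usage sketch (Thing and expensive_load are hypothetical): the first access runs the
# method, later accesses hit the per-instance cache, and "del obj.data" invalidates it.
# class Thing :
#     @cached_property
#     def data (self) :
#         return expensive_load()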
def recode (path) :
with open(path, "rb") as inf :
raw = inf.read()
try :
enc = chardet.detect(raw)
src = raw.decode(enc["encoding"], errors="replace")
except Exception :
return
with open(path, "w", **encoding) as out :
out.write(src)
return src
def md (text, inline=True) :
# only load if necessary to speedup prog startup
global markdown
from markdown import markdown
#
try :
html = markdown(str(text))
if inline :
html = html.replace("<p>", "").replace("</p>", "")
return html.replace("§", " ")
except Exception :
return text.replace("§", " ")
_esc = {c : f"\\{c}" for c in r"\`*_{}[]()#+-.!"}
def mdesc (text) :
return str(text).translate(_esc)
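# e.g. mdesc("a*b_c") -> "a\*b\_c" (markdown control characters get backslash-escaped)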
def chmod_r (path) :
q = deque([Path(path)])
while q :
sub = q.popleft()
if sub.is_dir() :
sub.chmod(sub.stat().st_mode | 0o750)
q.extend(sub.iterdir())
else :
sub.chmod(sub.stat().st_mode | 0o640)
|
fpom/badass
|
badass/__init__.py
|
__init__.py
|
py
| 3,857 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "colorama.Fore.WHITE",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.WHITE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore.WHITE",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.close",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "json.JSONEncoder",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "chardet.detect",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "markdown.markdown",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 122,
"usage_type": "call"
}
] |
16644551299
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def ex_deal(df_Int, df_ex):
columns = ['顺序', '氮素', '频率', '刈割']  # order, nitrogen, frequency, mowing -- kept in Chinese because they are the dataframe keys
df_Int = pd.concat([df_Int, pd.DataFrame(columns=columns)])
for item in range(df_Int.shape[0]):
for jtem in range(df_ex.shape[0]):
if int(df_Int.iloc[item, 0]) == int(df_ex.iloc[jtem, 1]):
df_Int.loc[item, '顺序'] = df_ex.iloc[jtem, 1]
df_Int.loc[item, '氮素'] = df_ex.iloc[jtem, 2]
df_Int.loc[item, '频率'] = df_ex.iloc[jtem, 3]
df_Int.loc[item, '刈割'] = df_ex.iloc[jtem, 4]
# df_Int.drop([0, 19], inplace=True)
return df_Int
def MF(g, N="氮素",year=2010):
# if year==2008:
# if N == "刈割":
# g=
# if N == "频率":
#
# else:
if N == "刈割":
if g == 0.0:
g = "nm"
else:
g = "m"
if N == "频率":
if g == 2.0:
g = "l"
elif g == 0.0:
g = "nan"
else:
g = "h"
return g
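# e.g. MF(0.0, "刈割") -> "nm" (not mowed); MF(2.0, "频率") -> "l" (low frequency)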
def loop_chain_nan(year, gb, D, N="氮素"):
# if year == 2008:
# g = MF(g_[0], N,year=2008)
# D["loop"][g], D["nan"][g], D["chain"][g] = 0, 0, 0
# for g_ in gb:
# print(g_[1][3])
# for item in g_[1][3]:
# if item == 0: # 链
# D["chain"][g] += 1
# elif item == -0.15:
# D["nan"][g] += 1
# print(2008,item)
# else: # 环
# D["loop"][g] += 1
if year == 2009:
for g_ in gb:
g = MF(g_[0], N)
# if g!=0.0:
D["loop"][g],D["nan"][g], D["chain"][g] = 0, 0, 0
for item in g_[1][3]:
if item == 0:  # chain
D["chain"][g] += 1
elif item == -0.15:
D["nan"][g] += 1
else:  # loop
D["loop"][g] += 1
elif year > 2009:
for g_ in gb:
g = MF(g_[0], N)
for item in g_[1][3]:
if item == 0:
D["chain"][g] += 1
elif item == -0.15:
D["nan"][g] += 1
else:
D["loop"][g] += 1
return D
def main():
path = "C:/Users/97899/Desktop/N/"
df_ex = pd.read_excel(path + "实验处理_ex.xls")
ind = np.linspace(2008, 2020, 13)
D = {}
D["loop"], D["chain"], D["nan"] = {}, {}, {}
for year in ind:
df_cir = pd.read_excel(path + "Network/circle20.xls", sheet_name=str(int(year)))
df_cir = ex_deal(df_cir, df_ex)
gb = df_cir.groupby("氮素")
D = loop_chain_nan(year, gb, D)
gm = df_cir.groupby("刈割")
D = loop_chain_nan(year, gm, D, "刈割")
gf = df_cir.groupby("频率")
D = loop_chain_nan(year, gf, D, "频率")
print(D)
net_loop = []
net_chain = []
net_nan = []
'''Nitrogen addition (氮素)'''
for key in D["loop"].keys():
sum_ = D["loop"][key] + D["chain"][key] + D["nan"][key]
print(key,sum_)
net_loop.append(D["loop"][key] / sum_)
net_chain.append(D["chain"][key] / sum_)
net_nan.append(D["nan"][key] / sum_)
print("非竞争", len(net_nan), "链", len(net_chain), "环", len(net_loop))
labels = ['N=0', 'N=1', 'N=2', 'N=3', 'N=5', 'N=10', 'N=15', 'N=20', 'N=50']
width = 0.5 # the width of the bars: can also be len(x) sequence
net = (np.array(net_loop) + np.array(net_chain)).tolist()
print("竞争主导", len(net))
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
ax1 = plt.subplot(212)
print(net_loop[:9],net_chain[:9],net_nan[:9])
ax1.bar(labels, net_loop[:9], width, label='ICN', color="darkcyan")
ax1.bar(labels, net_chain[:9], width, bottom=net_loop[:9], label='TCN', color="turquoise")
ax1.bar(labels, net_nan[:9], width, bottom=net[:9], label='SCS', color="yellow")
ax1.set_ylabel('Ratio', fontdict={"size": 20})
ax1.set_xlabel('N addition rate '"$(gNm^{-2}year^{-1})$", fontdict={"size": 15})
width2 = 0.4
'''Mowing (刈割)'''
label_2 = ['No-Mowing', 'Mowing']
ax2 = plt.subplot(221)
ax2.bar(label_2, net_loop[9:11], width2, label='ICN', color="darkcyan")
ax2.bar(label_2, net_chain[9:11], width2, bottom=net_loop[9:11], label='TCN', color="turquoise")
ax2.bar(label_2, net_nan[9:11], width2, bottom=net[9:11], label='SCS', color="yellow")
ax2.set_ylabel('Ratio', fontdict={"size": 20})
ax2.set_xlabel('Mowing', fontdict={"size": 15})
'''Frequency (频率)'''
label_3 = ['Zero','Low (Twice)', 'High (Monthly)']
ax3 = plt.subplot(222) # 222
ax3.bar(label_3, net_loop[11:14], width2, label='ICN', color="darkcyan")
ax3.bar(label_3, net_chain[11:14], width2, bottom=net_loop[11:14], label='TCN', color="turquoise")
ax3.bar(label_3, net_nan[11:14], width2, bottom=net[11:14], label='SCS', color="yellow") # [11:13]
# ax3.set_ylabel('Ratio', fontdict={"size": 15})
ax3.set_xlabel('Frequency', fontdict={"size": 15})
ax3.legend(ncol=1, bbox_to_anchor=(1.2, 1), fontsize=13)
# save before showing: plt.show() clears the current figure, so calling savefig afterwards would write a blank image
plt.savefig(path+'Figure/bar_distribution.png')
plt.show()
main()
|
QingqingSun-Bao/GitResp2
|
micro/Fig10_bar_distribution.py
|
Fig10_bar_distribution.py
|
py
| 5,270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.concat",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 148,
"usage_type": "name"
}
] |
170942993
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from rp_ui_harness import RequestPolicyTestCase
from marionette import expectedFailure
from marionette_driver import Wait
import re
msg = ("Marionette -- test_logging_error_detection -- "
"this test is expected to fail")
class ErrorDetectionTests(object):
################
# Test Methods #
################
def test_normal_error(self, n=1):
self.error_triggerer.trigger_error(
"error", "backgroundscript", msg=msg)
self._do_checks(
n,
r'^console.error:\s+\[RequestPolicy\] ' + msg + '$')
def test_reference_error(self, n=1):
self.error_triggerer.trigger_error(
"ReferenceError", "backgroundscript")
self._do_checks(
n,
(
r"^JavaScript error: "
r"chrome://rpcontinued/content/ui-testing/services\.js, "
r"line [0-9]+: ReferenceError: "
)
)
def test_reference_error_in_promise_chain(self, n=1):
self.error_triggerer.trigger_error(
"ReferenceError:Promise", "backgroundscript")
self._do_checks(
n,
(
r"^JavaScript error: "
r"chrome://rpcontinued/content/ui-testing/services\.js, "
r"line [0-9]+: ReferenceError: "
)
)
##########################
# Private Helper Methods #
##########################
def _do_checks(self, n, message_regexp):
raise NotImplementedError
class ErrorDetectionTestCase(RequestPolicyTestCase):
expected_error = False
def setUp(self):
super(ErrorDetectionTestCase, self).setUp()
self.gecko_log.start_ignoring_errors(expected=self.expected_error)
def tearDown(self):
try:
self.gecko_log.stop_ignoring_errors()
finally:
super(ErrorDetectionTestCase, self).tearDown()
class TestGeckoLog(ErrorDetectionTests, ErrorDetectionTestCase):
def setUp(self):
super(TestGeckoLog, self).setUp()
self._assert_n_errors(0)
##########################
# Private Helper Methods #
##########################
def _do_checks(self, n, message_regexp):
self._assert_n_errors(n)
self._assert_error(message_regexp)
def _get_error_lines_including_ignored_errors(self):
return self.gecko_log.get_error_lines_of_current_test(
return_ignored_as_well=True)
def _get_error_lines(self):
return self.gecko_log.get_error_lines_of_current_test()
def _assert_n_errors(self, n):
Wait(self.marionette).until(
lambda _: (
len(self._get_error_lines_including_ignored_errors()) == n
)
)
self.assertEqual(0, len(self._get_error_lines()))
def _assert_error(self, message_regexp):
error_lines = self._get_error_lines_including_ignored_errors()
line = error_lines[-1]
self.assertTrue(
re.search(message_regexp, line),
msg=("String \"" + line + "\" matched!"))
class TestFailureOnTearDown(ErrorDetectionTests, ErrorDetectionTestCase):
expected_error = True
@expectedFailure
def tearDown(self):
super(TestFailureOnTearDown, self).tearDown()
##########################
# Private Helper Methods #
##########################
# Explicitly do *not* perform checks in _do_checks(), to test if the
# TestRunner's tearDown fn waits long enough to detect all logging errors.
def _do_checks(self, n, message_regexp):
pass
|
RequestPolicyContinued/requestpolicy
|
tests/marionette/rp_puppeteer/tests-quick/test_error_detection.py
|
test_error_detection.py
|
py
| 3,791 |
python
|
en
|
code
| 253 |
github-code
|
6
|
[
{
"api_name": "rp_ui_harness.RequestPolicyTestCase",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "marionette_driver.Wait",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "marionette.expectedFailure",
"line_number": 117,
"usage_type": "name"
}
] |
10426113272
|
from measurements.models import Location, Station, SourceType, Network
from django.contrib.gis.geos import Point
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import pandas as pd
import re
IOC = "http://www.ioc-sealevelmonitoring.org/station.php?code={}"
stations = (
('Trieste', 'TR22'),
('Venice', 'VE19'),
('Ancona', 'AN15'),
('S. Benedetto Del Tronto', 'SB36'),
('Stari Grad', 'stari'),
('Vela Luka', 'vela'),
('Sobra', 'sobr'),
('Otranto', 'OT15'),
('Kerkyra, Corfu', 'corf'),
('Crotone', 'CR08'),
('Le Castella', 'lcst'),
('Itea', 'itea'),
('Panormos', 'pano'),
('Aigio', 'aigi'),
('Katakolo', 'kata'),
# ('Kyparissia', 'kypa'),
)
ioc_source, created = SourceType.objects.get_or_create(code='ioc')
ioc_network, created = Network.objects.get_or_create(code='ioc')
# IOC stations
for label, code in stations:
r = requests.get(IOC.format(code))
# print(r.text)
soup = BeautifulSoup(r.text, "html.parser")  # explicit parser; bs4 warns when none is given
for elem in soup(text='Latitude '):
lat = float(elem.find_next('td').contents[0])
for elem in soup(text='Longitude '):
lon = float(elem.find_next('td').contents[0])
# print(lon, lat)
l, created = Location.objects.get_or_create(label=label)
l.geo = Point(lon, lat)
l.save()
# print(label, l)
s, created = Station.objects.get_or_create(code=code,
label=label,
source=ioc_source,
network=ioc_network,
location=l)
|
CNR-ISMAR/ecoads
|
scripts/import_station_locations.py
|
import_station_locations.py
|
py
| 1,738 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "measurements.models.SourceType.objects.get_or_create",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "measurements.models.SourceType.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "measurements.models.SourceType",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "measurements.models.Network.objects.get_or_create",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "measurements.models.Network.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "measurements.models.Network",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "measurements.models.Location.objects.get_or_create",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "measurements.models.Location.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "measurements.models.Location",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.geos.Point",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "measurements.models.Station.objects.get_or_create",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "measurements.models.Station.objects",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "measurements.models.Station",
"line_number": 48,
"usage_type": "name"
}
] |
27535721148
|
import json
from web3 import Web3
from decimal import Decimal
from router import *
import time
# add blockchain connection information
cronos_mainnet_rpc = "ws://rpc.vvs.finance/"
w3 = Web3(Web3.WebsocketProvider(cronos_mainnet_rpc, websocket_timeout= 6000))
ERC20ABI = json.load(open('./erc20_abi.abi'))
#getSelector("swapExactTokensForTokens(uint256,uint256,address[],address,uint256)")= 0x38ed1739
#getSelector("swapExactETHForTokens(uint256 amountOutMin, address[] path, address to, uint256 deadline)")= 0x7ff36ab5
#getSelector("swapExactTokensForETH(uint256,uint256,address[],address,uint256)")= 0x18cbafe5
mycontract = '0x109C48345e84459C658e79e806F6DdB236DbDD26'
# multilswap = Web3.toChecksumAddress(mycontract)
# multilswap_abi = json.loads()
# multilswap_contract = w3.eth.contract(address = multilswap, abi= multilswap_abi)
# amountIn = optimalAmount
def dataswap_encode(contract, amountIn, amountOut, path, mycontract):
deadline = 1000
dataswap = contract.encodeABI(fn_name="swapExactTokensForTokens", args=[amountIn,amountOut, path, mycontract,deadline])
return dataswap
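# Hedged usage sketch -- WCRO_ADDRESS, USDC_ADDRESS and minOut are hypothetical placeholders:
# calldata = dataswap_encode(VVS_ROUTER_CONTRACT, amountIn, minOut, [WCRO_ADDRESS, USDC_ADDRESS], mycontract)
# This returns ABI-encoded calldata for swapExactTokensForTokens (selector 0x38ed1739, per the notes above).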
def dataswap(route,tokenIn, tokenOut, amountIn, mycontract):
# route = trade['route']
tos = [t['router:'] for t in route]
data = []
_tokenInapproveaddr = []
n= 0
for pair in route:
if pair['router:'] == '0x145863Eb42Cf62847A6Ca784e6416C1682b1b2Ae':
contract = VVS_ROUTER_CONTRACT
elif pair['router:'] == '0x145677FC4d9b8F19B5D56d1820c48e0443049a30':
contract = MMF_ROUTER_CONTRACT
elif pair['router:'] == '0xcd7d16fB918511BF7269eC4f48d61D79Fb26f918':
contract = CRONA_ROUTER_CONTRACT
elif pair['router:'] == '0x5bFc95C3BbF50579bD57957cD074fa96a4d5fF9F':
contract = CYBORG_ROUTER_CONTRACT
if n == 0:
if pair['token0']['address'] == tokenIn['address']:
tokenOut = pair['token1']
else:
tokenOut = pair['token0']
path = [tokenIn['address'],tokenOut['address']]
_tokenInapproveaddr.append(tokenIn['address'])
amountOut_list = contract.functions.getAmountsOut(amountIn, path).call()
amountOut= amountOut_list[1]
print('amountout1:',amountOut)
encode = dataswap_encode(contract, amountIn, amountOut, path, mycontract)
data.append(encode)
# approve =
tokenIn = tokenOut
amountIn = amountOut
if n > 0:
if pair['token0']['address'] == tokenIn['address']:
tokenOut = pair['token1']
else:
tokenOut = pair['token0']
path = [tokenIn['address'],tokenOut['address']]
_tokenInapproveaddr.append(tokenIn['address'])
amountOut_list = contract.functions.getAmountsOut(amountIn, path).call()
amountOut= amountOut_list[1]
print('amountout2:',amountOut)
encode = dataswap_encode(contract, amountIn, amountOut, path, mycontract)
data.append(encode)
# approve =
tokenIn = tokenOut
amountIn = amountOut
n+=1
print('profit:', amountIn - 50*pow(10,18))
return tos, data, _tokenInapproveaddr
|
Galahad091/My-arb-on-fantom
|
test/encode_data.py
|
encode_data.py
|
py
| 2,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "web3.Web3",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "web3.Web3.WebsocketProvider",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
}
] |
23125697422
|
import os
import sys
sys.path.append("..")
import taobaoTry.taobaoTryUtils
from task.logUtils import logUtils
class taobaoTryTask:
def enum(**enums):
return type('Enum', (), enums)
taskType = enum(JingXuan=1, All=2)
mTaskTypeFor = taskType.All
taobaoTryTaskLockFile = ".." + os.path.sep + "lockFile" + os.path.sep + "taobaoTry.lock"
def __init__(self,taskTypeFor=taskType.All):
global mTaskTypeFor
taobaoTryTask.mTaskTypeFor = taskTypeFor
if(taskTypeFor==taobaoTryTask.taskType.JingXuan):
taobaoTryTask.taobaoTryTaskLockFile = ".." + os.path.sep + "lockFile" + os.path.sep + "taobaoJingXuanTry.lock"
else:
taobaoTryTask.taobaoTryTaskLockFile = ".." + os.path.sep + "lockFile" + os.path.sep + "taobaoAllTry.lock"
if (os.path.exists(taobaoTryTask.taobaoTryTaskLockFile)):
if (taskTypeFor == taobaoTryTask.taskType.JingXuan):
logUtils.info("精选类型文件已存在,即将退出")
else:
logUtils.info("所有类型文件已存在,即将退出")
os._exit(0)
else:
# os.mknod('.lock')
if (taskTypeFor == taobaoTryTask.taskType.JingXuan):
logUtils.info("精选类型创建文件")
else:
logUtils.info("所有类型创建文件")
open(taobaoTryTask.taobaoTryTaskLockFile, "w")
if(taskTypeFor==taobaoTryTask.taskType.JingXuan):
self.actionJingXuanTask()
else:
self.actionAllTask()
def __del__(self):
if (os.path.exists(taobaoTryTask.taobaoTryTaskLockFile)):
os.remove(taobaoTryTask.taobaoTryTaskLockFile)
if (taobaoTryTask.mTaskTypeFor == taobaoTryTask.taskType.JingXuan):
logUtils.info("精选类型退出程序")
else:
logUtils.info("所有类型退出程序")
def actionAllTask(self):
logUtils.info("actionAllTask所有")
taobaoTry.taobaoTryUtils.taobaoTryUtils().handlePcTryList(None, 0, 1)  # a negative value for the first parameter collects only curated ("JingXuan") items; 0 collects everything
def actionJingXuanTask(self):
logUtils.info("actionAllTask精选")
taobaoTry.taobaoTryUtils.taobaoTryUtils().handlePcTryList(None, -1, 1)  # a negative value for the first parameter collects only curated ("JingXuan") items; 0 collects everything
# taobaoTryTask(taobaoTryTask.taskType.JingXuan).actionJingXuanTask()
taobaoTryTask(taobaoTryTask.taskType.JingXuan)
|
tudousiji/pachong
|
taobaoTry/taobaoTryTask.py
|
taobaoTryTask.py
|
py
| 2,503 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "os._exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "taobaoTry.taobaoTryUtils.taobaoTryUtils.taobaoTryUtils",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "taobaoTry.taobaoTryUtils.taobaoTryUtils",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "taobaoTry.taobaoTryUtils",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "task.logUtils.logUtils.info",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "task.logUtils.logUtils",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "taobaoTry.taobaoTryUtils.taobaoTryUtils.taobaoTryUtils",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "taobaoTry.taobaoTryUtils.taobaoTryUtils",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "taobaoTry.taobaoTryUtils",
"line_number": 56,
"usage_type": "name"
}
] |
29522073566
|
import os
import random
import sys
import yaml
import numpy as np
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
if not cfg['use_gpu']:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
seed = cfg['seed']
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
import tensorflow as tf
from tensorflow.keras.datasets.fashion_mnist import load_data
from tensorflow.keras.utils import to_categorical
tf.compat.v1.set_random_seed(seed)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.disable_eager_execution() # To solve the speed problem of TF2
# Deprecated in tf2
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.intra_op_parallelism_threads = 4
config.inter_op_parallelism_threads = 4
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=config))
from utils.deepnetwork import DeepNetwork
from utils.tracker import Tracker
def fashion_mnist(params):
tracker = Tracker(seed, 'fashion_mnist.h5')
# Load dataset
(x_train, y_train), (x_test, y_test) = load_data()
# Preprocessing
# Reshape data as dataset is grayscaled
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
# Convert labels into categorial
n_classes = params['n_classes']
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)
# Normalize images values
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Create model
model = DeepNetwork.build((28, 28, 1), params)
# Train model
model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['n_epochs'],
validation_data=(x_test, y_test),
shuffle=True)
# Evaluate performance
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# Save model
tracker.save_model(model)
if __name__ == "__main__":
fashion_mnist(cfg['train'])
|
emarche/Fashion-MNIST
|
main.py
|
main.py
|
py
| 2,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "yaml.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.set_random_seed",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.logging.set_verbosity",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.disable_eager_execution",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.keras.backend.set_session",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.Session",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.get_default_graph",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.tracker.Tracker",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.datasets.fashion_mnist.load_data",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "utils.deepnetwork.DeepNetwork.build",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "utils.deepnetwork.DeepNetwork",
"line_number": 62,
"usage_type": "name"
}
] |
19521127011
|
from panda3d.core import CollisionNode, CollisionTube, CollisionBox, AmbientLight, Vec4, DirectionalLight
from FreedomCampaignGame.comm_with_server import ClientLogObject
client_logger = ClientLogObject().client_logger
class GameMap():
def __init__(self, render, load_model_fun):
self.render = render
# Load the environment model: this loads the environment.egg file from the Models folder and returns a handle to the model
self.load_model = load_model_fun
self.scene = self.load_model("Models/Environment/environment")
# Reparent the model to the scene graph so that it gets rendered.
self.scene.reparent_to(self.render)
client_logger.info("开始生成地图障碍物...")
self.set_map_solid()
client_logger.info("加载环境光、定向灯光..")
self.load_light_all()
def load_light_all(self):
# Add ambient light; an AmbientLight object is a scene-graph node
self.ambient_light = AmbientLight("ambient light")
self.ambient_light.set_color(Vec4(0.2, 0.2, 0.2, 1))
self.ambient_light_node_path = self.render.attach_new_node(self.ambient_light)
# By default a light only affects nodes below the node it is attached to; to light the whole scene, set the light on render
self.render.set_light(self.ambient_light_node_path)
# Add a directional light, whose direction can be specified
self.directional_light = DirectionalLight("directional light")
self.directional_light_node_path = self.render.attach_new_node(self.directional_light)
# For a figure facing you: H is like it turning left, P like it tipping to the left (i.e. rotating clockwise), R like it pitching forward toward you; changing HPR moves where the light is cast
self.directional_light_node_path.setHpr(45, -45, 0)
self.render.set_light(self.directional_light_node_path)
# To apply the shader generator, just call setShaderAuto on the NodePath it should affect; here it is applied to render
self.render.setShaderAuto()
def set_box_solid(self, size=(0, 1, 1, 1), show=True):
# size: the purpose of the first 0 is unclear (the official docs do not explain it); the remaining three are length, width, and height
box_solid = CollisionBox(size[0], size[1], size[2], size[3])
box_node = CollisionNode("box")
box_node.add_solid(box_solid)
box = self.render.attach_new_node(box_node)
if show:
box.show()
return box
def set_tube_solid(self, size=(0, 0, 0, 0, 0, 0, 0.4), show=True):
# A tube is defined by its start point, end point, and radius. For example, a tube from (-8,0,0) to (8,0,0) with radius 0.4 is a circular pipe
set_tube_solid = CollisionTube(size[0], size[1], size[2], size[3], size[4], size[5], size[6])
wall_node = CollisionNode("wall")
wall_node.add_solid(set_tube_solid)
wall = self.render.attach_new_node(wall_node)
if show:
wall.show()
return wall
def set_map_solid(self):
# Place one big tube
# wall = self.set_tube_solid(size=(-2.0, 0, 0, 2.0, 0, 0, 0.2))
# wall.setY(-3)
# Build stairs out of boxes
box = self.set_box_solid(size=(0, 1, 1.5, 0.2))
box.setX(-2)
box.setZ(0.2)
box = self.set_box_solid(size=(0, 1, 1.5, 0.4))
box.setX(-3)
box.setZ(0.4)
box = self.set_box_solid(size=(0, 1, 1.5, 0.6))
box.setX(-4)
box.setZ(0.6)
# Build walls out of boxes; these are the two walls on either side of the door
box = self.set_box_solid(size=(0, 3.65, 0.1, 1.5))
box.setY(8.1)
box.setX(-4.3)
box.setZ(1.5)
box = self.set_box_solid(size=(0, 3.65, 0.1, 1.5))
box.setY(8.1)
box.setX(4.3)
box.setZ(1.5)
# The door: the threshold at the bottom
box = self.set_box_solid(size=(0, 0.65, 0.1, 0.25))
box.setY(8.2)
box.setZ(0.25)
# Top of the door frame
box = self.set_box_solid(size=(0, 0.65, 0.1, 0.2))
box.setY(8.1)
box.setZ(2.04)
box = self.set_box_solid(size=(0, 8, 0.1, 1.5))
box.setY(-8.1)
box.setZ(1.5)
box = self.set_box_solid(size=(0, 0.1, 8, 1.5))
box.setX(8.1)
box.setZ(1.5)
box = self.set_box_solid(size=(0, 0.1, 8, 1.5))
box.setX(-8.1)
box.setZ(1.5)
|
optimjiang/my_3d_game
|
game_map.py
|
game_map.py
|
py
| 4,327 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "FreedomCampaignGame.comm_with_server.ClientLogObject",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "panda3d.core.AmbientLight",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "panda3d.core.Vec4",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "panda3d.core.DirectionalLight",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "panda3d.core.CollisionBox",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "panda3d.core.CollisionNode",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "panda3d.core.CollisionTube",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "panda3d.core.CollisionNode",
"line_number": 47,
"usage_type": "call"
}
] |
23055488423
|
"""
Creation:
Author: Martin Grunnill
Date: 2022-11-01
Description: Getting prevalence data for World Cup teams.
"""
import copy
import pandas as pd
import datetime
schedule_df = pd.read_csv('data_extraction/Fifa 2022 Group stages matches with venue capacity.csv')
covid_data = pd.read_csv('https://covid.ourworldindata.org/data/owid-covid-data.csv')
population_df = pd.read_csv('data_extraction/Population estimates world bank.csv',header=2, index_col='Country Name') # downloaded from https://data.worldbank.org/indicator/SP.POP.TOTL https://api.worldbank.org/v2/en/indicator/SP.POP.TOTL?downloadformat=csv
# need to change covid_data to datetime type
covid_data.date = pd.to_datetime(covid_data.date)
date_to = datetime.datetime(2022, 11, 18)
covid_data = covid_data[covid_data.date<=date_to]
#%%
# select data for only countries in the world cup
countries = set(schedule_df['Team A'].unique().tolist() +
schedule_df['Team B'].unique().tolist())
# looking at the data set (https://covid.ourworldindata.org/data/owid-covid-data.csv) new cases smoothed
# for England and Wales is pooled under United Kingdom
proxies = copy.deepcopy(countries)
proxies.add('United Kingdom')
proxies.remove('England')
proxies.remove('Wales')
covid_data = covid_data[covid_data.location.isin(proxies)]
# sense check to make sure we have selected the right places
selected_proxies = covid_data.location.unique()
assert len(proxies) == len(selected_proxies), "some proxy locations are missing from the COVID data"
#%% Selecting most recent available data for new_cases_smoothed
# remove missing data
covid_data.new_cases_smoothed = covid_data.new_cases_smoothed.replace({0:None})
covid_data = covid_data[pd.notnull(covid_data.new_cases_smoothed)]
prevelance_records = []
for country in countries:
if country in ['England', 'Wales']:
proxy = 'United Kingdom'
else:
proxy = country
# select proxy
location_data = covid_data[covid_data.location==proxy]
# latest date for which we have information
latest_date = location_data.date.max()
latest_date_data = location_data[location_data.date==latest_date]
cases_smoothed = latest_date_data.new_cases_smoothed.iloc[0]
if proxy=='South Korea':
population = population_df.loc['Korea, Rep.', '2021']
elif proxy == 'Iran':
population = population_df.loc['Iran, Islamic Rep.', '2021']
else:
population = population_df.loc[proxy,'2021']
entry = {'country': country,
'proxy': proxy,
'date': latest_date,
'case_prevalence': cases_smoothed/population,
}
prevelance_records.append(entry)
prevelance_df = pd.DataFrame(prevelance_records)
#%% Adding data on infection to detection ratio
# Getting data frame
# location of zip file downloaded from https://ghdx.healthdata.org/sites/default/files/record-attached-files/HME_COVID_19_IES_2019_2021_RATIOS.zip
zip_file = 'data_extraction/HME_COVID_19_IES_2019_2021_RATIOS.zip'
# read file
detection_raio_df = pd.read_csv(zip_file)
# Selecting detections/infections
detection_raio_df.measure_name.unique()
detection_raio_df = detection_raio_df[detection_raio_df.measure_name=='Cumulative infection-detection ratio']
# change date to date
detection_raio_df['date'] = pd.to_datetime(detection_raio_df['date'])
detection_raio_df = detection_raio_df[detection_raio_df.date==detection_raio_df.date.max()]
detection_raio_df.location_name = detection_raio_df.location_name.replace({'USA':'United States','UK':'United Kingdom'})
values_list = ['value_mean','value_lower','value_upper']
# Change percent to raw number
for column in values_list:
detection_raio_df[column] = detection_raio_df[column]/100
detection_raio_df.metric_name = 'raw'
# invert values so they are now infections/detected cases.
for column in values_list:
detection_raio_df[column] = detection_raio_df[column]**-1
to_merge = detection_raio_df[detection_raio_df.location_name.isin(proxies)]
to_merge = to_merge[['location_name']+values_list]
prevelance_df = prevelance_df.merge(to_merge, left_on='proxy', right_on='location_name')
prevelance_df.rename(columns={'value_lower': 'ratio_upper',
'value_mean': 'ratio_mean',
'value_upper': 'ratio_lower'},
inplace=True)
prevelance_df['infection_prevalence_lower'] = prevelance_df.case_prevalence*prevelance_df.ratio_lower
prevelance_df['infection_prevalence_mean'] = prevelance_df.case_prevalence*prevelance_df.ratio_mean
prevelance_df['infection_prevalence_upper'] = prevelance_df.case_prevalence*prevelance_df.ratio_upper
# host min and max
host_min = prevelance_df[prevelance_df.country=='Qatar']['infection_prevalence_lower'].tolist()[0]
host_max = prevelance_df[prevelance_df.country=='Qatar']['infection_prevalence_upper'].tolist()[0]
# Everybody else's min and max
visior_min = prevelance_df[prevelance_df.country!='Qatar']['infection_prevalence_lower'].min()
visior_max = prevelance_df[prevelance_df.country!='Qatar']['infection_prevalence_upper'].max()
|
LIAM-COVID-19-Forecasting/Modelling-Disease-Mitigation-at-Mass-Gatherings-A-Case-Study-of-COVID-19-at-the-2022-FIFA-World-Cup
|
Running_and_analysing_simulations/parameters/data_extraction/getting_prevelance_data.py
|
getting_prevelance_data.py
|
py
| 5,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.notnull",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 80,
"usage_type": "call"
}
] |
854208264
|
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
import numpy
import scipy
import scipy.ndimage
from Array import *
from Matrix import *
class ArrayImaging(object):
my_namespace = 'numpy|imaging'
class ExtractRGBAChannel(ArrayImaging, Module):
""" Extract a single color channel from an array representing an
RGBA type image. This will return a 2D array with the single channel
specified as the scalar elements """
def compute(self):
im = self.get_input("Image").get_array()
chan = self.get_input("Channel")
ar = im[:,:,chan]
out = NDArray()
out.set_array(ar)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Image", (NDArray, 'Image Array'))
reg.add_input_port(cls, "Channel", (basic.Integer, 'Channel'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class GaussianGradientMagnitude(ArrayImaging, Module):
""" Calculate the Gradient Magnitude of an input NDArray using gaussian derivatives.
The standard-deviation of the Gaussian filter are given for each axis as a sequence
or as a single number, in which case the filter will be isotropic. """
def compute(self):
im = self.get_input("Image")
sigma = self.get_input_list("Sigmas")
if len(sigma) <= 1:
sigma = sigma[0]
der = scipy.ndimage.gaussian_gradient_magnitude(im.get_array(), sigma)
out = NDArray()
out.set_array(der)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Image", (NDArray, 'Image Array'))
reg.add_input_port(cls, "Sigmas", (basic.Float, 'Standard Deviations'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class JointHistogram(ArrayImaging, Module):
""" Calculate the Joint Histogram of 2 inputs. The inputs can be of arbitrary dimension,
but must be equivalently sized. """
def compute(self):
in_x = self.get_input("Array One").get_array()
in_y = self.get_input("Array Two").get_array()
size_x = self.get_input("Bins X")
size_y = self.get_input("Bins Y")
take_log = True
if self.has_input("Log10"):
take_log = self.get_input("Log10")
out_ar = numpy.zeros((size_x, size_y))
min_x = in_x.min()
max_x = in_x.max() - min_x
min_y = in_y.min()
max_y = in_y.max() - min_y
in_x = in_x.flatten()
in_y = in_y.flatten()
for i in xrange(in_x.size):
x_cor = int(((in_x[i] - min_x)/max_x) * (size_x - 1))
y_cor = int(((in_y[i] - min_y)/max_y) * (size_y - 1))
out_ar[x_cor,y_cor] += 1.0
if take_log:
out_ar = out_ar + 1.0
out_ar = scipy.log(out_ar)
out = NDArray()
out_ar = out_ar.transpose()
out_ar = out_ar[::-1]
out.set_array(out_ar)
self.set_output("Joint Histogram", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Array One", (NDArray, 'X Axis Input'))
reg.add_input_port(cls, "Array Two", (NDArray, 'Y Axis Input'))
reg.add_input_port(cls, "Log10", (basic.Boolean, 'Use Log of Histogram'), True)
reg.add_input_port(cls, "Bins X", (basic.Integer, 'Number of X Bins'))
reg.add_input_port(cls, "Bins Y", (basic.Integer, 'Number of Y Bins'))
reg.add_output_port(cls, "Joint Histogram", (NDArray, 'Joint Histogram'))
class GaussianSmooth(ArrayImaging, Module):
""" Smooth the Input array with a multi-dimensional gaussian kernel.
The standard deviations of the Gaussian filter are given for each axis as a sequence
or as a single number, in which case the filter will be isotropic. """
def compute(self):
im = self.get_input("Input Array")
sigma = self.get_input_list("Sigmas")
if len(sigma) <= 1:
sigma = sigma[0]
der = scipy.ndimage.gaussian_filter(im.get_array(), sigma)
out = NDArray()
out.set_array(der)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Input Array", (NDArray, 'Image Array'))
reg.add_input_port(cls, "Sigmas", (basic.Float, 'Standard Deviations'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class MedianFilter(ArrayImaging, Module):
""" Smooth the Input array with a multi-dimensional median filter. """
def compute(self):
im = self.get_input("Input Array")
k_size = self.get_input("Size")
der = scipy.ndimage.median_filter(im.get_array(), size=k_size)
out = NDArray()
out.set_array(der)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Input Array", (NDArray, 'Image Array'))
reg.add_input_port(cls, "Size", (basic.Integer, 'Kernel Size'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ImageDifference(ArrayImaging, Module):
""" Calculate the difference between two input images. """
def compute(self):
im = self.get_input("Input 1")
im2 = self.get_input("Input 2")
da_ar = im.get_array() - im2.get_array()
da_ar = numpy.abs(da_ar)
out = NDArray()
out.set_array(da_ar)
self.set_output("Output", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Input 1", (NDArray, 'Image Array'))
reg.add_input_port(cls, "Input 2", (NDArray, 'Image Array'))
reg.add_output_port(cls, "Output", (NDArray, 'Output Array'))
class ImageNormalize(ArrayImaging, Module):
""" Move the range of the image to [0,1] """
def compute(self):
im = self.get_input("Input")
im_max = im.get_array().max()
im_ar = im.get_array() / im_max
out = NDArray()
out.set_array(im_ar)
self.set_output("Output", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Input", (NDArray, 'Image Array'))
reg.add_output_port(cls, "Output", (NDArray, 'Output Array'))
class SobelGradientMagnitude(ArrayImaging, Module):
""" Use n-dimensional sobel kernels to compute the gradient magnitude
of an image """
def compute(self):
im = self.get_input("Input").get_array()
mag = numpy.zeros(im.shape)
for i in range(im.ndim):
kern = scipy.ndimage.sobel(im, axis=i)
mag += kern*kern
out = NDArray()
out.set_array(numpy.sqrt(mag))
self.set_output("Output", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Input", (NDArray, 'Image Array'))
reg.add_output_port(cls, "Output", (NDArray, 'Output Array'))
|
VisTrails/VisTrails
|
contrib/NumSciPy/Imaging.py
|
Imaging.py
|
py
| 7,502 |
python
|
en
|
code
| 100 |
github-code
|
6
|
[
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.gaussian_gradient_magnitude",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scipy.log",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.gaussian_filter",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.median_filter",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "core.modules.vistrails_module.Module",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.sobel",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 186,
"usage_type": "call"
}
] |
14837094680
|
import frappe
import json
# items are passed in from the front end; map customer item code -> item code
@frappe.whitelist()
def so_refcode_to_itemcode():
# extract the parameters passed in from JS
ao_items = json.loads(frappe.form_dict.get("items"))
customer_name = frappe.form_dict.get("customer")
# collect all distinct customer item codes passed in from JS
s_ref_code = {r.get("customer_item_code") for r in ao_items}
# look up the item code for each customer item code from the Item Customer Detail table
item_code_map = dict(
frappe.get_all("Item Customer Detail",
filters = {"customer_name":customer_name,
"ref_code":("in", s_ref_code)
},
fields = ["ref_code", "parent"],
as_list = 1
))
# frappe.msgprint(customer_name+"--"+str(s_ref_code)+"--"+str(items))
# return a dict mapping row id (name) -> item code
result = {r.get("name"):item_code_map.get(r.get("customer_item_code")) for r in ao_items}
frappe.response["message"] = result
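# Illustrative request/response sketch (values are hypothetical). The JS client
# is assumed to post `items` as a JSON array of child-table rows and `customer`
# as a plain string; the handler then answers with a {row name: item code} map:
def _example_mapping():
    ao_items = [{"name": "row-1", "customer_item_code": "CUST-001"}]
    item_code_map = {"CUST-001": "ITEM-0001"}
    result = {r.get("name"): item_code_map.get(r.get("customer_item_code")) for r in ao_items}
    assert result == {"row-1": "ITEM-0001"}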
|
cwlong1987/yhen
|
yhen/api/sales_order.py
|
sales_order.py
|
py
| 866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "frappe.form_dict.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "frappe.form_dict",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "frappe.form_dict.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "frappe.form_dict",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "frappe.get_all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "frappe.response",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "frappe.whitelist",
"line_number": 5,
"usage_type": "call"
}
] |
31366334882
|
import torch
import torch.nn as nn
from GAWWN.tools.config import cfg
from GAWWN.tools.tools import replicate
class keyMulD(nn.Module):
def __init__(self):
super(keyMulD, self).__init__()
self.ndf = cfg.GAN.NDF
self.nt_d = cfg.TEXT.TXT_FEATURE_DIM
self.keypoint_dim = cfg.KEYPOINT.DIM
self.conv = nn.Sequential(
nn.Conv2d(self.nt_d + self.ndf * 2, self.ndf * 2, 3, 1, 1),
nn.BatchNorm2d(self.ndf * 2),
nn.LeakyReLU(0.2, True)
)
def forward(self, imgGlobal, prep_txt_d, locs):
prep_txt_d = replicate(prep_txt_d, 2, self.keypoint_dim) # (bs, nt_d, 16)
prep_txt_d = replicate(prep_txt_d, 3, self.keypoint_dim) # (bs, nt_d, 16, 16)
imgTextGlobal = torch.cat((imgGlobal, prep_txt_d), 1) # (bs, nt_d + ndf * 2, 16, 16)
imgTextGlobal = self.conv(imgTextGlobal) # (bs, ndf * 2, 16, 16)
# loc (bs, num_elt, keypoint_dim, keypoint_dim)
locs = torch.sum(locs, 1) # (bs, keypoint_dim, keypoint_dim)
locs = torch.clamp(locs, 0, 1)
locs = replicate(locs, 1, self.ndf * 2)
x = imgTextGlobal * locs
return x
class regionD(nn.Module):
def __init__(self):
super(regionD, self).__init__()
self.ndf = cfg.GAN.NDF
self.num_elt = cfg.KEYPOINT.NUM_ELT
self.F_KeyMulD = keyMulD()
self.conv = nn.Sequential(
nn.Conv2d(self.ndf * 2 + self.num_elt, self.ndf * 2, 1),
nn.BatchNorm2d(self.ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(self.ndf * 2, self.ndf, 2)
)
self.LReLU = nn.LeakyReLU(0.2, True)
def forward(self, imgGlobal, prep_txt_d, locs):
keyMul = self.F_KeyMulD(imgGlobal, prep_txt_d, locs)
x = torch.cat((keyMul, locs), 1) # (bs, ngf * 2 + num_elt, 16, 16)
x = x.contiguous()
x = self.conv(x)
x = x.mean(3)
x = x.mean(2)
x = self.LReLU(x)
return x
class globalD(nn.Module):
def __init__(self):
super(globalD, self).__init__()
self.ndf = cfg.GAN.NDF
self.nt_d = cfg.TEXT.TXT_FEATURE_DIM
self.convGlobal = nn.Sequential(
nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1),
nn.BatchNorm2d(self.ndf * 4),
nn.LeakyReLU(0.2, True),
nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1),
nn.BatchNorm2d(self.ndf * 8),
nn.LeakyReLU(0.2, True)
)
self.conv = nn.Sequential(
nn.Conv2d(self.ndf * 8 + self.nt_d, self.ndf * 4, 1),
nn.BatchNorm2d(self.ndf * 4),
nn.LeakyReLU(0.2, True),
nn.Conv2d(self.ndf * 4, self.ndf, 4),
nn.BatchNorm2d(self.ndf),
nn.LeakyReLU(0.2, True)
)
def forward(self, imgGlobal, prep_txt_d):
img = self.convGlobal(imgGlobal) # (bs, ndf * 8, 4, 4)
txtGlobal = replicate(prep_txt_d, 2, 4) # (bs, nt_d, 4)
txtGlobal = replicate(txtGlobal, 3, 4) # (bs, nt_d, 4, 4)
imgTxtGlobal = torch.cat((img, txtGlobal), 1) # (bs, nt_d + ndf * 8, 4 ,4)
imgTxtGlobal = imgTxtGlobal.contiguous()
imgTxtGlobal = self.conv(imgTxtGlobal) # (bs, ndf, 1, 1)
imgTxtGlobal = imgTxtGlobal.view(-1, self.ndf)
return imgTxtGlobal
class Dis(nn.Module):
def __init__(self):
super(Dis, self).__init__()
self.ndf = cfg.GAN.NDF
self.nt = cfg.TEXT.TXT_EMBEDDING_DIM
self.nt_d = cfg.TEXT.TXT_FEATURE_DIM
self.prep_txtD = nn.Sequential(
nn.Linear(self.nt, self.nt_d),
nn.LeakyReLU(0.2, True)
)
self.imgGlobalD = nn.Sequential(
nn.Conv2d(3, self.ndf, 4, 2, 1),
nn.LeakyReLU(0.2, True),
nn.Conv2d(self.ndf, self.ndf, 4, 2, 1),
nn.LeakyReLU(0.2, True),
nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1),
nn.BatchNorm2d(self.ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(self.ndf * 2, self.ndf * 2, 3, 1, 1),
nn.BatchNorm2d(self.ndf * 2),
nn.LeakyReLU(0.2, True)
)
self.F_regionD = regionD()
self.F_globalD = globalD()
self.judge = nn.Sequential(
nn.Linear(self.ndf * 2, self.ndf),
nn.BatchNorm1d(self.ndf),
nn.LeakyReLU(0.2, True),
nn.Linear(self.ndf, 1),
nn.Sigmoid()
)
def forward(self, img, txt, locs):
prep_txt_d = self.prep_txtD(txt)
image_Global = self.imgGlobalD(img)
region_d = self.F_regionD(image_Global, prep_txt_d, locs)
global_d = self.F_globalD(image_Global, prep_txt_d)
x = torch.cat((region_d, global_d), 1)
x = self.judge(x)
return x
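# Illustrative sketch (assumption, not part of this repo): `replicate(t, dim, n)`
# from GAWWN.tools.tools is used above to broadcast a new size-n axis at `dim`,
# e.g. (bs, nt_d) -> (bs, nt_d, 16). A minimal torch equivalent would be:
def replicate_sketch(t, dim, n):
    return t.unsqueeze(dim).expand(*t.shape[:dim], n, *t.shape[dim:])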
|
LosSherl/GAWWN.Pytorch
|
GAWWN/model/discriminator.py
|
discriminator.py
|
py
| 4,867 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.GAN",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.TEXT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.KEYPOINT",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.tools.replicate",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "GAWWN.tools.tools.replicate",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "GAWWN.tools.tools.replicate",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.GAN",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.KEYPOINT",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.GAN",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.TEXT",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.tools.replicate",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "GAWWN.tools.tools.replicate",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.GAN",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.TEXT",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "GAWWN.tools.config.cfg.TEXT",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "GAWWN.tools.config.cfg",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm1d",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 137,
"usage_type": "call"
}
] |
73832000827
|
import numpy as np
import pandas as pd
import scipy.ndimage as nd
from skimage import io as skio
import sys
import getopt
def usage():
print("""
Usage : python3 gen_stacked_tif.py < -i mask.lst>
< -a anno.txt>
< -o output prefix>
[ -b binsize default 20]
""")
def main(argv):
########################
# no args equal to -h
if len(argv) == 0 :
usage()
sys.exit(0)
########################
# default values
mask_lst = ''
annof = ''
binsize = 20
prefix = ''
########################
# parse args
try:
opts, args = getopt.getopt(argv,"hi:o:a:b:",["help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt in ("-i" ):
mask_lst = arg
elif opt in ("-o" ):
prefix = arg
elif opt in ("-a" ):
annof = arg
elif opt in ("-b" ):
binsize = int(arg)
infos = pd.read_csv(mask_lst,sep=' ',header=None)
infos.columns = ['filename','zvalue']
infos['zvalue'] = infos['zvalue'].astype(int)
infos['zvalue'] = infos['zvalue'] - 1 # start from 0
slices = {}
annos = pd.read_csv(annof,sep=',',header=0)
for i , row in infos.iterrows():
cellmask = np.loadtxt(row['filename'],dtype=int)
y, x = np.nonzero(cellmask)
tmp_draw = pd.DataFrame()
tmp_draw['x'] = x
tmp_draw['y'] = y
tmp_draw['cell'] = cellmask[y,x]
cellmask[y,x] = 0
tmp_draw= tmp_draw.set_index('cell')
slice_anno = annos[ annos['slice_id'] == int(row['zvalue']+1) ].copy()
slice_anno = slice_anno.set_index('cell_id')
tmp_draw['anno'] = slice_anno['anno_id']
tmp_draw = tmp_draw[tmp_draw['anno']!='NA'].copy()
cellmask[tmp_draw['y'],tmp_draw['x']] = 100
#pharynx = tmp_draw[tmp_draw['anno']=='c21']
#gut = tmp_draw[tmp_draw['anno']=='c1']
#neural = tmp_draw[tmp_draw['anno']=='c33']
#cellmask[pharynx['y'],pharynx['x']] = 150
#cellmask[gut['y'],gut['x']] = 200
#cellmask[neural['y'],neural['x']] = 250
h,w = cellmask.shape
affine = np.matrix(np.array([[1.0/binsize,0,0],[0,1.0/binsize,0],[0,0,1]]))
binimage = nd.affine_transform(cellmask.T,affine.I,output_shape=(int(w/binsize),int(h/binsize)),order=0)
slices[row['zvalue']] = binimage.T
H = int(h/binsize)
W = int(w/binsize)
zmax = infos['zvalue'].max()
image_buff = np.zeros((zmax+1,H,W),dtype='uint8')
for x in slices:
image_buff[x,:,:] = slices[x]
skio.imsave(f'{prefix}.tif',image_buff)
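# Illustrative note (assumption): with order=0 and a pure scaling matrix, the
# affine_transform call above reduces to nearest-neighbour downsampling, i.e.
# sampling every binsize-th pixel. A strided-slice sketch for sanity checking:
def bin_nearest(mask, binsize):
    h, w = mask.shape
    # keep every binsize-th pixel, trimmed to the same output shape as above
    return mask[::binsize, ::binsize][:int(h / binsize), :int(w / binsize)]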
if __name__ == "__main__":
main(sys.argv[1:])
|
BGI-Qingdao/4D-BioReconX
|
Preprocess/meshgen/gen_stacked_tif.py
|
gen_stacked_tif.py
|
py
| 2,922 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "sys.exit",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "getopt.getopt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "getopt.GetoptError",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.affine_transform",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "skimage.io.imsave",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 93,
"usage_type": "attribute"
}
] |
32124422720
|
from pages.courses.register_courses_page import RegisterCoursesPage
from utilities.teststatus import TestStatus
import unittest
import pytest
import time
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
class RegisterCoursesTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetup(self, oneTimeSetUp):
self.courses = RegisterCoursesPage(self.driver)
self.ts = TestStatus(self.driver)
@pytest.mark.run(order=1)
def test_invalidEnrollment(self):
"""
1. Call required methods from the page class to perform the test
2. Enter course name
3. Select course
4. Enroll in course
5. Verify error message
6. TestStatus.markFinal()
"""
self.courses.enterCourseName("JavaScript")
self.courses.selectCourseToEnroll("JavaScript for beginners")
self.courses.enrollCourse("5241810401821657", "1123", "123")
time.sleep(5)
result2 = self.courses.verifyEnrollFailed()
self.ts.markFinal("test_verifyEnrollment", result2, "Enrollment failed...!")
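# Illustrative alternative (assumption, not part of this framework): the fixed
# time.sleep(5) above could become an explicit wait, so the test pauses only as
# long as the error banner takes to appear. The locator below is hypothetical:
#
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
#
# WebDriverWait(self.driver, 10).until(
#     EC.visibility_of_element_located((By.CLASS_NAME, "alert-danger"))
# )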
|
badekarganesh04/selenium-python-framework
|
tests/courses/register_courses_tests.py
|
register_courses_tests.py
|
py
| 1,090 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pages.courses.register_courses_page.RegisterCoursesPage",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utilities.teststatus.TestStatus",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytest.mark.run",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.usefixtures",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 8,
"usage_type": "attribute"
}
] |
41708064158
|
import glob
import math
import os
import sys
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
from model.siamese.config import cfg
tqdm.pandas()
"""
Files have to be stored in a structure:
main_folder/
1/
0030.jpg
1080.jpg
...
2/
2400.jpg
...
14/
8800.jpg
...
This structure is going to extract images for 3 classes [1,2,14].
"""
AUTOTUNE = tf.data.experimental.AUTOTUNE
class DataGenerator(tf.keras.utils.Sequence):
def __init__(
self,
folder_path=cfg.TRAIN.DATA_PATH,
file_ext="jpg",
debug=False,
training=True,
exclude_aug=False,
step_size=1
):
"""
Args:
folder_path: string ## Path to folder with video frames
file_ext: string | List[str] (optional) looking for files with this extension
debug: boolean (optional) should generator display any warnings?
"""
self.images = None
self.debug = debug
self.data_path = folder_path
self.batch_size = cfg.TRAIN.BATCH_SIZE
self.shuffle = True
self.training = training
self.step_size = step_size
if not os.path.isdir(folder_path):
print(
"Images folder path {} does not exist. Exiting...".format(folder_path)
)
sys.exit()
images = []
for class_dir in os.scandir(folder_path):
if type(file_ext) is str:
file_ext = [file_ext]
files = []
for ext in file_ext:
pattern = '*'
if exclude_aug:
pattern = '*_*'
files.extend(glob.glob(f"{class_dir.path}/{pattern}.{ext}"))
for i, file in enumerate(sorted(files)):
images.append((file, class_dir.name))
self.org_images = images[::self.step_size]
batched = self.batch_images()
self.images = pd.DataFrame(batched, columns=["path", "label"])
print(
f'Found {len(self.images)} files for {len(self.images["label"].unique())} unique classes'
)
def __len__(self):
return math.ceil(len(self.images) / cfg.TRAIN.BATCH_SIZE)
def add_dataset(self, dataset):
"""
Args:
dataset: List[path, label]
Returns:
"""
self.org_images = self.org_images + dataset
batched = self.batch_images()
self.images = pd.DataFrame(batched, columns=["path", "label"])
print(
f'Found {len(self.images)} files for {len(self.images["label"].unique())} unique classes'
)
def batch_images(self):
images = self.org_images.copy()
random.shuffle(images)
images = pd.DataFrame(images, columns=["path", "label"])
low_class_count = min(images["label"].value_counts())
unique_classes = images["label"].unique()
class_dfs = {}
for class_id in unique_classes:
class_dfs[str(class_id)] = (
images[images["label"] == class_id]
.sample(frac=1)
.reset_index(drop=True)
)
batched = []
for i in range(0, low_class_count - 1, 2):
for class_id in unique_classes:
rows = class_dfs[str(class_id)].loc[[i, i + 1], :]
batched.append(rows.to_numpy())
batched = np.array(batched)
batched = batched.reshape(
(batched.shape[0] * batched.shape[1], batched.shape[2])
)
return batched
@staticmethod
def process_image(image_path, to_input=False):
"""
Args:
image_path: string
to_input: boolean - should image be wrapped into input shape (1, 224, 224, 3)
Returns:
((cfg.NN.INPUT_SIZE, cfg.NN.INPUT_SIZE, 3), class)
"""
image = tf.keras.preprocessing.image.load_img(
image_path, target_size=(cfg.NN.INPUT_SIZE, cfg.NN.INPUT_SIZE)
)
image = tf.keras.preprocessing.image.img_to_array(image)
image = np.expand_dims(image, axis=0)
image = tf.keras.applications.mobilenet_v2.preprocess_input(image)
if to_input:
return image
return image[0]
@staticmethod
def process_label(label):
"""
Args:
label: string
Returns:
int
"""
return int(label)
def get_dataset(self):
"""
Returns:
tf.Dataset
"""
target = (
self.images.pop("label")
.progress_map(DataGenerator.process_label)
.to_numpy()
)
images = (
self.images.pop("path").progress_map(DataGenerator.process_image).to_numpy()
)
reshaped_images = np.concatenate(images).reshape(
(
images.shape[0],
images[1].shape[0],
images[1].shape[1],
images[1].shape[2],
)
)
ds = tf.data.Dataset.from_tensor_slices((reshaped_images, target))
ds = ds.cache()
ds = ds.batch(cfg.TRAIN.BATCH_SIZE)
ds = ds.prefetch(buffer_size=cfg.TRAIN.BATCH_SIZE)
return ds
def on_epoch_end(self):
if self.training:
batched = self.batch_images()
self.images = pd.DataFrame(batched, columns=["path", "label"])
def __getitem__(self, item):
images = self.images.loc[
item * cfg.TRAIN.BATCH_SIZE : (item + 1) * cfg.TRAIN.BATCH_SIZE
]
target = images.pop("label").map(DataGenerator.process_label).to_numpy()
images = images.pop("path").map(DataGenerator.process_image).to_numpy()
reshaped_images = np.concatenate(images).reshape(
(
images.shape[0],
images[1].shape[0],
images[1].shape[1],
images[1].shape[2],
)
)
return reshaped_images, target
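# Illustrative usage sketch (paths and extensions are hypothetical):
#
# gen = DataGenerator(folder_path="data/train", file_ext=["jpg", "png"])
# batch_images, batch_labels = gen[0]               # one class-balanced batch
# ds = DataGenerator(training=False).get_dataset()  # full tf.data pipeline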
|
burnpiro/farm-animal-tracking
|
data/data_generator.py
|
data_generator.py
|
py
| 6,118 |
python
|
en
|
code
| 24 |
github-code
|
6
|
[
{
"api_name": "tqdm.tqdm.pandas",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tensorflow.data",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg.TRAIN",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "model.siamese.config.cfg.TRAIN",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "model.siamese.config.cfg.TRAIN",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.image.load_img",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg.NN",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.preprocessing.image.img_to_array",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "numpy.expand_dims",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "tensorflow.data.Dataset.from_tensor_slices",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg.TRAIN",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "model.siamese.config.cfg.TRAIN",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "model.siamese.config.cfg.TRAIN",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "model.siamese.config.cfg",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 209,
"usage_type": "call"
}
] |
11859412516
|
import logging
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, List, Union
from pandas import DataFrame, to_datetime
from tabulate import tabulate
from freqtrade.constants import (DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN, UNLIMITED_STAKE_AMOUNT,
Config)
from freqtrade.data.metrics import (calculate_cagr, calculate_csum, calculate_market_change,
calculate_max_drawdown)
from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
logger = logging.getLogger(__name__)
def store_backtest_stats(
recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
"""
Stores backtest results
:param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
:param stats: Dataframe containing the backtesting statistics
:param dtappendix: Datetime to use for the filename
"""
if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}.json')
else:
filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
).with_suffix(recordfilename.suffix)
# Store metadata separately.
file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
del stats['metadata']
file_dump_json(filename, stats)
latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
def store_backtest_signal_candles(
recordfilename: Path, candles: Dict[str, Dict], dtappendix: str) -> Path:
"""
Stores backtest trade signal candles
:param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>_signals.pkl will be used
as filename
:param stats: Dict containing the backtesting signal candles
:param dtappendix: Datetime to use for the filename
"""
if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}_signals.pkl')
else:
filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_signals.pkl'
)
file_dump_joblib(filename, candles)
return filename
def _get_line_floatfmt(stake_currency: str) -> List[str]:
"""
Generate floatformat (goes in line with _generate_result_line())
"""
return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
'.2f', 'd', 's', 's']
def _get_line_header(first_column: str, stake_currency: str,
direction: str = 'Entries') -> List[str]:
"""
Generate header lines (goes in line with _generate_result_line())
"""
return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
'Win Draw Loss Win%']
def generate_wins_draws_losses(wins, draws, losses):
if wins > 0 and losses == 0:
wl_ratio = '100'
elif wins == 0:
wl_ratio = '0'
else:
wl_ratio = f'{100.0 / (wins + draws + losses) * wins:.1f}' if losses > 0 else '100'
return f'{wins:>4} {draws:>4} {losses:>4} {wl_ratio:>4}'
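# e.g. generate_wins_draws_losses(6, 1, 3) returns
# '   6    1    3 60.0'  (6 wins out of 10 trades -> 60.0% win rate)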
def _generate_result_line(result: DataFrame, starting_balance: int, first_column: str) -> Dict:
"""
Generate one result dict, with "first_column" as key.
"""
profit_sum = result['profit_ratio'].sum()
# (end-capital - starting capital) / starting capital
profit_total = result['profit_abs'].sum() / starting_balance
return {
'key': first_column,
'trades': len(result),
'profit_mean': result['profit_ratio'].mean() if len(result) > 0 else 0.0,
'profit_mean_pct': result['profit_ratio'].mean() * 100.0 if len(result) > 0 else 0.0,
'profit_sum': profit_sum,
'profit_sum_pct': round(profit_sum * 100.0, 2),
'profit_total_abs': result['profit_abs'].sum(),
'profit_total': profit_total,
'profit_total_pct': round(profit_total * 100.0, 2),
'duration_avg': str(timedelta(
minutes=round(result['trade_duration'].mean()))
) if not result.empty else '0:00',
# 'duration_max': str(timedelta(
# minutes=round(result['trade_duration'].max()))
# ) if not result.empty else '0:00',
# 'duration_min': str(timedelta(
# minutes=round(result['trade_duration'].min()))
# ) if not result.empty else '0:00',
'wins': len(result[result['profit_abs'] > 0]),
'draws': len(result[result['profit_abs'] == 0]),
'losses': len(result[result['profit_abs'] < 0]),
}
def generate_pair_metrics(pairlist: List[str], stake_currency: str, starting_balance: int,
results: DataFrame, skip_nan: bool = False) -> List[Dict]:
"""
Generates and returns a list for the given backtest data and the results dataframe
:param pairlist: Pairlist used
:param stake_currency: stake-currency - used to correctly name headers
:param starting_balance: Starting balance
:param results: Dataframe containing the backtest results
:param skip_nan: Print "left open" open trades
:return: List of Dicts containing the metrics per pair
"""
tabular_data = []
for pair in pairlist:
result = results[results['pair'] == pair]
if skip_nan and result['profit_abs'].isnull().all():
continue
tabular_data.append(_generate_result_line(result, starting_balance, pair))
# Sort by total profit %:
tabular_data = sorted(tabular_data, key=lambda k: k['profit_total_abs'], reverse=True)
# Append Total
tabular_data.append(_generate_result_line(results, starting_balance, 'TOTAL'))
return tabular_data
def generate_tag_metrics(tag_type: str,
starting_balance: int,
results: DataFrame,
skip_nan: bool = False) -> List[Dict]:
"""
Generates and returns a list of metrics for the given tag trades and the results dataframe
:param starting_balance: Starting balance
:param results: Dataframe containing the backtest results
:param skip_nan: Print "left open" open trades
:return: List of Dicts containing the metrics per pair
"""
tabular_data = []
if tag_type in results.columns:
for tag, count in results[tag_type].value_counts().items():
result = results[results[tag_type] == tag]
if skip_nan and result['profit_abs'].isnull().all():
continue
tabular_data.append(_generate_result_line(result, starting_balance, tag))
# Sort by total profit %:
tabular_data = sorted(tabular_data, key=lambda k: k['profit_total_abs'], reverse=True)
# Append Total
tabular_data.append(_generate_result_line(results, starting_balance, 'TOTAL'))
return tabular_data
else:
return []
def generate_exit_reason_stats(max_open_trades: int, results: DataFrame) -> List[Dict]:
"""
Generate small table outlining Backtest results
:param max_open_trades: Max_open_trades parameter
:param results: Dataframe containing the backtest result for one strategy
:return: List of Dicts containing the metrics per Sell reason
"""
tabular_data = []
for reason, count in results['exit_reason'].value_counts().items():
result = results.loc[results['exit_reason'] == reason]
profit_mean = result['profit_ratio'].mean()
profit_sum = result['profit_ratio'].sum()
profit_total = profit_sum / max_open_trades
tabular_data.append(
{
'exit_reason': reason,
'trades': count,
'wins': len(result[result['profit_abs'] > 0]),
'draws': len(result[result['profit_abs'] == 0]),
'losses': len(result[result['profit_abs'] < 0]),
'profit_mean': profit_mean,
'profit_mean_pct': round(profit_mean * 100, 2),
'profit_sum': profit_sum,
'profit_sum_pct': round(profit_sum * 100, 2),
'profit_total_abs': result['profit_abs'].sum(),
'profit_total': profit_total,
'profit_total_pct': round(profit_total * 100, 2),
}
)
return tabular_data
def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]:
"""
Generate summary per strategy
:param bt_stats: Dict of <Strategyname: DataFrame> containing results for all strategies
:return: List of Dicts containing the metrics per Strategy
"""
tabular_data = []
for strategy, result in bt_stats.items():
tabular_data.append(deepcopy(result['results_per_pair'][-1]))
# Update "key" to strategy (results_per_pair has it as "Total").
tabular_data[-1]['key'] = strategy
tabular_data[-1]['max_drawdown_account'] = result['max_drawdown_account']
tabular_data[-1]['max_drawdown_abs'] = round_coin_value(
result['max_drawdown_abs'], result['stake_currency'], False)
return tabular_data
def generate_edge_table(results: dict) -> str:
floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
tabular_data = []
headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
'Average Duration (min)']
for result in results.items():
if result[1].nb_trades > 0:
tabular_data.append([
result[0],
result[1].stoploss,
result[1].winrate,
result[1].risk_reward_ratio,
result[1].required_risk_reward,
result[1].expectancy,
result[1].nb_trades,
round(result[1].avg_trade_duration)
])
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(tabular_data, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def _get_resample_from_period(period: str) -> str:
if period == 'day':
return '1d'
if period == 'week':
return '1w'
if period == 'month':
return '1M'
raise ValueError(f"Period {period} is not supported.")
def generate_periodic_breakdown_stats(trade_list: List, period: str) -> List[Dict[str, Any]]:
results = DataFrame.from_records(trade_list)
if len(results) == 0:
return []
results['close_date'] = to_datetime(results['close_date'], utc=True)
resample_period = _get_resample_from_period(period)
resampled = results.resample(resample_period, on='close_date')
stats = []
for name, day in resampled:
profit_abs = day['profit_abs'].sum().round(10)
wins = sum(day['profit_abs'] > 0)
draws = sum(day['profit_abs'] == 0)
loses = sum(day['profit_abs'] < 0)
stats.append(
{
'date': name.strftime('%d/%m/%Y'),
'profit_abs': profit_abs,
'wins': wins,
'draws': draws,
'loses': loses
}
)
return stats
def generate_trading_stats(results: DataFrame) -> Dict[str, Any]:
""" Generate overall trade statistics """
if len(results) == 0:
return {
'wins': 0,
'losses': 0,
'draws': 0,
'holding_avg': timedelta(),
'winner_holding_avg': timedelta(),
'loser_holding_avg': timedelta(),
}
winning_trades = results.loc[results['profit_ratio'] > 0]
draw_trades = results.loc[results['profit_ratio'] == 0]
losing_trades = results.loc[results['profit_ratio'] < 0]
holding_avg = (timedelta(minutes=round(results['trade_duration'].mean()))
if not results.empty else timedelta())
winner_holding_avg = (timedelta(minutes=round(winning_trades['trade_duration'].mean()))
if not winning_trades.empty else timedelta())
loser_holding_avg = (timedelta(minutes=round(losing_trades['trade_duration'].mean()))
if not losing_trades.empty else timedelta())
return {
'wins': len(winning_trades),
'losses': len(losing_trades),
'draws': len(draw_trades),
'holding_avg': holding_avg,
'holding_avg_s': holding_avg.total_seconds(),
'winner_holding_avg': winner_holding_avg,
'winner_holding_avg_s': winner_holding_avg.total_seconds(),
'loser_holding_avg': loser_holding_avg,
'loser_holding_avg_s': loser_holding_avg.total_seconds(),
}
def generate_daily_stats(results: DataFrame) -> Dict[str, Any]:
""" Generate daily statistics """
if len(results) == 0:
return {
'backtest_best_day': 0,
'backtest_worst_day': 0,
'backtest_best_day_abs': 0,
'backtest_worst_day_abs': 0,
'winning_days': 0,
'draw_days': 0,
'losing_days': 0,
'daily_profit_list': [],
}
daily_profit_rel = results.resample('1d', on='close_date')['profit_ratio'].sum()
daily_profit = results.resample('1d', on='close_date')['profit_abs'].sum().round(10)
worst_rel = min(daily_profit_rel)
best_rel = max(daily_profit_rel)
worst = min(daily_profit)
best = max(daily_profit)
winning_days = sum(daily_profit > 0)
draw_days = sum(daily_profit == 0)
losing_days = sum(daily_profit < 0)
daily_profit_list = [(str(idx.date()), val) for idx, val in daily_profit.items()]
return {
'backtest_best_day': best_rel,
'backtest_worst_day': worst_rel,
'backtest_best_day_abs': best,
'backtest_worst_day_abs': worst,
'winning_days': winning_days,
'draw_days': draw_days,
'losing_days': losing_days,
'daily_profit': daily_profit_list,
}
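# Minimal sketch of the resample pattern used above (hypothetical trades):
#
# import pandas as pd
# df = pd.DataFrame({
#     'close_date': pd.to_datetime(['2021-01-01 10:00', '2021-01-01 15:00',
#                                   '2021-01-02 09:00'], utc=True),
#     'profit_abs': [1.0, -0.5, 2.0],
# })
# df.resample('1d', on='close_date')['profit_abs'].sum()
# # -> 0.5 for 2021-01-01 and 2.0 for 2021-01-02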
def generate_strategy_stats(pairlist: List[str],
strategy: str,
content: Dict[str, Any],
min_date: datetime, max_date: datetime,
market_change: float
) -> Dict[str, Any]:
"""
:param pairlist: List of pairs to backtest
:param strategy: Strategy name
:param content: Backtest result data in the format:
{'results: results, 'config: config}}.
:param min_date: Backtest start date
:param max_date: Backtest end date
:param market_change: float indicating the market change
:return: Dictionary containing results per strategy and a strategy summary.
"""
results: Dict[str, DataFrame] = content['results']
if not isinstance(results, DataFrame):
return {}
config = content['config']
max_open_trades = min(config['max_open_trades'], len(pairlist))
start_balance = config['dry_run_wallet']
stake_currency = config['stake_currency']
pair_results = generate_pair_metrics(pairlist, stake_currency=stake_currency,
starting_balance=start_balance,
results=results, skip_nan=False)
enter_tag_results = generate_tag_metrics("enter_tag", starting_balance=start_balance,
results=results, skip_nan=False)
exit_reason_stats = generate_exit_reason_stats(max_open_trades=max_open_trades,
results=results)
left_open_results = generate_pair_metrics(
pairlist, stake_currency=stake_currency, starting_balance=start_balance,
results=results.loc[results['exit_reason'] == 'force_exit'], skip_nan=True)
daily_stats = generate_daily_stats(results)
trade_stats = generate_trading_stats(results)
best_pair = max([pair for pair in pair_results if pair['key'] != 'TOTAL'],
key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
worst_pair = min([pair for pair in pair_results if pair['key'] != 'TOTAL'],
key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
winning_profit = results.loc[results['profit_abs'] > 0, 'profit_abs'].sum()
losing_profit = results.loc[results['profit_abs'] < 0, 'profit_abs'].sum()
profit_factor = winning_profit / abs(losing_profit) if losing_profit else 0.0
backtest_days = (max_date - min_date).days or 1
strat_stats = {
'trades': results.to_dict(orient='records'),
'locks': [lock.to_json() for lock in content['locks']],
'best_pair': best_pair,
'worst_pair': worst_pair,
'results_per_pair': pair_results,
'results_per_enter_tag': enter_tag_results,
'exit_reason_summary': exit_reason_stats,
'left_open_trades': left_open_results,
# 'days_breakdown_stats': days_breakdown_stats,
'total_trades': len(results),
'trade_count_long': len(results.loc[~results['is_short']]),
'trade_count_short': len(results.loc[results['is_short']]),
'total_volume': float(results['stake_amount'].sum()),
'avg_stake_amount': results['stake_amount'].mean() if len(results) > 0 else 0,
'profit_mean': results['profit_ratio'].mean() if len(results) > 0 else 0,
'profit_median': results['profit_ratio'].median() if len(results) > 0 else 0,
'profit_total': results['profit_abs'].sum() / start_balance,
'profit_total_long': results.loc[~results['is_short'], 'profit_abs'].sum() / start_balance,
'profit_total_short': results.loc[results['is_short'], 'profit_abs'].sum() / start_balance,
'profit_total_abs': results['profit_abs'].sum(),
'profit_total_long_abs': results.loc[~results['is_short'], 'profit_abs'].sum(),
'profit_total_short_abs': results.loc[results['is_short'], 'profit_abs'].sum(),
'cagr': calculate_cagr(backtest_days, start_balance, content['final_balance']),
'profit_factor': profit_factor,
'backtest_start': min_date.strftime(DATETIME_PRINT_FORMAT),
'backtest_start_ts': int(min_date.timestamp() * 1000),
'backtest_end': max_date.strftime(DATETIME_PRINT_FORMAT),
'backtest_end_ts': int(max_date.timestamp() * 1000),
'backtest_days': backtest_days,
'backtest_run_start_ts': content['backtest_start_time'],
'backtest_run_end_ts': content['backtest_end_time'],
'trades_per_day': round(len(results) / backtest_days, 2),
'market_change': market_change,
'pairlist': pairlist,
'stake_amount': config['stake_amount'],
'stake_currency': config['stake_currency'],
'stake_currency_decimals': decimals_per_coin(config['stake_currency']),
'starting_balance': start_balance,
'dry_run_wallet': start_balance,
'final_balance': content['final_balance'],
'rejected_signals': content['rejected_signals'],
'timedout_entry_orders': content['timedout_entry_orders'],
'timedout_exit_orders': content['timedout_exit_orders'],
'canceled_trade_entries': content['canceled_trade_entries'],
'canceled_entry_orders': content['canceled_entry_orders'],
'replaced_entry_orders': content['replaced_entry_orders'],
'max_open_trades': max_open_trades,
'max_open_trades_setting': (config['max_open_trades']
if config['max_open_trades'] != float('inf') else -1),
'timeframe': config['timeframe'],
'timeframe_detail': config.get('timeframe_detail', ''),
'timerange': config.get('timerange', ''),
'enable_protections': config.get('enable_protections', False),
'strategy_name': strategy,
# Parameters relevant for backtesting
'stoploss': config['stoploss'],
'trailing_stop': config.get('trailing_stop', False),
'trailing_stop_positive': config.get('trailing_stop_positive'),
'trailing_stop_positive_offset': config.get('trailing_stop_positive_offset', 0.0),
'trailing_only_offset_is_reached': config.get('trailing_only_offset_is_reached', False),
'use_custom_stoploss': config.get('use_custom_stoploss', False),
'minimal_roi': config['minimal_roi'],
'use_exit_signal': config['use_exit_signal'],
'exit_profit_only': config['exit_profit_only'],
'exit_profit_offset': config['exit_profit_offset'],
'ignore_roi_if_entry_signal': config['ignore_roi_if_entry_signal'],
**daily_stats,
**trade_stats
}
try:
max_drawdown_legacy, _, _, _, _, _ = calculate_max_drawdown(
results, value_col='profit_ratio')
(drawdown_abs, drawdown_start, drawdown_end, high_val, low_val,
max_drawdown) = calculate_max_drawdown(
results, value_col='profit_abs', starting_balance=start_balance)
# max_relative_drawdown = Underwater
(_, _, _, _, _, max_relative_drawdown) = calculate_max_drawdown(
results, value_col='profit_abs', starting_balance=start_balance, relative=True)
strat_stats.update({
'max_drawdown': max_drawdown_legacy, # Deprecated - do not use
'max_drawdown_account': max_drawdown,
'max_relative_drawdown': max_relative_drawdown,
'max_drawdown_abs': drawdown_abs,
'drawdown_start': drawdown_start.strftime(DATETIME_PRINT_FORMAT),
'drawdown_start_ts': drawdown_start.timestamp() * 1000,
'drawdown_end': drawdown_end.strftime(DATETIME_PRINT_FORMAT),
'drawdown_end_ts': drawdown_end.timestamp() * 1000,
'max_drawdown_low': low_val,
'max_drawdown_high': high_val,
})
csum_min, csum_max = calculate_csum(results, start_balance)
strat_stats.update({
'csum_min': csum_min,
'csum_max': csum_max
})
except ValueError:
strat_stats.update({
'max_drawdown': 0.0,
'max_drawdown_account': 0.0,
'max_relative_drawdown': 0.0,
'max_drawdown_abs': 0.0,
'max_drawdown_low': 0.0,
'max_drawdown_high': 0.0,
'drawdown_start': datetime(1970, 1, 1, tzinfo=timezone.utc),
'drawdown_start_ts': 0,
'drawdown_end': datetime(1970, 1, 1, tzinfo=timezone.utc),
'drawdown_end_ts': 0,
'csum_min': 0,
'csum_max': 0
})
return strat_stats
def generate_backtest_stats(btdata: Dict[str, DataFrame],
all_results: Dict[str, Dict[str, Union[DataFrame, Dict]]],
min_date: datetime, max_date: datetime
) -> Dict[str, Any]:
"""
:param btdata: Backtest data
:param all_results: backtest result - dictionary in the form:
{ Strategy: {'results: results, 'config: config}}.
:param min_date: Backtest start date
:param max_date: Backtest end date
:return: Dictionary containing results per strategy and a strategy summary.
"""
result: Dict[str, Any] = {
'metadata': {},
'strategy': {},
'strategy_comparison': [],
}
market_change = calculate_market_change(btdata, 'close')
metadata = {}
pairlist = list(btdata.keys())
for strategy, content in all_results.items():
strat_stats = generate_strategy_stats(pairlist, strategy, content,
min_date, max_date, market_change=market_change)
metadata[strategy] = {
'run_id': content['run_id'],
'backtest_start_time': content['backtest_start_time'],
}
result['strategy'][strategy] = strat_stats
strategy_results = generate_strategy_comparison(bt_stats=result['strategy'])
result['metadata'] = metadata
result['strategy_comparison'] = strategy_results
return result
###
# Start output section
###
def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generates and returns a text table for the given backtest data and the results dataframe
:param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
headers = _get_line_header('Pair', stake_currency)
floatfmt = _get_line_floatfmt(stake_currency)
output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
] for t in pair_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generate small table outlining Backtest results
:param exit_reason_stats: Exit reason metrics
:param stake_currency: Stakecurrency used
:return: pretty printed table with tabulate as string
"""
headers = [
'Exit Reason',
'Exits',
'Win Draws Loss Win%',
'Avg Profit %',
'Cum Profit %',
f'Tot Profit {stake_currency}',
'Tot Profit %',
]
output = [[
t.get('exit_reason', t.get('sell_reason')), t['trades'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
t['profit_mean_pct'], t['profit_sum_pct'],
round_coin_value(t['profit_total_abs'], stake_currency, False),
t['profit_total_pct'],
] for t in exit_reason_stats]
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
"""
    Generates and returns a text table for the given tag statistics
    :param tag_type: Tag type - "enter_tag" or the exit-tag variant
    :param tag_results: List of Dictionaries - one entry per tag + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
if (tag_type == "enter_tag"):
headers = _get_line_header("TAG", stake_currency)
else:
headers = _get_line_header("TAG", stake_currency, 'Exits')
floatfmt = _get_line_floatfmt(stake_currency)
output = [
[
t['key'] if t['key'] is not None and len(
t['key']) > 0 else "OTHER",
t['trades'],
t['profit_mean_pct'],
t['profit_sum_pct'],
t['profit_total_abs'],
t['profit_total_pct'],
t['duration_avg'],
generate_wins_draws_losses(
t['wins'],
t['draws'],
t['losses'])] for t in tag_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
stake_currency: str, period: str) -> str:
"""
Generate small table with Backtest results by days
:param days_breakdown_stats: Days breakdown metrics
:param stake_currency: Stakecurrency used
:return: pretty printed table with tabulate as string
"""
headers = [
period.capitalize(),
f'Tot Profit {stake_currency}',
'Wins',
'Draws',
'Losses',
]
output = [[
d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
d['wins'], d['draws'], d['loses'],
] for d in days_breakdown_stats]
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
def text_table_strategy(strategy_results, stake_currency: str) -> str:
"""
Generate summary table per strategy
:param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
floatfmt = _get_line_floatfmt(stake_currency)
headers = _get_line_header('Strategy', stake_currency)
# _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
# therefore we slip this column in only for strategy summary here.
headers.append('Drawdown')
# Align drawdown string on the center two space separator.
if 'max_drawdown_account' in strategy_results[0]:
drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
else:
# Support for prior backtest results
drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
dd_pad_per = max([len(dd) for dd in drawdown])
drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
for t, dd in zip(strategy_results, drawdown)]
    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), dd]
        for t, dd in zip(strategy_results, drawdown)]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_add_metrics(strat_results: Dict) -> str:
if len(strat_results['trades']) > 0:
best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])
short_metrics = [
('', ''), # Empty line to improve readability
('Long / Short',
f"{strat_results.get('trade_count_long', 'total_trades')} / "
f"{strat_results.get('trade_count_short', 0)}"),
('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
strat_results['stake_currency'])),
('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
strat_results['stake_currency'])),
] if strat_results.get('trade_count_short', 0) > 0 else []
drawdown_metrics = []
if 'max_relative_drawdown' in strat_results:
# Compatibility to show old hyperopt results
drawdown_metrics.append(
('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
)
drawdown_metrics.extend([
('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
if 'max_drawdown_account' in strat_results else (
'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
strat_results['stake_currency'])),
('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
strat_results['stake_currency'])),
('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
strat_results['stake_currency'])),
('Drawdown Start', strat_results['drawdown_start']),
('Drawdown End', strat_results['drawdown_end']),
])
entry_adjustment_metrics = [
('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
] if strat_results.get('canceled_entry_orders', 0) > 0 else []
# Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
# command stores these results and newer version of freqtrade must be able to handle old
# results with missing new fields.
metrics = [
('Backtesting from', strat_results['backtest_start']),
('Backtesting to', strat_results['backtest_end']),
('Max open trades', strat_results['max_open_trades']),
('', ''), # Empty line to improve readability
('Total/Daily Avg Trades',
f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
('Starting balance', round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])),
('Final balance', round_coin_value(strat_results['final_balance'],
strat_results['stake_currency'])),
('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
strat_results['stake_currency'])),
('Total profit %', f"{strat_results['profit_total']:.2%}"),
('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
in strat_results else 'N/A'),
('Trades per day', strat_results['trades_per_day']),
('Avg. daily profit %',
f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
strat_results['stake_currency'])),
('Total trade volume', round_coin_value(strat_results['total_volume'],
strat_results['stake_currency'])),
*short_metrics,
('', ''), # Empty line to improve readability
('Best Pair', f"{strat_results['best_pair']['key']} "
f"{strat_results['best_pair']['profit_sum']:.2%}"),
('Worst Pair', f"{strat_results['worst_pair']['key']} "
f"{strat_results['worst_pair']['profit_sum']:.2%}"),
('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
('Worst trade', f"{worst_trade['pair']} "
f"{worst_trade['profit_ratio']:.2%}"),
('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
strat_results['stake_currency'])),
('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
strat_results['stake_currency'])),
('Days win/draw/lose', f"{strat_results['winning_days']} / "
f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
            ('Avg. Duration Losers', f"{strat_results['loser_holding_avg']}"),
('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
('Entry/Exit Timeouts',
f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
*entry_adjustment_metrics,
('', ''), # Empty line to improve readability
('Min balance', round_coin_value(strat_results['csum_min'],
strat_results['stake_currency'])),
('Max balance', round_coin_value(strat_results['csum_max'],
strat_results['stake_currency'])),
*drawdown_metrics,
('Market change', f"{strat_results['market_change']:.2%}"),
]
return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
else:
start_balance = round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])
stake_amount = round_coin_value(
strat_results['stake_amount'], strat_results['stake_currency']
) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
message = ("No trades made. "
f"Your starting balance was {start_balance}, "
f"and your stake was {stake_amount}."
)
return message
def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
backtest_breakdown=[]):
"""
Print results for one strategy
"""
# Print results
print(f"Result for strategy {strategy}")
table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
if isinstance(table, str):
print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
print(table)
if (results.get('results_per_enter_tag') is not None
or results.get('results_per_buy_tag') is not None):
# results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
table = text_table_tags(
"enter_tag",
results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
print(table)
exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
table = text_table_exit_reason(exit_reason_stats=exit_reasons,
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
print(table)
table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
print(table)
for period in backtest_breakdown:
days_breakdown_stats = generate_periodic_breakdown_stats(
trade_list=results['trades'], period=period)
table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
stake_currency=stake_currency, period=period)
if isinstance(table, str) and len(table) > 0:
print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
print(table)
table = text_table_add_metrics(results)
if isinstance(table, str) and len(table) > 0:
print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
print(table)
if isinstance(table, str) and len(table) > 0:
print('=' * len(table.splitlines()[0]))
print()
def show_backtest_results(config: Config, backtest_stats: Dict):
stake_currency = config['stake_currency']
for strategy, results in backtest_stats['strategy'].items():
show_backtest_result(
strategy, results, stake_currency,
config.get('backtest_breakdown', []))
if len(backtest_stats['strategy']) > 1:
# Print Strategy summary table
table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
print(f"{results['backtest_start']} -> {results['backtest_end']} |"
f" Max open trades : {results['max_open_trades']}")
print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
print(table)
print('=' * len(table.splitlines()[0]))
print('\nFor more details, please look at the detail tables above')
def show_sorted_pairlist(config: Config, backtest_stats: Dict):
if config.get('backtest_show_pair_list', False):
for strategy, results in backtest_stats['strategy'].items():
print(f"Pairs for Strategy {strategy}: \n[")
for result in results['results_per_pair']:
if result["key"] != 'TOTAL':
print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
print("]")
|
robcaulk/freqai
|
freqtrade/optimize/optimize_reports.py
|
optimize_reports.py
|
py
| 41,632 |
python
|
en
|
code
| 42 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.file_dump_json",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "freqtrade.optimize.backtest_caching.get_backtest_metadata_filename",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.file_dump_json",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "freqtrade.constants.LAST_BT_RESULT_FN",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "pathlib.Path",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.file_dump_json",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.joinpath",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.file_dump_joblib",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.decimals_per_coin",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "tabulate.tabulate",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 395,
"usage_type": "argument"
},
{
"api_name": "freqtrade.data.metrics.calculate_cagr",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "freqtrade.constants.DATETIME_PRINT_FORMAT",
"line_number": 452,
"usage_type": "argument"
},
{
"api_name": "freqtrade.constants.DATETIME_PRINT_FORMAT",
"line_number": 454,
"usage_type": "argument"
},
{
"api_name": "freqtrade.misc.decimals_per_coin",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "freqtrade.data.metrics.calculate_max_drawdown",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "freqtrade.data.metrics.calculate_max_drawdown",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "freqtrade.data.metrics.calculate_max_drawdown",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "freqtrade.constants.DATETIME_PRINT_FORMAT",
"line_number": 515,
"usage_type": "argument"
},
{
"api_name": "freqtrade.constants.DATETIME_PRINT_FORMAT",
"line_number": 517,
"usage_type": "argument"
},
{
"api_name": "freqtrade.data.metrics.calculate_csum",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 538,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 540,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 540,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 549,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 549,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 550,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 550,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 550,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 551,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 561,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 561,
"usage_type": "name"
},
{
"api_name": "freqtrade.data.metrics.calculate_market_change",
"line_number": 566,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 552,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 552,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "tabulate.tabulate",
"line_number": 606,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 610,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 610,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 610,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 637,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 637,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 637,
"usage_type": "name"
},
{
"api_name": "tabulate.tabulate",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 668,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 668,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 668,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 687,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 721,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 725,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 737,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 739,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 753,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 755,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 780,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 793,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 807,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 809,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 822,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 831,
"usage_type": "call"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 833,
"usage_type": "call"
},
{
"api_name": "freqtrade.constants.UNLIMITED_STAKE_AMOUNT",
"line_number": 837,
"usage_type": "name"
},
{
"api_name": "freqtrade.misc.round_coin_value",
"line_number": 835,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 846,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 846,
"usage_type": "name"
},
{
"api_name": "freqtrade.constants.Config",
"line_number": 902,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 902,
"usage_type": "name"
},
{
"api_name": "freqtrade.constants.Config",
"line_number": 922,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 922,
"usage_type": "name"
}
] |
44717248733
|
# Quick and dirty utility to get coordinates for transforming view into
# a bird's eye view. Useful in OCR setups where the camera is in a fixed position
# viewing a straight plane.
import cv2
import numpy as np
def onTrackbarChange(trackbarValue):
pass
def order_points(pts):
# initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
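# Ordering sketch with hypothetical points, illustrating the sum/diff rule:
#   >>> demo = np.array([[100, 10], [10, 10], [10, 100], [100, 100]], dtype="float32")
#   >>> order_points(demo)
#   array([[ 10.,  10.],    # top-left     (smallest x+y)
#          [100.,  10.],    # top-right    (smallest y-x)
#          [100., 100.],    # bottom-right (largest x+y)
#          [ 10., 100.]],   # bottom-left  (largest y-x)
#         dtype=float32)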
def expandPerspective(rect , width, height):
'''Expand the perspective out to the image limits
by finding intersection using point-slope form'''
# Constants
x = 0
y = 1
# Convert coordinate system
rect[:,1] *= -1
(tl, tr, br, bl) = rect
# Find the slope of each of the 4 lines
slopeTop = (tr[y]-tl[y]) / (tr[x]-tl[x])
slopeBottom = (br[y]-bl[y]) / (br[x]-bl[x])
slopeLeft = (tl[y]-bl[y]) / (tl[x]-bl[x])
slopeRight = (tr[y]-br[y]) / (tr[x]-br[x])
# Assign new points based on image size
pointRight = width,0
pointTop = 0,0
pointBottom = width, height * -1.0
pointLeft = 0, height* -1.0
# Find where the new expanded lines intersect using point slope form
    def intersection(m1, m2, x1, x2, y1, y2, orig):
        try:
            x = ((m2*x2 - m1*x1) - (y2 - y1)) / (m2 - m1)
            #y = ((-1.0*m1*y2 + m1*m2*x2 + y1*m2 )-(m1*m2*x1))/(m2-m1)
            y = m1*(x - x1) + y1
            return round(x), round(y)
        except (ZeroDivisionError, ValueError, OverflowError):
            # Parallel or degenerate lines: keep the original corner
            return orig
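    # Derivation of the x formula above: the two lines are y = m1*(x - x1) + y1
    # and y = m2*(x - x2) + y2; setting them equal and solving for x gives
    # x*(m1 - m2) = m1*x1 - m2*x2 + y2 - y1, i.e.
    # x = ((m2*x2 - m1*x1) - (y2 - y1)) / (m2 - m1).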
    new_tr = intersection(slopeTop, slopeRight, pointTop[x], pointRight[x], pointTop[y], pointRight[y], tr)
    new_tl = intersection(slopeTop, slopeLeft, pointTop[x], pointLeft[x], pointTop[y], pointLeft[y], tl)
    new_br = intersection(slopeBottom, slopeRight, pointBottom[x], pointRight[x], pointBottom[y], pointRight[y], br)
    new_bl = intersection(slopeBottom, slopeLeft, pointBottom[x], pointLeft[x], pointBottom[y], pointLeft[y], bl)
# Convert coordinate system back
    new_rect = np.array([new_tl, new_tr, new_br, new_bl], dtype="float32")
new_rect[:,1] *= -1
return new_rect
# Derived from https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example
def four_point_transform(image, pts):
# Unpack points
rect = pts
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0], #tl
[maxWidth - 1, 0], #tr
[maxWidth - 1, maxHeight - 1], #br
[0, maxHeight - 1]], #bl
dtype = "float32")
# Move image to positive coordinates
min_x = round(abs(np.min(rect[:,0])))
min_y = round(abs(np.min(rect[:,1])))
    # Translation matrix that shifts the warp into positive coordinates
    # (np.array instead of the deprecated np.matrix, so use @ for the product)
    T = np.array([[1, 0, min_x],
                  [0, 1, min_y],
                  [0, 0, 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, T @ M, (maxWidth + min_x, maxHeight + min_y), borderMode=cv2.BORDER_TRANSPARENT)
# return the warped image
return warped
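# Minimal usage sketch (hypothetical corner coordinates):
#   corners = order_points(np.array([(50, 60), (400, 80), (420, 300), (40, 280)], dtype="float32"))
#   top_down = four_point_transform(img, corners)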
# Open Image
img = cv2.imread('img\\example1.jpeg')
# Open windows for control, original image, and result
cv2.namedWindow('Control', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Main', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Birds Eye', cv2.WINDOW_AUTOSIZE)
# Track bars for coordinates
cv2.createTrackbar( 'X L Bot', 'Control', 0, img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y L Bot', 'Control', img.shape[0], img.shape[0], onTrackbarChange )
cv2.createTrackbar( 'X L Top', 'Control', 0, img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y L Top', 'Control', 0, img.shape[0], onTrackbarChange )
cv2.createTrackbar( 'X R Top', 'Control', img.shape[1], img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y R Top', 'Control', 0, img.shape[0], onTrackbarChange )
cv2.createTrackbar( 'X R Bot', 'Control', img.shape[1], img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y R Bot', 'Control', img.shape[0], img.shape[0], onTrackbarChange )
# Loop
while(1):
# Get Track Bar positions
    pts = np.array([
        (cv2.getTrackbarPos('X L Bot', 'Control'), cv2.getTrackbarPos('Y L Bot', 'Control')),
        (cv2.getTrackbarPos('X L Top', 'Control'), cv2.getTrackbarPos('Y L Top', 'Control')),
        (cv2.getTrackbarPos('X R Top', 'Control'), cv2.getTrackbarPos('Y R Top', 'Control')),
        (cv2.getTrackbarPos('X R Bot', 'Control'), cv2.getTrackbarPos('Y R Bot', 'Control')),
    ], dtype="int32")
# Draw the perspective
imgConnectedPoints = cv2.polylines(img.copy(), [pts], isClosed = True, color = (0,255,0), thickness = 3)
cv2.imshow('Main',imgConnectedPoints)
# Draw the transformed bird's eye view
warped = four_point_transform(img, expandPerspective(order_points(pts), img.shape[1], img.shape[0]))
cv2.imshow('Birds Eye',warped)
# Exit
    if cv2.waitKey(1) == 27:
        break
cv2.destroyAllWindows()
|
hellkrusher/BirdsEyePerspectiveTransformationUtility
|
BirdsEyePerspectiveTransformationUtility.py
|
BirdsEyePerspectiveTransformationUtility.py
|
py
| 6,413 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "cv2.getPerspectiveTransform",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "cv2.BORDER_TRANSPARENT",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_AUTOSIZE",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "cv2.namedWindow",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_AUTOSIZE",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "cv2.namedWindow",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_AUTOSIZE",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cv2.getTrackbarPos",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cv2.getTrackbarPos",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "cv2.getTrackbarPos",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "cv2.getTrackbarPos",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "cv2.polylines",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 166,
"usage_type": "call"
}
] |
72465379387
|
from django import forms
from .models import Meme
class MemeForm(forms.ModelForm):
    class Meta:
model = Meme
fields = ('description','category','meme_img')
widgets = {
'description': forms.TextInput(attrs={
'class': 'field',
'placeholder': 'Enter Description'
}),
'category': forms.Select(choices=model.CATEGORY_CHOICES, attrs={
'class': 'choice-control',
'placeholder': 'Choose category',
}),
'meme_img': forms.FileInput(attrs={
'class': 'upload-control',
'placeholder': 'Choose file',
})
}
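# Minimal usage sketch (hypothetical view function, not part of this app's
# code), showing how a ModelForm with a FileInput is typically bound; binding
# request.FILES is what delivers the uploaded image to 'meme_img':
#
#   def add_meme(request):
#       form = MemeForm(request.POST or None, request.FILES or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#       return render(request, 'meme/add.html', {'form': form})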
|
omroczkowski/h8gag
|
meme/forms.py
|
forms.py
|
py
| 701 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "models.Meme",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.Select",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.forms.FileInput",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 17,
"usage_type": "name"
}
] |
15387648798
|
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs  # samples_generator was removed in scikit-learn 0.24
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def dataset():
(X, y) = make_blobs(n_samples=250, n_features=2, centers=2,
cluster_std=1.05, random_state=20)
X = np.c_[np.ones((X.shape[0])), X]
return X,y
def initialize_weights(p):
return np.random.uniform(size = p)
def make_predictions(X,W,link):
return link(X.dot(W))
def cross_entropy(y,preds):
y = np.array([y])
return -np.sum(y*np.log(preds)+(1-y)*np.log(1-preds))/y.shape[0]
def compute_gradient(preds,X,y,cost=cross_entropy,link = sigmoid):
y = np.array([y])
if cost == cross_entropy and link == sigmoid:
return X.T.dot(preds-y)/y.shape[0]
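# Why the gradient is this simple: with the sigmoid link p = sigmoid(Xw) and
# cross-entropy loss L = -sum(y*log(p) + (1-y)*log(1-p))/n, the chain rule
# gives dL/dw = X.T @ (p - y) / n, because sigmoid'(z) = p*(1-p) cancels the
# p*(1-p) denominator coming from dL/dp. (Sketch of the standard derivation.)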
def sgd(X,y,cost = cross_entropy,link=sigmoid,
alpha = 0.01,eps = 0.0001, maxit = 1000):
W = initialize_weights(X.shape[1])
n = X.shape[0]
ind = np.random.permutation(np.arange(n))
X = X[ind]
y = y[ind]
i = 0
losses = []
preds = make_predictions(X[i:i+1,:],W,link)
losses.append(cost(y[i],preds))
it = 0
while True:
it += 1
print("Iteration n.{}".format(it))
gradient = compute_gradient(preds,X[i:i+1,:],y[i],cost,link)
W -= alpha*gradient
i = i + 1
if i == n:
ind = np.random.permutation(np.arange(n))
X = X[ind]
y = y[ind]
i = 0
preds = make_predictions(X[i:i+1,:],W,link)
l_new = cost(y[i],preds)
losses.append(l_new)
if it == maxit:
break
"""if it > 250 and abs(l_new-losses[it-250])<eps:
break"""
return W,losses
#generate the data
X,y = dataset()
# plot the points
plt.scatter(X[:, 1], X[:, 2], marker="o", c=y)
theta,losses = sgd(X,y,cost = cross_entropy,link = sigmoid,
alpha = 0.01,eps = 0.0001, maxit = 1000)
# plot the decision boundary theta0 + theta1*x1 + theta2*x2 = 0
# (use the feature column X[:, 1]; column 0 is the bias term)
Y = (-theta[0] - (theta[1] * X[:, 1])) / theta[2]
plt.plot(X[:, 1], Y, "r-")
# construct a figure that plots the loss over time
fig = plt.figure()
plt.plot(np.arange(0, len(losses)), losses)
fig.suptitle("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show()
|
nickruggeri/Machine_Learning
|
AdaGrad, ADAM and AMSGrad/Codes/my_sgd.py
|
my_sgd.py
|
py
| 2,198 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.exp",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.samples_generator.make_blobs",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.c_",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
}
] |
2677444598
|
# This code is based on https://github.com/openai/guided-diffusion
"""
Train a diffusion model on images.
"""
import os
import json
from mdm_utils.fixseed import fixseed
from mdm_utils.parser_util import train_args
from mdm_utils import dist_util
from train_utils.train_loop import TrainLoop
from mdm_utils.model_util import create_model_and_diffusion
from train_utils.train_platforms import ClearmlPlatform, TensorboardPlatform, NoPlatform # required for the eval operation
from train_utils.ted_loader import build_dataloader
def main():
args = train_args()
save_dir = f"{args.save_dir}/{args.exp}"
args.save_dir = save_dir
print("save_dir:", save_dir)
fixseed(args.seed)
train_platform_type = eval(args.train_platform_type)
train_platform = train_platform_type(args.save_dir)
train_platform.report_args(args, name='Args')
args_path = os.path.join(args.save_dir, 'args.json')
with open(args_path, 'w') as fw:
json.dump(vars(args), fw, indent=4, sort_keys=True)
dist_util.setup_dist(args.device)
print("creating data loader...")
data = build_dataloader('train', args, shuffle = True)
print("creating model and diffusion...")
lang_model = data.dataset.lang_model
args.lang_model = lang_model
model, diffusion = create_model_and_diffusion(args, '')
model.to(dist_util.dev())
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters_wo_clip()) / 1000000.0))
print("Training...")
TrainLoop(args, train_platform, model, diffusion, data).run_loop()
train_platform.close()
if __name__ == "__main__":
main()
|
zyhbili/LivelySpeaker
|
scripts/train_RAG.py
|
train_RAG.py
|
py
| 1,624 |
python
|
en
|
code
| 38 |
github-code
|
6
|
[
{
"api_name": "mdm_utils.parser_util.train_args",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "mdm_utils.fixseed.fixseed",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "mdm_utils.dist_util.setup_dist",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "mdm_utils.dist_util",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "train_utils.ted_loader.build_dataloader",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "mdm_utils.model_util.create_model_and_diffusion",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mdm_utils.dist_util.dev",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "mdm_utils.dist_util",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "train_utils.train_loop.TrainLoop",
"line_number": 43,
"usage_type": "call"
}
] |
45386087756
|
"""
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from theory.conf import settings
from theory.core.files.base import File
from theory.core.files import temp as tempfile
from theory.utils.encoding import forceStr
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
  An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, contentType=None, size=None, charset=None, contentTypeExtra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.contentType = contentType
self.charset = charset
self.contentTypeExtra = contentTypeExtra
def __repr__(self):
return forceStr("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.contentType))
def _getName(self):
return self._name
def _setName(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_getName, _setName)
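# Sanitisation sketch (hypothetical value), assuming the base ``File`` class
# routes its ``name`` assignment through the property above:
#   >>> f = UploadedFile(name='../../etc/passwd')
#   >>> f.name
#   'passwd'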
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, contentType, size, charset, contentTypeExtra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, contentType, size, charset, contentTypeExtra)
def temporaryFilePath(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.closeCalled and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, fieldName, name, contentType, size, charset, contentTypeExtra=None):
super(InMemoryUploadedFile, self).__init__(file, name, contentType, size, charset, contentTypeExtra)
self.fieldName = fieldName
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunkSize=None):
self.file.seek(0)
yield self.read()
def multipleChunks(self, chunkSize=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, contentType='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
contentType, len(content), None, None)
@classmethod
def fromDict(cls, fileDict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(fileDict['filename'],
fileDict['content'],
fileDict.get('content-type', 'text/plain'))
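# Example (hypothetical dict) mirroring the keys documented above; read()
# is proxied through to the underlying BytesIO:
#   f = SimpleUploadedFile.fromDict({'filename': 'hello.txt', 'content': b'hi'})
#   f.read()  # -> b'hi'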
|
grapemix/theory
|
theory/core/files/uploadedfile.py
|
uploadedfile.py
|
py
| 3,916 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "theory.core.files.base.File",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "theory.utils.encoding.forceStr",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "theory.conf.settings.FILE_UPLOAD_TEMP_DIR",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "theory.conf.settings",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "theory.core.files.temp.NamedTemporaryFile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "theory.core.files.temp",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "theory.conf.settings.FILE_UPLOAD_TEMP_DIR",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "theory.conf.settings",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "theory.core.files.temp.NamedTemporaryFile",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "theory.core.files.temp",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "errno.ENOENT",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 114,
"usage_type": "call"
}
] |
369174475
|
from typing import Optional
# check whether the tree is symmetric
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isSymmetric(self, root: Optional[TreeNode]) -> bool:
        if not root or (not root.left and not root.right):
return True
stack=[root.left,root.right]
while stack:
num_items=len(stack)
for i in range(num_items//2):
node1=stack.pop()
node2=stack.pop()
                if (node1 and not node2) or (node2 and not node1):
return False
elif not node1 and not node2:
continue
                elif node1.val != node2.val:
return False
else:
stack.append(node1.left)
stack.append(node2.right)
stack.append(node1.right)
stack.append(node2.left)
return True
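# A recursive sketch of the same mirror check, for comparison (not part of
# the original solution): left.left must mirror right.right and left.right
# must mirror right.left, so _mirror(root.left, root.right) agrees with
# Solution().isSymmetric(root) for any root.
def _mirror(a, b):
    if not a and not b:
        return True
    if not a or not b:
        return False
    return a.val == b.val and _mirror(a.left, b.right) and _mirror(a.right, b.left)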
# Build a symmetric test tree through the constructor so the nodes use the
# ``val`` attribute the class actually defines:
root = TreeNode("root",
                left=TreeNode("a", left=TreeNode("a")),
                right=TreeNode("a", right=TreeNode("a")))
if Solution().isSymmetric(root):
    print("true")
else:
    print("false")
|
ArdaiArtur/PY
|
LeetCode/SymetricTree.py
|
SymetricTree.py
|
py
| 1,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 10,
"usage_type": "name"
}
] |
71623457467
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:31:08 2020
@author: dkafkes
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('master_stdev.csv', header = 0, skiprows = list(np.arange(1, 177)))
df.drop(columns = ['Filename'], inplace = True)
df = df.set_index('Unnamed: 0')
#%%
x = df['B:IMINER']
array, bins, patches = plt.hist(x, bins = 100)
plt.title("B:IMINER Standard Deviation Spread")
plt.xlabel("Average Standard Deviation")
plt.ylabel("Log(Files)")
plt.ylim(0.1, 1000)
plt.semilogy()
plt.show()
|
dkafkes/simplified-ai-for-accelerators
|
data pipeline/histogram.py
|
histogram.py
|
py
| 577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.semilogy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
}
] |
72066886907
|
import sys, os, re
import unittest
from itertools import product as prod
from timeit import Timer
import time
import math
import logging
import numpy as np
from scipy.optimize import fmin, fmin_bfgs
from hydrodiy.stat.transform import BoxCox2
from hydrodiy.data.containers import Vector
from pygme.model import Model, ParameterCheckValueError
from pygme.calibration import Calibration, CalibParamsVector
from pygme.calibration import ObjFunSSE, ObjFunBCSSE, \
ObjFunKGE, ObjFunBiasBCSSE
from pygme.calibration import CalibrationExplorationError
from dummy import Dummy, CalibrationDummy, ObjFunSSEargs
BC = BoxCox2()
# Set logger
LOGGER = logging.getLogger('pygme.Calibration')
fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
ft = logging.Formatter(fmt)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(ft)
LOGGER.addHandler(sh)
class ObjFunTestCase(unittest.TestCase):
def setUp(self):
print('\t=> ObjFunTestCase')
nval = 1000
obs = np.random.uniform(0., 1, size=nval)
idx = np.random.choice(np.arange(nval), nval//100)
obs[idx] = np.nan
self.obs = obs
sim = np.random.uniform(0., 1, size=nval)
idx = np.random.choice(np.arange(nval), nval//100)
sim[idx] = np.nan
self.sim = sim
def test_print(self):
of = ObjFunBCSSE(0.2)
print(of)
of = ObjFunSSE()
print(of)
of = ObjFunKGE()
print(of)
def test_SSE(self):
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
of = ObjFunSSE()
value = of.compute(obs[idx], sim[idx])
err = self.obs-self.sim
expected = np.nansum(err*err)
self.assertTrue(np.allclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
def test_KGE(self):
of = ObjFunKGE()
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
value = of.compute(obs[idx], sim[idx])
obsok, simok = obs[idx], sim[idx]
bias = np.mean(simok)/np.mean(obsok)
rstd = np.std(simok)/np.std(obsok)
corr = np.corrcoef(obsok, simok)[0, 1]
expected = 1-math.sqrt((1-bias)**2+(1-rstd)**2+(1-corr)**2)
self.assertTrue(np.allclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
def test_BCSSE(self):
''' test the BCSSE objfun '''
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
for lam, nu in prod([0.1, 0.5, 1., 2], [1e-4, 1e-2, 1]):
of = ObjFunBCSSE(lam, nu)
assert of.name == f"BCSSE{lam:0.1f}"
value = of.compute(obs[idx], sim[idx])
BC.lam = lam
BC.nu = nu
err = BC.forward(obs)-BC.forward(sim)
expected = np.nansum(err*err)
self.assertTrue(np.isclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
def test_BiasBCSSE(self):
''' test the BiasBCSSE objfun '''
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
mo = obs[idx].mean()
ms = sim[idx].mean()
for lam, nu in prod([0.1, 0.5, 1., 2], [1e-4, 1e-2, 1]):
of = ObjFunBiasBCSSE(lam, nu)
assert of.name == f"BiasBCSSE{lam:0.1f}"
value = of.compute(obs[idx], sim[idx])
BC.lam = lam
BC.nu = nu
err = BC.forward(obs)-BC.forward(sim)
expected = np.nansum(err*err)*(1+abs(ms-mo)/mo)
self.assertTrue(np.isclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
class CalibParamsVectorTestCase(unittest.TestCase):
def setUp(self):
print('\t=> CalibParamsVectorTestCase')
config = Vector([])
nval = 10
params = Vector(['X{0}'.format(k) for k in range(1, nval+1)],
defaults=np.ones(nval), mins=np.zeros(nval), \
maxs=np.ones(nval)*5)
states = Vector(['S{0}'.format(k) for k in range(1, 3)])
self.model = Model('test', config, params, states, 2, 2)
def test_default(self):
''' Test setting default values '''
calparams = CalibParamsVector(self.model)
self.assertTrue(np.all([s1==s2 for s1, s2 in zip(calparams.names, \
self.model.params.names)]))
self.assertTrue(np.allclose(calparams.defaults, \
self.model.params.defaults))
def test_errors_infinite(self):
''' Test errors for finite values in calibrated params '''
nval = self.model.params.nval
cp = Vector(['X{0}'.format(k) for k in range(1, nval+1)])
try:
calparams = CalibParamsVector(self.model, cp)
except ValueError as err:
self.assertTrue(str(err).startswith('Expected no infinite'))
else:
raise ValueError('Problem with error handling')
def test_errors_funs(self):
''' Test errors related to trans2true and true2trans '''
nval = self.model.params.nval
cp = Vector(['X{0}'.format(k) for k in range(1, nval+1)])
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-1]*nval, maxs=[1]*nval)
fun1 = lambda x: 'string1'
fun2 = lambda x: 'string2'
try:
calparams = CalibParamsVector(self.model, cp, fun1, fun2)
except ValueError as err:
self.assertTrue(str(err).startswith(\
'Problem with trans2true for'))
else:
raise ValueError('Problem with error handling')
fun = lambda x: np.column_stack([x, x])
try:
calparams = CalibParamsVector(self.model, cp, fun, fun)
except ValueError as err:
self.assertTrue(str(err).startswith(\
'Problem with trans2true for'))
else:
raise ValueError('Problem with error handling')
def test_identity(self):
nval = self.model.params.nval
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-1]*nval, maxs=[1]*nval)
calparams = CalibParamsVector(self.model, cp)
for i in range(10):
val = np.random.uniform(0, 1, nval)
calparams.values = val
self.assertTrue(np.allclose(self.model.params.values, val))
val = np.random.uniform(0, 1, nval)
calparams.truevalues = val
self.assertTrue(np.allclose(calparams.values, val))
def test_common_transform(self):
nval = self.model.params.nval
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-1]*nval, maxs=[1]*nval)
for i, trans in enumerate(['exp', 'sinh']):
calparams = CalibParamsVector(self.model, cp, trans2true=trans)
if i == 0:
trans2true = np.exp
true2trans = np.log
elif i == 1:
trans2true = np.sinh
true2trans = np.arcsinh
for i in range(10):
val = np.random.uniform(0, 1, nval)
calparams.values = val
self.assertTrue(np.allclose(calparams.truevalues, \
trans2true(val)))
self.assertTrue(np.allclose(self.model.params.values, \
trans2true(val)))
val = np.random.uniform(math.exp(-1), 1, nval)
calparams.truevalues = val
self.assertTrue(np.allclose(calparams.values, \
true2trans(val)))
def test_fixed(self):
nval = self.model.params.nval
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-5]*nval, maxs=[5]*nval)
# Choose a fixed value below the max value
x1 = 4
fixed = {'X1':x1}
calparams = CalibParamsVector(self.model, cp, fixed=fixed)
for i in range(10):
val = np.random.uniform(0, 1, nval)
calparams.values = val
val2 = val.copy()
val2[0] = x1
self.assertTrue(np.allclose(self.model.params.values, val2))
val = np.random.uniform(0, 1, nval)
calparams.truevalues = val
val2 = val.copy()
val2[0] = x1
self.assertTrue(np.allclose(calparams.truevalues, val2))
self.assertTrue(np.allclose(calparams.values, val2))
class CalibrationTestCase(unittest.TestCase):
def setUp(self):
print('\t=> CalibrationTestCase')
# Create random inputs
inputs = np.random.exponential(1, (100, 2))
# Allocate model
dum = Dummy()
dum.allocate(inputs, 2)
        # Run model to create a pseudo-obs
params = dum.params.defaults+0.1
dum.params.values = params
dum.run()
obs = dum.outputs[:, 0].copy()
# Store calibration set up
self.inputs = inputs
self.params = params
self.obs = obs
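# Calibration period: every time step after the 10-step warmup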
self.ical = np.arange(10, obs.shape[0])
def test_calibration_instance_print(self):
''' Test printing of calibration object '''
calib = CalibrationDummy(warmup=10)
calib.allocate(self.obs, self.inputs)
calib_str = '{0}'.format(calib)
def test_calibration_errors(self):
''' Test calibration errors '''
inputs = np.random.uniform(0, 1, (1000, 2))
obs = np.random.uniform(0, 1, 1000)
cp = Vector(['tX1', 'tX2'], mins=[-10]*2, maxs=[10]*2, \
defaults=[1, 0])
calparams = CalibParamsVector(Dummy(), cp, trans2true='exp')
calib = Calibration(calparams)
try:
plib = calib.paramslib
except ValueError as err:
self.assertTrue(str(err).startswith(\
'Trying to access paramslib, but '))
else:
raise ValueError('Problem with error handling')
try:
calib.ical = obs==obs
except ValueError as err:
self.assertTrue(str(err).startswith('Trying to get obs, but '))
else:
raise ValueError('Problem with error handling')
def test_explore(self):
''' Test explore function '''
calib = CalibrationDummy(warmup=10)
plib = np.random.uniform(-0.1, 0.1, size=(1000, 2)) \
+ self.params[None, :]
calib.paramslib = plib
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, explo_ofun = calib.explore()
self.assertTrue(np.allclose(start, self.params, rtol=0., atol=0.05))
def test_explore_error(self):
''' Test calibration exploration error '''
class ObjFunError(ObjFunSSE):
''' SSE objective function raising an error for low values (testing only) '''
def __init__(self):
super(ObjFunError, self).__init__()
self.name = 'Error'
def compute(self, obs, sim, **kwargs):
of = super(ObjFunError, self).compute(obs, sim)
if of < 1e-1:
# Artificial error generation, used only for testing
raise ValueError('Error in exploration')
return of
calib = CalibrationDummy(warmup=10, objfun=ObjFunError())
plib = np.random.uniform(-0.1, 0.1, size=(1000, 2)) \
+ self.params[None, :]
calib.paramslib = plib
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, explo_ofun = calib.explore()
# Check that no objective function is below 1e-1
# because the objective function does not allow it
self.assertTrue(np.all(explo_ofun > 1e-1))
# Check that we trigger an error during exploration
try:
start, _, explo_ofun = calib.explore(raise_error=True)
except CalibrationExplorationError as err:
self.assertTrue(str(err).startswith('Error in explo'))
else:
raise ValueError('Problem with error handling')
def test_explore_fit(self):
''' Test explore and fit functions '''
calib = CalibrationDummy(warmup=10)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
final, _, _ = calib.fit(iprint=10,
maxfun=100000, ftol=1e-8)
ck = np.allclose(calib.model.params.values, self.params, \
atol=1e-3, rtol=0.)
self.assertTrue(ck)
def test_fit_args(self):
''' Test passing arguments to objective function '''
kwargs = {'lam':1.0, 'idx':np.arange(len(self.ical))}
calib = CalibrationDummy(objfun=ObjFunSSEargs(), \
warmup=10, \
objfun_kwargs=kwargs)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
final, _, _ = calib.fit(iprint=10,
maxfun=100000, ftol=1e-8)
ck = np.allclose(calib.model.params.values, self.params, \
atol=1e-3, rtol=0.)
self.assertTrue(ck)
def test_checkvalues(self):
''' Test the checkvalues hook rejecting invalid parameter sets '''
def fun(values):
if values[1] < 0.5:
raise ParameterCheckValueError
calib = CalibrationDummy(warmup=10, checkvalues=fun)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, ofuns = calib.explore()
idx = calib.paramslib[:, 1] < 0.5
self.assertTrue(np.all(np.isinf(ofuns[idx])))
def test_fixed(self):
''' Test calibration with fixed parameters '''
# Test error
fixed = {'X10':self.params[0]+3}
try:
calib = CalibrationDummy(warmup=10, fixed=fixed)
except ValueError as err:
self.assertTrue(str(err).startswith('Expected names '+\
'of fixed parameters'))
else:
raise ValueError('Problem with error handling')
fixed = {'X1':self.params[0]+3}
calib = CalibrationDummy(warmup=10, fixed=fixed)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
final, _, _ = calib.fit(iprint=10,
maxfun=100000, ftol=1e-8)
self.assertEqual(fixed, calib.fixed)
self.assertTrue(np.allclose(fixed['X1'], start[0]))
self.assertTrue(np.allclose(fixed['X1'], final[0]))
self.assertTrue(np.allclose(fixed['X1'], \
calib.model.params.values[0]))
def test_workflow(self):
''' Test calibration workflow (i.e. explore+fit) '''
calib = CalibrationDummy(warmup=10)
# Check parameters are not close at the beginning
ck = ~np.allclose(calib.model.params.values, self.params)
self.assertTrue(ck)
# Run calibration
calib.workflow(self.obs, self.inputs, self.ical, iprint=0,
maxfun=100000, ftol=1e-8)
# Test parameters at the end
ck = np.allclose(calib.model.params.values, self.params, \
atol=1e-5, rtol=0.)
self.assertTrue(ck)
def test_customised_objfun(self):
''' Test customised objective function '''
# Define a customized objective function
objfun = ObjFunBCSSE(lam=0.8, nu=1e-5)
# Instantiate a new calib object and apply the objfun
calib = CalibrationDummy(warmup=10, objfun=objfun)
# Check parameters are not close at the beginning
ck = ~np.allclose(calib.model.params.values, self.params)
self.assertTrue(ck)
# Run calibration
calib.workflow(self.obs, self.inputs, self.ical, iprint=0,
maxfun=100000, ftol=1e-8)
# Test parameters at the end
ck = np.allclose(calib.model.params.values, self.params, atol=1e-3)
self.assertTrue(ck)
def test_optimizers(self):
''' Test a range of optimizers from scipy '''
calib = CalibrationDummy(objfun=ObjFunSSE(), \
warmup=10)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
for iopt, opt in enumerate([fmin, fmin_bfgs]):
if opt.__name__ in ['fmin', 'fmin_powell']:
kwargs = dict(maxfun=100000, ftol=1e-8)
else:
kwargs = dict(maxiter=100000, gtol=1e-8)
final, _, _ = calib.fit(start=start, iprint=10, optimizer=opt, \
**kwargs)
ck = np.allclose(calib.model.params.values, self.params, \
atol=5e-3, rtol=0.)
if not ck:
print(('Failing optimizer test {0} '+\
'expected params={1}, got {2}').format(\
iopt+1, \
' '.join(list(np.round(\
self.params, 2).astype(str))), \
' '.join(list(np.round(\
calib.model.params.values, 2).astype(str)))
))
self.assertTrue(ck)
if __name__ == "__main__":
unittest.main()
|
csiro-hydroinformatics/pygme
|
tests/test_pygme_calibration.py
|
test_pygme_calibration.py
|
py
| 17,767 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "hydrodiy.stat.transform.BoxCox2",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pygme.calibration.ObjFunBCSSE",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunSSE",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunKGE",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunSSE",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunKGE",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.corrcoef",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunBCSSE",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunBiasBCSSE",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.nansum",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pygme.model.Model",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.column_stack",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "numpy.log",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "numpy.sinh",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "numpy.arcsinh",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.exponential",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "dummy.Dummy",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "hydrodiy.data.containers.Vector",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibParamsVector",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "dummy.Dummy",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.Calibration",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "numpy.allclose",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunSSE",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "numpy.all",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.CalibrationExplorationError",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "dummy.ObjFunSSEargs",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "pygme.model.ParameterCheckValueError",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "numpy.isinf",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunBCSSE",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "dummy.CalibrationDummy",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "pygme.calibration.ObjFunSSE",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.fmin",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "scipy.optimize.fmin_bfgs",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "numpy.allclose",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 537,
"usage_type": "call"
}
] |
16737781321
|
import unittest
import json
from mock import patch
import music_server
from music_server import youtube_search
from music_server import config
class YoutubeSearchTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_format_query(self):
# given
search_query = "simple_query"
expected_result = "http://youtube.com/results?search_query=simple_query"
# when
query = youtube_search.format_query(search_query)
# then
self.assertEqual(query, expected_result)
def test_format_query_with_space(self):
# given
search_query = "a b"
expected_result = "http://youtube.com/results?search_query=a+b"
# when
query = youtube_search.format_query(search_query)
# then
self.assertEqual(query, expected_result)
def test_format_with_plus(self):
# given
search_query = "a+b"
expected_result = "http://youtube.com/results?search_query=a%2Bb"
# when
query = youtube_search.format_query(search_query)
# then
self.assertEqual(query, expected_result)
def test_fetch_first_result_when_empty(self):
self.assertRaises(TypeError, youtube_search.fetch_results, None)
# empty list
def test_fetch_first_result_when_no_result(self):
# given
html_content = "wrong html content"
# when
result = youtube_search.fetch_results(html_content)
# then
self.assertFalse(result, 'Result should be an empty list')
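# The following tests rely on HTML/JSON fixtures stored in the
# test resources folder (see config.test_resources_folder)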
def test_fetch_results(self):
# given
with open(config.test_resources_folder + 'youtube_search_pratos_osni.html', 'r') as myfile:
html_content = myfile.read()
with open(config.test_resources_folder + 'youtube_search_pratos_osni.json', 'r') as myfile2:
expected_links = json.loads(myfile2.read())
# when
results = youtube_search.fetch_results(html_content)
# then
self.assertEqual(results, expected_links)
@patch('music_server.youtube_search.get_html')
def test_youtube_search(self, test_patch):
# given
with open(music_server.config.test_resources_folder + 'youtube_search_pratos_osni.html') as fh:
mock_html = fh.read()
test_patch.return_value = mock_html
with open(config.test_resources_folder + 'youtube_search_pratos_osni.json', 'r') as myfile2:
expected_links = json.loads(myfile2.read())
# when
results = youtube_search.YoutubeSearch("pratos osni").video_ids
# then
self.assertEqual(results, expected_links)
def test_search_empty(self):
# given
search_query = ''
# when
results = youtube_search.YoutubeSearch(search_query)
# then
self.assertTrue(results)
def test_search_none(self):
# given
search_query = None
# when
results = youtube_search.YoutubeSearch(search_query)
# then
self.assertTrue(results)
if __name__ == '__main__':
unittest.main()
|
Sun42/music_server
|
tests/youtube_search_tests.py
|
youtube_search_tests.py
|
py
| 3,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "music_server.youtube_search.format_query",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "music_server.youtube_search.format_query",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "music_server.youtube_search.format_query",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "music_server.youtube_search.fetch_results",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "music_server.youtube_search",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "music_server.youtube_search.fetch_results",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "music_server.config.test_resources_folder",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "music_server.config",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "music_server.config.test_resources_folder",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "music_server.config",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search.fetch_results",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "music_server.config",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "music_server.config.test_resources_folder",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "music_server.config",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search.YoutubeSearch",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "mock.patch",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search.YoutubeSearch",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "music_server.youtube_search.YoutubeSearch",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "music_server.youtube_search",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 100,
"usage_type": "call"
}
] |
31266417589
|
import pygame
import os
from tkinter import messagebox
import time
import threading
pygame.init()
pygame.mixer.set_num_channels(20)
width = 150
height = 151
channel = 0
stop_music = False
fon_m = pygame.mixer.music
fon_m.load(os.path.join("sounds", "fon_m.mp3"))
fon_m.play()
fon = pygame.image.load(os.path.join("images", "fon.png"))
HEIGHT, WIDTH = fon.get_height(), fon.get_width()
dis = pygame.display.set_mode([WIDTH, HEIGHT])
dis.blit(pygame.transform.scale(pygame.image.load(os.path.join("images", "звук.png")), (WIDTH, HEIGHT)), (0, 0))
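# Monster: a creature placed on the map; money is its income per minute,
# max_money caps the uncollected income, count is its index in the shop list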
class Monster():
def __init__(self, name, x, y, money, max_money, count):
self.image = pygame.transform.scale(pygame.image.load(os.path.join("images", f"{name}.png")), (width, height))
self.x = x
self.y = y
self.money = money
self.max_money = max_money
self.count = count
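# Magazine: a shop entry holding the monster image, its egg image and
# the incubation time in seconds (all_seconds)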
class Magazine():
def __init__(self, name, all_seconds):
self.image = pygame.transform.scale(pygame.image.load(os.path.join("images", f"{name}.png")), (width, height))
self.name = name
self.all_seconds = all_seconds
self.all_seconds_pit = all_seconds
self.image_egg = pygame.transform.scale(pygame.image.load(os.path.join("images", f"{name}_egg.png")), (100, 71))
def draw_image(monsters):
# Blit every monster in the list at its stored position
for x in monsters:
dis.blit(x.image, (x.x, x.y))
def monster_draw():
for elem in all_vorabularu:
draw_image(elem)
def muzic(monsters, muz):
# Play sound file muz on the next free mixer channel,
# but only if at least one monster of this kind is placed
global channel
if len(monsters) > 0:
try:
pygame.mixer.Channel(channel).play(pygame.mixer.Sound(os.path.join("sounds", muz)))
channel += 1
except FileNotFoundError:
pass
def all_music():
global channel
global stop_music
for i in range(channel):
pygame.mixer.Channel(i).stop()
channel = 0
if not stop_music:
muzic(bas, "ba_m.mp3")
muzic(tus, "tu_m.mp3")
muzic(mas, 'ma_m.mp3')
muzic(pas, 'pa_m.mp3')
muzic(tis, 'ti_m.mpeg')
muzic(mars, 'mar_m.mp3')
muzic(ras, 'ra_m.ogg')
muzic(sms, 'sm_m.mp3.mpeg')
muzic(lus, 'la_m.mp3')
muzic(izs, 'iz_m.mpeg')
muzic(izs, 'iz_m.mp3')
threading.Timer(7, all_music).start()
def monster_money_every_minuts():
global times
times += 60
all_draw()
threading.Timer(60, monster_money_every_minuts).start()
def staving(vocabulary, name, money, max_money, count):
global file
global vrem_name
vocabulary.append(Monster(name, mouse[0] - width // 2, mouse[1] - height // 2, money, max_money, count))
file.write(str(vocabulary[-1].x) + ' '+ str(vocabulary[-1].y) + ' ' + str(name) + '\n')
vrem_name = ''
all_draw()
pygame.display.update()
def staving_(vocabulary, name, x, y, money, max_money, count):
vocabulary.append(Monster(name, x, y, money, max_money, count))
def draw_money():
x, y = 0, 0
text = font.render(str(my_money), True, YELLOW, BLACK)
dis.blit(text, (x, y))
y += text.get_height()
text = font.render(str(almaz), True, BLUE, BLACK)
dis.blit(text, (x, y))
def close_coor():
for i in range(mouse[0] - width//2, mouse[0] + width//2):
for j in range(mouse[1] - height//2, mouse[1] + height//2):
close.append((i, j))
def forx(vocabulary: list, elem: str):
# If the click hits a monster of this kind, register it as a
# breeding candidate for the nursery
global ans
global count
for x in vocabulary:
for i in range(x.x, x.x + width):
for j in range(x.y, x.y + height):
if mouse == (i, j):
ans += elem
monsters_in_pit.append(x.count)
count += 1
def clik_monster(vocabulary):
for x in vocabulary:
for i in range(x.x, x.x + width):
for j in range(x.y, x.y + height):
if mouse == (i, j):
return True
def magazin_clik(idx, image_x):
# Handle a click on shop item idx: buy the egg if the player has
# enough money and the incubator slot is free
global stav
global game
global eggs
global vrem_name
global all_seconds
global my_money
global monster_in_p
mouse = pygame.mouse.get_pos()
if mouse[0] in range(image_x[0], image_x[1]) and mouse[1] in range(HEIGHT // 3 - height // 2, HEIGHT // 3 + height):
if my_money - 300 >= 0 and vrem_name == '' and all_seconds <= 0:
my_money -= 300
game = True
all_draw()
dis.blit(magazine[idx].image_egg, (WIDTH - 500 + width//2, height//2 + 27))
pygame.display.update()
vrem_name = magazine[idx].name
all_seconds = magazine[idx].all_seconds
monster_in_p = idx
timer()
elif my_money < 300:
messagebox.showinfo("", "You don't have enough money")
game = True
all_draw()
pygame.display.update()
else:
messagebox.showinfo("", "Питомник уже занят")
game = True
all_draw()
pygame.display.update()
def monster_money(vocabulary):
for x in vocabulary:
if x.money*(times//60) < x.max_money:
text = str(x.money*(times//60))
else:
text = str(x.max_money)
text = font.render((text), True, YELLOW)
dis.blit(text, (x.x + width // 4, x.y + height))
def sbor_money(vocabulary):
global my_money
global times
for x in vocabulary:
if x.money * (times // 60) <= x.max_money:
my_money += x.money * (times // 60)
else:
my_money += x.max_money
def all_draw():
global monster_in_p
global monster_in_pit
global monsters_in_pit
dis.blit(fon, (0, 0))
pygame.draw.rect(dis, YELLOW, (WIDTH - 100, HEIGHT - 100, 100, 100))
draw_money()
dis.blit(pit, (300, 0))
dis.blit(ppp, (WIDTH - 500, 0))
pygame.draw.rect(dis, BLACK, (0, HEIGHT - 100, 100, 100))
pygame.draw.rect(dis, BLACK, (200, 0, 100, 100))
text = font.render("-2", True, BLUE, BLACK)
dis.blit(text, (0, 150))
dis.blit(pygame.transform.scale(pygame.image.load(os.path.join("images", "звук.png")), (100, 100)),(100, HEIGHT - 100))
monster_draw()
if monster_in_p != -1:
dis.blit(magazine[monster_in_p].image_egg, (WIDTH - 500 + width//2, height//2 + 27))
stroka = str(all_seconds - seconds)
dis.blit(font.render(stroka, True, WHITE), ((WIDTH - (400 + 10 * len(stroka) - 1)), 240))
if monster_in_pit != -1:
# dis.blit(magazine[monsters_in_pit[0]].image_egg, (300, 15))
# dis.blit(magazine[monsters_in_pit[1]].image_egg, (450, 15))
stroka = str(all_seconds_pit - seconds_pit)
dis.blit(font.render(stroka, True, WHITE), (300 - (10 * len(stroka)-1), 240))
for elem in all_vorabularu:
monster_money(elem)
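# The two timers below re-arm themselves once per second via threading.Timer:
# timer() counts down the shop-egg incubation, timer_pit() the nursery breeding;
# when breeding finishes and the incubator is free, the bred egg moves over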
def timer_pit():
global monster_in_p
global all_seconds_pit
global vrem_name
global seconds_pit
global all_seconds
global monster_in_pit
global monsters_in_pit
global vrem_name_pit
global stav
if game:
all_draw()
pygame.display.update()
if seconds_pit < all_seconds_pit:
seconds_pit += 1
threading.Timer(1, timer_pit).start()
else:
if all_seconds == -1 and vrem_name == '' and monster_in_pit != -1:
all_draw()
dis.blit(magazine[monster_in_pit].image_egg, (300, 20))
all_seconds = magazine[monster_in_pit].all_seconds
monster_in_p = monster_in_pit
monster_in_pit = -1
seconds_pit = 0
monsters_in_pit = []
vrem_name_pit = ''
# vrem_name = magazine[monster_in_pit].name
vrem_name = stav
pygame.display.update()
timer()
else:
threading.Timer(1, timer_pit).start()
def timer():
global eggs
global stav
global seconds
global vrem_name
global monster_in_p
global all_seconds
if game:
all_draw()
pygame.display.update()
if seconds < all_seconds:
seconds += 1
threading.Timer(1, timer).start()
else:
stav = vrem_name
eggs = True
monster_in_p = -1
seconds = 0
all_seconds = -1
def all_sbor_money():
for elem in all_vorabularu:
sbor_money(elem)
bas = []
tus = []
mas = []
pas = []
lus = []
osms = []
zes = []
uts = []
uds = []
kus = []
tis = []
ras = []
mars = []
sms = []
izs = []
magazine = []
WHITE = (255, 255, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
my_money = 1000
almaz = 100
all_vorabularu = [bas, tus, mas, pas, lus, zes, uts, uds, kus, osms, tis, sms, mars, ras, izs]
font = pygame.font.Font('freesansbold.ttf', 70)
count = 0
# fon = pygame.transform.scale(fon, (WIDTH, HEIGHT))
close = []
stav = ''
times = 0
seconds = 0
seconds_pit = 0
monsters_in_pit = []
vrem_name = ''
vrem_name_pit = ''
monster_in_p = -1
monster_in_pit = -1
ba = pygame.transform.scale(pygame.image.load(os.path.join("images", "ba.png")), (width, height))
tu = pygame.transform.scale(pygame.image.load(os.path.join("images", "tu.png")), (width, height))
ma = pygame.transform.scale(pygame.image.load(os.path.join("images", "ma.png")), (width, height))
pa = pygame.transform.scale(pygame.image.load(os.path.join("images", "pa.png")), (width, height))
lu = pygame.transform.scale(pygame.image.load(os.path.join("images", "lu.png")), (width, height))
ku = pygame.transform.scale(pygame.image.load(os.path.join("images", "ku.png")), (width, height))
ze = pygame.transform.scale(pygame.image.load(os.path.join("images", "ze.png")), (width, height))
osm = pygame.transform.scale(pygame.image.load(os.path.join("images", "osm.png")), (width, height))
ud = pygame.transform.scale(pygame.image.load(os.path.join("images", "ud.png")), (width, height))
ut = pygame.transform.scale(pygame.image.load(os.path.join("images", "ut.png")), (width, height))
mar = pygame.transform.scale(pygame.image.load(os.path.join("images", "mar.png")), (width, height))
ti = pygame.transform.scale(pygame.image.load(os.path.join("images", "ti.png")), (width, height))
ra = pygame.transform.scale(pygame.image.load(os.path.join("images", "ra.png")), (width, height))
sm = pygame.transform.scale(pygame.image.load(os.path.join("images", "sm.png")), (width, height))
iz = pygame.image.load(os.path.join("images", "iz.png"))
file = open('my single monsters.txt','r+')
pit = pygame.image.load(os.path.join("images", "питомник.png"))
ppp = pygame.image.load(os.path.join("images", "ppp.png"))
pit_width = 220
pit_height = 300
pit = pygame.transform.scale(pit, (pit_width, pit_height))
ppp = pygame.transform.scale(ppp, (pit_width + 50, pit_height))
dis.blit(fon, (0, 0))
dis.blit(pit, (300, 0))
dis.blit(ppp, (WIDTH - 500, 0))
pygame.draw.rect(dis, YELLOW, (WIDTH - 100, HEIGHT - 100, 100, 100))
draw_money()
ee = ''
monster_money_every_minuts()
pygame.draw.rect(dis, BLACK, (0, HEIGHT - 100, 100, 100))
pygame.display.update()
all_seconds = -1
all_seconds_pit = -1
game = True
for line in file:
try:
x, y, name = line.split(' ')
x = int(x)
y = int(y)
if len(name) == 3:
ee += name[-3]
ee += name[-2]
if ee == 'ba':
staving_(bas, 'ba', x, y, 4, 18, 0)
ee = ''
elif ee == 'tu':
staving_(tus, 'tu', x, y, 2, 30, 1)
ee = ''
elif ee == 'ma':
staving_(mas, 'ma', x, y, 3, 30, 2)
ee = ''
elif ee == 'pa':
staving_(pas, 'pa', x, y, 3, 18, 3)
ee = ''
elif ee == 'ze':
staving_(zes, 'ze', x, y, 5, 225, 5)
ee = ''
elif ee == 'ud':
staving_(uds, 'ud', x, y, 6, 180, 7)
ee = ''
elif ee == 'ut':
staving_(uts, 'ut', x, y, 4, 300, 6)
ee = ''
elif ee == 'ku':
staving_(kus, 'ku', x, y, 6, 120, 8)
ee = ''
elif ee == 'lu':
staving_(lus, 'lu', x, y, 5, 225, 4)
ee = ''
elif ee == 'osm':
staving_(osms, 'osm', x, y, 5, 300, 9)
ee = ''
elif ee == 'ti':
staving_(tis, 'ti', x, y, 8, 2160, 10)
ee = ''
elif ee == 'sm':
staving_(sms, 'sm', x, y, 7, 1890, 11)
ee = ''
elif ee == 'mar':
staving_(mars, 'mar', x, y, 8, 1872, 12)
ee = ''
elif ee == 'ra':
staving_(ras, 'ra', x, y, 9, 1872, 13)
ee = ''
elif ee == 'iz':
staving_(izs, 'iz', x, y, 12, 11232, 14)
ee = ''
elif len(name) == 4:
ee += name[-4]
ee += name[-3]
ee += name[-2]
if ee == 'osm':
staving_(osms, 'osm', x, y, 5, 300, 9)
ee = ''
pygame.display.update()
except:
try:
my_money, almaz = map(int, (line.split(' ')))
monster_draw()
pygame.display.update()
except:
try:
all_seconds_pit, vrem_name_pit, monster_in_pit = line.split(' ')
if int(all_seconds_pit) - times >= 0:
all_seconds_pit = int(all_seconds_pit) - times
else:
all_seconds_pit = 0
monster_in_pit = int(monster_in_pit)
except:
try:
all_seconds, vrem_name, monster_in_p = line.split(' ')
if int(all_seconds) - times >= 0:
all_seconds = int(all_seconds) - times
else:
all_seconds = -1
monster_in_p = int(monster_in_p)
except:
try:
a, b, c, d, e = line.split(' ')
times = int(time.time()) - int(a) + int(b)
except:
pass
for elem in all_vorabularu:
draw_image(elem)
for i in range(300, 300 + pit_width):
for j in range(pit_height):
close.append((i, j))
pygame.display.update()
cloak = time.time()
pit_ak = False
ans = ''
run = True
for elem in all_vorabularu:
monster_money(elem)
pygame.display.update()
eggs = False
magazine.append(Magazine('ba', 5))
magazine.append(Magazine('tu', 60))
magazine.append(Magazine('ma', 2 * 60))
magazine.append(Magazine('pa', 2 * 60 * 60))
magazine.append(Magazine('lu', 30 * 60))
magazine.append(Magazine('ze', 8 * 60 * 60))
magazine.append(Magazine('ut', 8 * 60 * 60))
magazine.append(Magazine('ud', 8 * 60 * 60))
magazine.append(Magazine('ku', 8 * 60 * 60))
magazine.append(Magazine('osm', 8 * 60 * 60))
magazine.append(Magazine('ti', 8 * 60 * 60))
magazine.append(Magazine('sm', 12 * 60 * 60))
magazine.append(Magazine('mar',12 * 60 * 60))
magazine.append(Magazine('ra', 12 * 60 * 60))
magazine.append(Magazine('iz', 24 * 60 * 60))
if all_seconds >= 0:
timer()
if all_seconds_pit >= 0:
timer_pit()
# all_music()
fon_m.stop()
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
file.write(str(my_money) + ' ' + str(almaz) + '\n')
file.write(str(int(time.time())) + ' ' + str(times) + ' ' + '2 ' +'3 ' + '\n')
try:
file.write((str(all_seconds - seconds)) + ' ' + str(vrem_name) + ' ' + str(monster_in_p) + '\n')
except:
pass
try:
if all_seconds_pit > -1:
file.write(str(all_seconds_pit - seconds_pit) + ' ' + str(vrem_name_pit) + ' ' + str(monster_in_pit) + '\n')
except:
pass
file.close()
run = False
pygame.quit()
exit()
if event.type == pygame.MOUSEBUTTONDOWN:
mouse = pygame.mouse.get_pos()
if game:
if eggs and stav != '' and game:
if 1100 > mouse[0] > 200 and 600 > mouse[1] > 150:
if mouse not in close:
eggs = False
seconds = 0
if stav == 'ba':
staving(bas, 'ba', 4, 18, 0)
elif stav == 'tu':
staving(tus, 'tu', 2, 30, 1)
elif stav == 'ma':
staving(mas, 'ma', 3, 30, 2)
elif stav == 'pa':
staving(pas, 'pa', 3, 18, 3)
elif stav == 'lu':
staving(lus, 'lu', 5, 225, 4)
elif stav == 'ze':
staving(zes, 'ze', 5, 225, 5)
elif stav == 'ku':
staving(kus, 'ku', 6, 120, 8)
elif stav == 'ut':
staving(uts, 'ut', 4, 300, 6)
elif stav == 'ud':
staving(uds, 'ud', 6, 180, 7)
elif stav == 'osm':
staving(osms, 'osm', 5, 300, 9)
elif stav == 'ti':
staving(tis, 'ti', 8, 2160, 10)
elif stav == 'mar':
staving(mars, 'mar', 8, 1872, 12)
elif stav == 'sm':
staving(sms, 'sm', 7, 1890, 11)
elif stav == 'ra':
staving(ras, 'ra', 9, 1872, 13)
elif stav == 'iz':
staving(izs, 'iz', 12, 11232, 14)
close_coor()
# song_f = False
stav = ''
all_draw()
pygame.display.update()
elif pit_ak:
forx(bas, 'ba')
forx(tus, 'tu')
forx(mas, 'ma')
forx(pas, 'pa')
forx(lus, 'batu')
forx(uds, 'bama')
forx(uts, 'tuma')
forx(osms, 'tupa')
forx(zes, 'pama')
forx(kus, 'bapa')
if count == 2:
seconds_pit = 0
if 'ba' in ans and 'tu' in ans and 'ma' in ans and 'pa' in ans:
all_seconds_pit = 24 * 60 * 60
monster_in_pit = 14
vrem_name_pit = 'iz'
timer_pit()
stav = 'iz'
elif 'ba' in ans and 'tu' in ans and 'ma' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 10
vrem_name_pit = 'ti'
timer_pit()
stav = 'ti'
elif 'ba' in ans and 'tu' in ans and 'pa' in ans:
all_seconds_pit = 12 * 60 * 60
monster_in_pit = 12
vrem_name_pit = 'mar'
timer_pit()
stav = 'mar'
elif 'ba' in ans and 'pa' in ans and 'ma' in ans:
all_seconds_pit = 12 * 60 * 60
monster_in_pit = 13
timer_pit()
vrem_name_pit = 'ra'
stav = 'ra'
elif 'pa' in ans and 'tu' in ans and 'ma' in ans:
all_seconds_pit = 12 * 60 * 60
monster_in_pit = 11
vrem_name_pit = 'sm'
timer_pit()
stav = 'sm'
elif 'tu' in ans and 'ba' in ans:
all_seconds_pit = 30 * 60
monster_in_pit = 4
vrem_name_pit = 'lu'
timer_pit()
stav = 'lu'
elif 'ma' in ans and 'tu' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 6
vrem_name_pit = 'ut'
timer_pit()
stav = 'ut'
elif 'ba' in ans and 'ma' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 7
vrem_name_pit = 'ud'
timer_pit()
stav = 'ud'
elif 'tu' in ans and 'pa' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 9
vrem_name_pit = 'osm'
timer_pit()
stav = 'osm'
elif 'ba' in ans and 'pa' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 8
vrem_name_pit = 'ku'
timer_pit()
stav = 'ku'
elif 'ma' in ans and 'pa' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 5
vrem_name_pit = 'ze'
timer_pit()
stav = 'ze'
all_draw()
pygame.display.update()
ans = ''
pit_ak = False
count = 0
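# The elif chain above is effectively a breeding table; a data-driven
# alternative (hypothetical sketch, not used by this script) could map
# parent pairs to (incubation seconds, shop index, name), e.g.:
# BREEDING = {frozenset(['ba', 'tu']): (30 * 60, 4, 'lu'),
#             frozenset(['ma', 'pa']): (8 * 60 * 60, 5, 'ze')}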
elif mouse[0] in range(WIDTH - 100, WIDTH) and mouse[1] in range(HEIGHT - 100, HEIGHT):
all_sbor_money()
times = 0
dis.fill(WHITE)
pygame.draw.rect(dis, BLACK, (WIDTH - 100, HEIGHT - 100, 100, 100))
game = False
draw_money()
for x in range(0, 4):
a = 0
if x == 1:
a = WIDTH // 4
elif x == 2:
a = WIDTH // 2
elif x == 3:
a = WIDTH - WIDTH // 4
dis.blit(magazine[x].image, (a, HEIGHT // 3))
text = font.render('300', True, YELLOW, WHITE)
dis.blit(text, (a, HEIGHT // 3 + height))
elif mouse[0] in range(0, 100) and mouse[1] in range(HEIGHT - 100, HEIGHT):
all_sbor_money()
times = 0
all_draw()
pygame.display.update()
elif mouse[0] in range(300, 300 + pit_width) and mouse[1] in range(pit_height):
if not pit_ak:
pit_ak = True
elif mouse[0] in range(0, 100) and mouse[1] in range(150, 250):
almaz -= 2
if seconds_pit + 3600 <= all_seconds_pit:
seconds_pit += 3600
else:
seconds_pit = all_seconds_pit
if seconds + 3600 <= all_seconds:
seconds += 3600
else:
seconds = all_seconds
elif mouse[0] in range(200, 300) and mouse[1] in range(0, 100):
bas = []
tus = []
mas = []
pas = []
lus = []
osms = []
zes = []
uts = []
uds = []
kus = []
tis = []
ras = []
mars = []
sms = []
izs = []
all_vorabularu = [bas, tus, mas, pas, lus, zes, uts, uds, kus, osms, tis, sms, mars, ras, izs]
my_money = 1000
almaz = 1000
close = []
vrem_name = ''
vrem_name_pit = ''
seconds = 0
seconds_pit = 0
all_seconds = -1
all_seconds_pit = -1
monster_in_p = -1
monster_in_pit = -1
monsters_in_pit = []
channel = 0
count = 0
stav = ''
times = 0
file.truncate(0)
all_draw()
pygame.display.update()
elif mouse[0] in range(WIDTH - 300, WIDTH) and mouse[1] in range (0, 300):
my_money += 10000
almaz += 100
seconds = all_seconds
seconds_pit = all_seconds_pit
all_draw()
pygame.display.update()
elif mouse[0] in range(100, 200) and mouse[1] in range(HEIGHT - 100, HEIGHT):
stop_music = not stop_music
all_music()
pygame.display.update()
else:
magazin_clik(0, (0, 0 + width))
magazin_clik(1, (WIDTH // 4, WIDTH // 4 + width))
magazin_clik(2, (WIDTH // 2, WIDTH // 2 + width))
magazin_clik(3, (WIDTH - WIDTH // 4, WIDTH - WIDTH // 4 + width))
if mouse[0] in range(WIDTH - 100, WIDTH) and mouse[1] in range(HEIGHT - 100, HEIGHT):
game = True
all_draw()
pygame.display.update()
|
solvalkon/python_study
|
my single monsters/my single monsters class.py
|
my single monsters class.py
|
py
| 26,982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.set_num_channels",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Channel",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "pygame.display.update",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "pygame.display.update",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 359,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 359,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 359,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 469,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 474,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 510,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 526,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 567,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 568,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "pygame.quit",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 593,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 595,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 638,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 638,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 745,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 745,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 757,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 781,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 781,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 858,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 858,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 867,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 867,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 876,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 876,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 889,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 889,
"usage_type": "attribute"
}
] |
27825918351
|
"""
create asynchronous functions for making requests to the resources (use aiohttp)
- finish the `jsonplaceholder_requests` module:
- set the values of the `USERS_DATA_URL` and `POSTS_DATA_URL` constants (take the resources from https://jsonplaceholder.typicode.com/)
- create asynchronous functions for making requests to these resources (use `aiohttp`)
- it is recommended to add base request functions that will be reused (for example `fetch_json`)
"""
from aiohttp import ClientSession
import asyncio
# import logging
#
# DEFAULT_FORMAT = "%(asctime)s %(levelname)-8s [%(name)-8s] (%(filename)s:%(funcName)s:%(lineno)d) %(message)s"
#
# logging.basicConfig(format=DEFAULT_FORMAT, level=logging.DEBUG)
#
# log = logging.getLogger(__name__)
USERS_DATA_URL = "https://jsonplaceholder.typicode.com/users"
POSTS_DATA_URL = "https://jsonplaceholder.typicode.com/posts"
async def fetch_json(session: ClientSession, url: str):
async with session.get(url) as response:
return await response.json()
async def fetch_users():
# log.info(f"Fetch users from {USERS_DATA_URL}")
async with ClientSession() as session:
json_data = await fetch_json(session, USERS_DATA_URL)
# log.info(f"Fetch json from {USERS_DATA_URL}: {json_data}")
return json_data
async def fetch_posts():
# log.info(f"Fetch posts from {POSTS_DATA_URL}")
async with ClientSession() as session:
json_data = await fetch_json(session, POSTS_DATA_URL)
# log.info(f"Fetch json from {POSTS_DATA_URL}: {json_data}")
return json_data
# def main():
# asyncio.run(fetch_users())
# asyncio.run(fetch_posts())
#
#
# if __name__ == '__main__':
# main()
|
MikhailParkin/MikhailParkin
|
homework_04/jsonplaceholder_requests.py
|
jsonplaceholder_requests.py
|
py
| 2,013 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "aiohttp.ClientSession",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 38,
"usage_type": "call"
}
] |
32474300219
|
from django.conf.urls import url, include
from rest_framework import routers
from api import views
router = routers.DefaultRouter()
router.register(r'signup', views.ProfileViewSet)
router.register(r'add_animal', views.AddAnimalViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^login/', views.login),
]
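# For reference, DefaultRouter generates list and detail routes for each
# registered viewset, e.g. ^signup/$ and ^signup/(?P<pk>[^/.]+)/$ (likewise for
# add_animal), plus a browsable API root; the exact basenames are derived from
# each viewset's queryset.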
|
stoic1979/pashu_palak_sahayak
|
api/urls.py
|
urls.py
|
py
| 439 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "api.views.ProfileViewSet",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "api.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "api.views.AddAnimalViewSet",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "api.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "api.views.login",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "api.views",
"line_number": 14,
"usage_type": "name"
}
] |
2108921101
|
import sys
from requests import get
from io import BytesIO
import sqlite3
from PIL import Image
from data.PYTHON_files.main import Ui_MainWindow
from data.PYTHON_files.load_image import Ui_Form
from data.PYTHON_files.description import Ui_Form_Desk
from data.PYTHON_files.effects import *
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtWidgets import QFileDialog
WINDOW_SIZE = (1238, 859)
IMAGE_FRAME_SIZE = (895, 775)
class LoadImage(QMainWindow, Ui_Form):
def __init__(self, parent):
super(LoadImage, self).__init__(parent)
self.setupUi(self)
self.initUI()
def initUI(self):
WINDOW_SIZE = (601, 323)
self.setFixedSize(*WINDOW_SIZE)
class ShowDescription(QMainWindow, Ui_Form_Desk):
def __init__(self, parent):
super(ShowDescription, self).__init__(parent)
self.setupUi(self)
self.initUI()
def initUI(self):
WINDOW_SIZE = (770, 640)
self.setFixedSize(*WINDOW_SIZE)
self.text = ""
con = sqlite3.connect("data/DB_files/filters_db.sqlite")
cur = con.cursor()
result = cur.execute("SELECT * FROM filters").fetchall()
for elem in result:
self.text += "{} - {}\n\n".format(*elem)
con.close()
def show(self, text=None):
self.textBrowser.clear()
if not text:
text = self.text
self.textBrowser.append(text)
super().show()
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.initUI()
def initUI(self):
self.open_url_form = LoadImage(self)
self.show_description = ShowDescription(self)
self.setFixedSize(*WINDOW_SIZE)
self.history = []
self.filters_history = []
self.sliders_history = []
self.image_index = 0
self.image_PIL = None
self.scaled_size = [None, None]
# open description form
self.actionfilters_information.triggered.connect(
lambda: self.show_description.show()
)
# open filters history form
self.actionfilters_history.triggered.connect(self.show_filters_history)
        # open application info form
self.actionapplication_info.triggered.connect(self.show_application_info)
# file open/save
self.actionopen.triggered.connect(self.load_image)
self.actionsave.triggered.connect(self.save_image)
self.actionopen_from_URL.triggered.connect(self.load_from_url_form)
self.open_url_form.load_url_btn.clicked.connect(self.load_from_url)
# theme
self.action_darktheme.triggered.connect(self.set_dark_theme)
self.action_lighttheme.triggered.connect(self.set_light_theme)
# connecting preset buttons
self.btns_preset = [
self.btn_preset_1,
self.btn_preset_2,
self.btn_preset_3,
self.btn_preset_4,
self.btn_preset_5,
self.btn_preset_6,
self.btn_preset_7,
self.btn_preset_8,
self.btn_preset_9,
self.btn_preset_10,
]
for btn in self.btns_preset:
btn.clicked.connect(self.set_presets)
# connecting special effects buttons
self.btn_box_blur.clicked.connect(self.set_box_blur)
self.gaussian_blur.clicked.connect(self.set_gaussian_blur)
self.btn_unsharp_mask.clicked.connect(self.set_unsharp_mask)
self.btn_stereo.clicked.connect(self.set_stereo)
self.btn_square_effect.clicked.connect(self.set_square_effect)
self.btn_black_and_white.clicked.connect(self.set_black_and_white)
self.btn_negative.clicked.connect(self.set_negative)
# connecting back/reset buttons
        self.btn_reset.clicked.connect(self.reset_image)
self.btn_back.clicked.connect(self.previous_image)
# connecting sliders
self.red_slider.valueChanged.connect(
self.change_channels(self.red_slider, (1, 0, 0))
)
self.green_slider.valueChanged.connect(
self.change_channels(self.green_slider, (0, 1, 0))
)
self.blue_slider.valueChanged.connect(
self.change_channels(self.blue_slider, (0, 0, 1))
)
self.red_slider.sliderReleased.connect(
self.apply_channel_changes(self.red_slider, (1, 0, 0))
)
self.green_slider.sliderReleased.connect(
self.apply_channel_changes(self.green_slider, (0, 1, 0))
)
self.blue_slider.sliderReleased.connect(
self.apply_channel_changes(self.blue_slider, (0, 0, 1))
)
self.rgb_sliders = [self.red_slider, self.green_slider, self.blue_slider]
self.alpha_slider.valueChanged.connect(self.change_transparency)
self.statusbar = self.statusBar()
# load theme from .txt file
theme = self.load_theme()
if theme == "dark":
self.set_dark_theme()
else:
self.set_light_theme()
def show_application_info(self):
with open("data/TXT_files/info.txt", "r", encoding="utf-8") as file1:
data = file1.read().strip()
self.show_description.textBrowser.clear()
self.show_description.show(data)
def set_filter_history(self):
con = sqlite3.connect("data/DB_files/history_db.sqlite")
cur = con.cursor()
text = " ".join(self.filters_history)
text_length = len(
" ".join([i[0] for i in cur.execute("SELECT * FROM history").fetchall()])
+ text
)
if text:
cur.execute(
"""
INSERT INTO history(effects) VALUES(?)
""",
(text,),
).fetchall()
if text_length > 600:
result = cur.execute(
"""
SELECT effects FROM history
"""
).fetchone()[0]
cur.execute(
"""
DELETE FROM history
WHERE effects = ?
""",
(result,),
).fetchall()
con.commit()
con.close()
def show_filters_history(self):
con = sqlite3.connect("data/DB_files/history_db.sqlite")
cur = con.cursor()
result = cur.execute("SELECT * FROM history").fetchall()
text = ""
self.show_description.textBrowser.clear()
for i, line in enumerate(result, start=1):
text += f"{i}) {line[0]}\n\n"
self.show_description.show(text)
con.close()
def keyPressEvent(self, event):
mod = int(event.modifiers())
key = event.key()
if mod == Qt.ControlModifier:
if key == Qt.Key_O:
self.load_image()
if key == Qt.Key_S:
self.save_image()
def change_transparency(self):
if self.check_if_image_opened(): # check for image opened
return
self.image_PIL = transparensy(self.image_PIL, self.sender().value())
self.update_image()
def change_channels(self, slider, chan):
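        # Factory returning a slot: previews the channel change on a scaled copy
        # of the image without committing it to the edit history.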
def inner():
if self.check_if_image_opened(): # check for image opened
return
val = slider.value()
rgb = tuple(map(lambda n: val if n == 1 else 50, chan))
self.image.setPixmap(
convert_to_qt(channels(self.image_PIL.resize(self.scaled_size), rgb))
)
return inner
def apply_channel_changes(self, slider, chan):
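        # Factory returning a slot: applies the channel change to the full-size
        # image once the slider is released and records it in the history.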
def inner():
if self.check_if_image_opened(): # check for image opened
return
val = slider.value()
rgb = tuple(map(lambda n: val if n == 1 else 50, chan))
self.image_PIL = channels(self.image_PIL, rgb)
self.update_image()
self.alpha_slider.setValue(
255
            )  # the RGB channels cannot be adjusted while the alpha channel is modified
return inner
def set_box_blur(self):
if self.check_if_image_opened(): # check for image opened
return
        radius = self.spin_box_blur_raduis.value()  # the widget name keeps the original misspelling
        self.image_PIL = box_blur(self.image_PIL, radius)
self.update_image()
self.filters_history.append("Box Blur")
def set_gaussian_blur(self):
if self.check_if_image_opened(): # check for image opened
return
        radius = self.spin_gaussian_blur_raduis.value()
        self.image_PIL = gaussian_blur(self.image_PIL, radius)
self.update_image()
self.filters_history.append("Gaussian Blur")
def set_unsharp_mask(self):
if self.check_if_image_opened(): # check for image opened
return
        radius = self.unsharp_mask_raduis_spin.value()
        percent = self.unsharp_mask_percent_spin.value()
        threshold = self.unsharp_mask_threshold_spin.value()
        self.image_PIL = unsharp_mask(self.image_PIL, radius, percent, threshold)
self.update_image()
self.filters_history.append("Unsharp Mask")
def set_stereo(self):
if self.check_if_image_opened(): # check for image opened
return
delta = self.stereo_delta_spin.value()
self.image_PIL = stereo_effect(self.image_PIL, delta)
self.update_image()
self.filters_history.append("Stereo")
def set_square_effect(self):
if self.check_if_image_opened(): # check for image opened
return
self.alpha_slider.setValue(255)
area = self.square_effect_area_spin.value()
self.image_PIL = lightest_pixel_effect(self.image_PIL, area)
self.update_image()
self.filters_history.append("Square Effect")
def set_black_and_white(self):
if self.check_if_image_opened(): # check for image opened
return
self.image_PIL = black_and_white_effect(self.image_PIL)
self.update_image()
self.filters_history.append("Black And White")
def set_negative(self):
if self.check_if_image_opened(): # check for image opened
return
self.image_PIL = negative_effect(self.image_PIL)
self.update_image()
self.filters_history.append("Negative")
def set_presets(self):
if self.check_if_image_opened(): # check for image opened
return
self.image_PIL = preset_filters(self.image_PIL, filters[self.sender().text()])
self.update_image()
self.filters_history.append(self.sender().text())
def convert_image(self, image):
self.filters_history = []
self.history = []
self.sliders_history = []
        # shrink the image if it does not fit inside the frame
width, height = image.size
scale1 = scale2 = 1
if width > IMAGE_FRAME_SIZE[0]:
scale1 = IMAGE_FRAME_SIZE[0] / width
if height > IMAGE_FRAME_SIZE[1]:
scale2 = IMAGE_FRAME_SIZE[1] / height
scale = scale1 if scale1 < scale2 else scale2
self.scaled_size = (int(width * scale), int(height * scale))
# self.image_PIL = self.image_PIL.resize(self.scaled_size)
self.origin_image = self.image_PIL.copy()
self.history.append(self.origin_image)
# ________________________________________
self.image.move(0, 0)
self.image.setAlignment(QtCore.Qt.AlignCenter)
self.update_image()
        self.reset_image()
def load_image(self):
filename = QFileDialog.getOpenFileName(
self,
"Choose photo",
"",
"Pictures (*.png *.jpg);; Pictures (*.png);; Pictures (*.jpg)",
)[0].strip()
if not filename:
return
# filename = "/home/anchous/Pictures/waves.png"
self.image_PIL = Image.open(filename)
self.convert_image(self.image_PIL)
def save_image(self):
filename = QFileDialog.getSaveFileName(
self, "Save photo", "", "Pictures (*.png);; Pictures (*.jpg)"
)[0].strip()
if not filename:
self.statusbar.showMessage("Error: No filename", 5000)
return
if not self.image_PIL:
self.statusbar.showMessage("Error: No image", 5000)
return
# if the filename is incorrect, request it again
try:
self.image_PIL.save(filename)
except Exception:
self.statusbar.showMessage("Error: Incorrect filename", 5000)
            filename = QFileDialog.getSaveFileName(
                self,
                "Save photo",
                f"{filename.split('.')[0]}.png",
                "Pictures (*.png);; Pictures (*.jpg)",
            )[0].strip()
            if filename:  # the user may cancel the second dialog as well
                self.image_PIL.save(filename)
self.set_filter_history()
def load_from_url_form(self):
self.open_url_form.show()
def load_from_url(self):
try:
url = self.open_url_form.url_text.toPlainText()
response = get(url)
self.image_PIL = Image.open(BytesIO(response.content))
self.convert_image(self.image_PIL)
self.open_url_form.url_text.setPlainText("")
self.open_url_form.close()
except Exception:
self.statusbar.showMessage("Error: Incorrect url", 5000)
return
def update_image(self):
self.history.append(self.image_PIL)
self.sliders_history.append(
(
self.alpha_slider.value(),
self.red_slider.value(),
self.green_slider.value(),
self.blue_slider.value(),
)
)
self.image_index += 1
self.image.setPixmap(convert_to_qt(self.image_PIL.resize(self.scaled_size)))
def previous_image(self):
if self.image_index > 0:
del self.history[self.image_index :]
del self.sliders_history[self.image_index :]
self.image_index -= 1
self.image_PIL = self.history[self.image_index]
self.alpha_slider.setValue(self.sliders_history[-1][0])
self.red_slider.setValue(self.sliders_history[-1][1])
self.green_slider.setValue(self.sliders_history[-1][2])
self.blue_slider.setValue(self.sliders_history[-1][3])
# updating image without history logging
self.image.setPixmap(convert_to_qt(self.image_PIL.resize(self.scaled_size)))
    def reset_image(self):
if self.check_if_image_opened(): # check for image opened
return
self.image_PIL = self.origin_image.copy()
for sl in self.rgb_sliders:
sl.setValue(50)
self.alpha_slider.setValue(255)
self.update_image()
self.image_index = 0
self.history = [self.image_PIL]
self.sliders_history = [(255, 50, 50, 50)]
def set_dark_theme(self):
self.setStyleSheet("background-color: #353535;\ncolor: #dddddd;")
self.frame.setStyleSheet("background-color: #282828;")
self.set_theme("dark")
def set_light_theme(self):
self.setStyleSheet("background-color: #dddddd;\ncolor: #202020;")
self.frame.setStyleSheet("background-color: #cccccc;")
self.set_theme("light")
def check_if_image_opened(self):
try:
            return not self.image_PIL  # True when no image is loaded
except AttributeError:
return True
def set_theme(self, theme):
with open("data/TXT_files/theme.txt", "w", encoding="UTF-*") as file1:
file1.write(theme)
def load_theme(self):
with open("data/TXT_files/theme.txt", "r", encoding="UTF-8") as file1:
theme = file1.read().strip()
return theme
def except_hook(cls, exception, traceback):
sys.__excepthook__(cls, exception, traceback)
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = MainWindow()
ex.show()
sys.excepthook = except_hook
sys.exit(app.exec_())
|
Programmer-Anchous/Effects-program
|
run.py
|
run.py
|
py
| 16,243 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "data.PYTHON_files.load_image.Ui_Form",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "data.PYTHON_files.description.Ui_Form_Desk",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "data.PYTHON_files.main.Ui_MainWindow",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "data.PYTHON_files.main",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "data.PYTHON_files.main",
"line_number": 177,
"usage_type": "argument"
},
{
"api_name": "sqlite3.connect",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt.ControlModifier",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Key_O",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.Key_S",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 357,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "sys.__excepthook__",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "sys.excepthook",
"line_number": 495,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 496,
"usage_type": "call"
}
] |
31746854279
|
""" This module allows you to download public files from Google Drive and Dropbox """
import os
import requests
import zipfile
import logging
import patoolib
from bs4 import BeautifulSoup
import gdrivedl
# Define urls to filter by cloud service
GDRIVE_URL = 'drive.google.com'
DROPBOX_URL = 'dropbox.com'
def download_folder(url, output_folder, silent, filename=None):
"""Download Google Drive folders"""
dl = gdrivedl.GDriveDL(quiet=silent, overwrite=False, mtimes=False)
    dl.process_url(url, output_folder, filename=filename)
def download_file(url, output_folder, filename, silent):
""" Download Google Drive files"""
dl = gdrivedl.GDriveDL(quiet=silent, overwrite=False, mtimes=False)
dl.process_url(url, output_folder, filename)
def gd_download(url, directory, quiet):
""" Detects if url belongs to Google Drive folder or file and calls relavent function """
if 'folder' in url:
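        # get_title() returns e.g. "MyFolder - Google Drive"; [:-15] strips the
        # 15-character " - Google Drive" suffix (the file branch below does the same).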
output = get_title(url)[:-15]
output_path = directory + output
logging.info(f"---> Downloading Google Drive folder to: {output_path}")
download_folder(url, output_path, quiet)
return True
elif 'file' in url:
temp_output = get_title(url)[:-15]
output = temp_output.split('.', 1)[0]
logging.info(f"---> Downloading Google Drive file to {directory + temp_output}")
download_file(url, directory, temp_output, quiet)
unzip(temp_output, output, directory)
return True
else:
return False
def get_title(url):
""" Gets file/folder title with requests library """
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
for title in soup.find_all('title'):
return title.get_text()
def compression_type(file_name):
""" Detects file compression type """
ext = os.path.splitext(file_name)[-1].lower()
return ext
def unzip(zipped_file, unzipped_file, directory):
""" Uncompresses files and then deletes compressed folder """
if compression_type(zipped_file) == '.zip':
zip_path = directory + zipped_file
unzip_path = directory + unzipped_file
logging.info(f"--> Extracting to: {unzip_path}")
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(unzip_path)
zip_ref.close()
os.remove(zip_path)
if compression_type(zipped_file) == '.rar':
zip_path = directory + zipped_file
unzip_path = directory + unzipped_file
logging.info(f"---> Extracting to: {unzip_path}")
patoolib.extract_archive(zip_path, outdir=directory)
os.remove(zip_path)
return
def db_download(url, directory):
""" Downloads files from Dropbox URL """
url = url[:-1] + '0'
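    # The page title is shaped like "Dropbox - <name> - Simplify your life" (an
    # assumption implied by the slices): [:-21] drops the trailing tagline and
    # [10:] drops the leading "Dropbox - ".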
file_name = get_title(url)[:-21][10:]
logging.info(f"Dropbox file name: {file_name}")
suffix1 = file_name.endswith(".zip")
suffix2 = file_name.endswith(".rar")
dl_url = url[:-1] + '1'
filepath = directory + file_name
logging.info(f"Downloading dropbox file to: {filepath}")
output = file_name[:-4]
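    # Present a wget user agent so Dropbox serves the raw file rather than an HTML page.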
headers = {'user-agent': 'Wget/1.16 (linux-gnu)'}
r = requests.get(dl_url, stream=True, headers=headers)
if r.status_code == 200:
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if suffix1 or suffix2:
unzip(file_name, output, directory)
return True
else:
return False
def grab(url, output_path, quiet=True):
    """
    Detects whether the url belongs to Google Drive or Dropbox and calls the relevant method.
    You may change the logging level by calling grab with quiet=False.
    """
    if quiet:
        logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.WARNING)
    else:
        logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)
if GDRIVE_URL in url:
        if gd_download(url, output_path, quiet):
return True
else:
logging.warning(f"The Google Drive URL {url} is not supported")
return False
if DROPBOX_URL in url:
        if db_download(url, output_path):
return True
else:
logging.warning(f"The Dropbox URL {url} is not supported")
return False
else:
logging.warning(f"The URL {url} is not supported")
return False
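# A usage sketch (the URLs below are placeholders, not real shared files):
if __name__ == '__main__':
    grab('https://drive.google.com/drive/folders/FOLDER_ID', './downloads/', quiet=False)
    grab('https://www.dropbox.com/s/FILE_ID/archive.zip?dl=0', './downloads/')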
|
duckduckgrayduck/clouddl
|
src/clouddl/clouddl.py
|
clouddl.py
|
py
| 4,484 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "gdrivedl.GDriveDL",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "gdrivedl.GDriveDL",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "patoolib.extract_archive",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "logging.warning",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 121,
"usage_type": "call"
}
] |
17882061657
|
from demisto_sdk.commands.common.constants import CLASSIFIERS_DIR, PACKS_DIR
from demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.json_content_object import \
JSONContentObject
from demisto_sdk.commands.common.tools import src_root
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_JSON_NO_FROM_VERSION = TEST_CONTENT_REPO / PACKS_DIR / 'Sample01' / CLASSIFIERS_DIR / 'classifier-sample_new.json'
def test_to_version_no_from_version(datadir):
from packaging.version import parse
obj = JSONContentObject(TEST_JSON_NO_FROM_VERSION, "classifier")
assert obj.from_version == parse("0.0.0")
assert obj.to_version == parse("4.0.0")
class TestFileWithStem:
def test_with_readme_change_log(self):
obj = JSONContentObject(TEST_JSON_NO_FROM_VERSION, "classifier")
assert obj.readme is not None
assert obj.changelog is not None
|
AdouniH/demisto-sdk
|
demisto_sdk/commands/common/content/tests/objects/pack_objects/abstract_pack_objects/json_content_object_test.py
|
json_content_object_test.py
|
py
| 952 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "demisto_sdk.commands.common.tools.src_root",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "demisto_sdk.commands.common.constants.PACKS_DIR",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "demisto_sdk.commands.common.constants.CLASSIFIERS_DIR",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.json_content_object.JSONContentObject",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "packaging.version.parse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "packaging.version.parse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.json_content_object.JSONContentObject",
"line_number": 20,
"usage_type": "call"
}
] |
24285372674
|
#! python3
# program to load current weather from api
# via cmd
# display for today and the next two days
# to run: currentWeather location
import json
import requests
import sys
if len(sys.argv) < 2:
    print('Usage: currentWeather <location>')
sys.exit()
location = ' '.join(sys.argv[1:])
key = ''
# download
url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3&APPID=%s' % (location, key)
print(url)
try:
res = requests.get(url)
res.raise_for_status()
weatherData = json.loads(res.text)
print(weatherData)
w = weatherData['list']
print('Current Weather', w)
except Exception as e:
print(e)
|
chhatrachhorm/ABS
|
PythonStuff/JsonApi/currentWeather.py
|
currentWeather.py
|
py
| 632 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 22,
"usage_type": "call"
}
] |
38044932492
|
import requests
import uuid
from datetime import datetime
import pandas as pd
# https://kcnew.ifrc.org/api/v1/forms find the kpi asset uid for forms here
#from settings import * #to import MYTOKEN and KPIASSETUID
##################
## RUN SETTINGS ##
##################
##https://kobonew.ifrc.org/token/?format=json
MYTOKEN = ""
#"kpi_asset_uid":
KPIASSETUID= ""
# https://kcnew.ifrc.org/api/v1/forms find the kpi asset uid
headers = {
'Authorization': f'Token {MYTOKEN}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
now = datetime.now()
current_time = now.strftime("%Y-%b-%d %I:%M %p")
# Specify the path to your Excel file First Qtr 2022
file_path = 'data/ercs_base_wh_dummy.xlsx'
# Read the Excel file into a Pandas DataFrame
data_frame = pd.read_excel(file_path)
for index, row in data_frame.iterrows():
# Access values in each column for the current row
submission = {
'meta': {
'instanceID': f'uuid:{uuid.uuid4()}',
},
'Supplier_Donor':row['Supplier_Donor'],
'Local_or_Foreign_Receival':row['Local_or_Foreign_Receival'],
'Packing_List_Number':row['Packing_List_Number'],
'Certificate_of_Origin':row['Certificate_of_Origin'],
'Donation_Certificate':row['Donation_Certificate'],
'Waybill_Number':row['Waybill_Number'],
'Contract_Number':row['Contract_Number'],
'Invoice_Number':row['Invoice_Number'],
'Purchase_Requisition_Number':row['Purchase_Requisition_Number'],
'Department_Name':row['Department_Name'],
'Receiver':row['Receiver'],
'Purchase_Order':row['Purchase_Order'],
'Date_of_Reception':row['Date_of_Reception'].date().strftime("%Y-%m-%d"),
'Items_Inspected_approved':row['Items_Inspected_approved'],
'Received_By':row['Received_By'],
'Received_On':row['Received_On'].date().strftime("%Y-%m-%d"),
'Account_Number':row['Account_Number'],
'Project_code':row['Project_code'],
'Items':row['Items'],
'Remark':row['Remark'],
'EXPIRY_DATES':row['EXPIRY_DATES'].date().strftime("%Y-%m-%d"),
'Vender_Manufacturer_No':row['Vender_Manufacturer_No'],
'Vender_seiral_No':row['Vender_seiral_No'],
'Unit_of_Measure':row['Unit_of_Measure'],
'Quantity_Intial':row['Quantity_Intial'],
'Unit_Price':row['Unit_Price'],
'Currency_of_Purchase':row['Currency_of_Purchase'],
}
data_request = requests.post(
        'https://kcnew.ifrc.org/api/v1/submissions',
json={
"id": f"{KPIASSETUID}",
"submission": submission
},
headers=headers
)
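    # A defensive addition (assumption: the Kobo v1 submissions endpoint answers
    # 201 Created on success): surface failed rows so a batch run can be audited.
    if data_request.status_code != 201:
        print(f"Row {index} failed: HTTP {data_request.status_code} - {data_request.text}")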
|
aklilu/BachUploadToKobo
|
bathcuploadtokobo.py
|
bathcuploadtokobo.py
|
py
| 2,722 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pandas.read_excel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 75,
"usage_type": "call"
}
] |