import { URL } from "url";
import { SpiderRequest, SpiderRequestWithResponse } from "../../types";
type SignalBase = {
name: string;
};
export type DomSignalChecker = SignalBase & {
matches(req: SpiderRequestWithResponse, $: cheerio.Root): boolean;
};
export type RequestSignalChecker = SignalBase & {
requestMatches(req: SpiderRequest): boolean;
};
export type UrlSignalChecker = SignalBase & {
urlMatches(url: URL): boolean;
};
export type SignalChecker =
| DomSignalChecker
| RequestSignalChecker
| UrlSignalChecker;
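// A minimal sketch of a concrete checker (the "wordpress" signal and its
// heuristic are hypothetical): an implementation supplies `name` plus the
// matching method for its variant, and then fits the SignalChecker union.
export const wordpressSignal: UrlSignalChecker = {
  name: "wordpress",
  urlMatches(url: URL): boolean {
    // Illustrative heuristic only: treat /wp-content/ paths as a match.
    return url.pathname.startsWith("/wp-content/");
  },
};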
|
module Legion
module Extensions
module Todoist
module Runners
module Projects
include Legion::Extensions::Helpers::Lex
def list; end
def create; end
def get; end
def update; end
def delete; end
def collaborators; end
end
end
end
end
end
|
// Imports.
// Long import.
use syntax::ast::{ItemForeignMod, ItemImpl, ItemMac, ItemMod, ItemStatic, ItemDefaultImpl};
use exceedingly::looooooooooooooooooooooooooooooooooooooooooooooooooooooooooong::import::path::{ItemA, ItemB};
use exceedingly::loooooooooooooooooooooooooooooooooooooooooooooooooooooooong::import::path::{ItemA, ItemB};
use list::{
// Some item
SomeItem /* Comment */, /* Another item */ AnotherItem /* Another Comment */, // Last Item
LastItem
};
use test::{ Other /* C */ , /* A */ self /* B */ };
use syntax::{self};
use {/* Pre-comment! */
Foo, Bar /* comment */};
use Foo::{Bar, Baz};
pub use syntax::ast::{Expr_, Expr, ExprAssign, ExprCall, ExprMethodCall, ExprPath};
use syntax::some::{};
use self;
use std::io::{self};
use std::io::self;
mod Foo {
pub use syntax::ast::{
ItemForeignMod,
ItemImpl,
ItemMac,
ItemMod,
ItemStatic,
ItemDefaultImpl
};
mod Foo2 {
pub use syntax::ast::{ItemForeignMod, ItemImpl, ItemMac, ItemMod, ItemStatic, self, ItemDefaultImpl};
}
}
fn test() {
use Baz::*;
use Qux;
}
// Simple imports
use foo::bar::baz as baz ;
use bar::quux as kaas;
use foo;
// With aliases.
use foo::{self as bar, baz};
use foo::{self as bar};
use foo::{qux as bar};
use foo::{baz, qux as bar};
// With absolute paths
use ::foo;
use ::foo::{Bar};
use ::foo::{Bar, Baz};
use ::{Foo};
use ::{Bar, Baz};
|
import React from 'react';
import withSession from '../Session/withSession';
import { MessageCreate, Messages } from '../Message';
import ShowUsers from '../ShowUsers';
const rowStyles = {
display: 'flex',
};
const columnStyles = {
flex: '50%',
};
const MessageBoard = ({ session }) => (
<div style={rowStyles}>
<div style={columnStyles}>
<h2>Message Board</h2>
{session && session.me && <MessageCreate/>}
<Messages limit={5} />
</div>
<div style={columnStyles}>
<ShowUsers/>
</div>
</div>
);
export default withSession(MessageBoard);
|
<?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
use App\Models\Blog;
use App\Models\BlogCategory;
use App\Models\Comment;
class welcomeController extends Controller
{
public function index()
{
$comments = Comment::get();
$categories = BlogCategory::get();
$blogs = Blog::paginate(20);
return view('web.welcome.index', [
'categories' => $categories,
'blogs' => $blogs,
'comments' => $comments,
]);
}
public function showPost($blog)
{
$comments = Comment::get();
$blog = Blog::where('id', $blog)->first();
return view('web.single-post', [
'blog' => $blog,
'comments' => $comments
]);
}
public function search(Request $request)
{
$search = $request->input('query');
$blogs = Blog::where('title', 'like', '%' . $search . '%')->get();
return view('web.welcome.index', [
"blogs" => $blogs,
]);
}
}
|
package cypher
/**
* BLANK CIPHER
*
* Reference implementation to
* show how the cyphers should
* be laid out.
*
* Just returns the plaintext as-is.
*
*/
type BlankCypher struct {
Cypher
}
func (c *BlankCypher) Encypher(plain string, key Key) string {
return plain
}
func (c *BlankCypher) Decypher(plain string, key Key) string {
return plain
}
func (c *BlankCypher) Crack(plain string) string {
return plain
}
func (c *BlankCypher) Confidence(plain string) float32 {
return 0.0
}
func (c *BlankCypher) KeyType() KeyType {
return NoKey
}
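// Usage sketch (illustrative only; assumes a Key value is available from
// elsewhere in the package):
//
//	var k Key
//	c := &BlankCypher{}
//	_ = c.Encypher("attack at dawn", k) // returns "attack at dawn"
//	_ = c.Confidence("attack at dawn")  // always 0.0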
|
[ ! -d "target/shaders" ] && mkdir -p "target/shaders"
glslc src/shaders/shader.vert -o target/shaders/vert.spv
glslc src/shaders/shader.frag -o target/shaders/frag.spv
|
using System;
using System.Collections.Generic;
using System.Text;
using Microsoft.VisualStudio.TestTools.UnitTesting;
namespace AlgoTest.LeetCode.GasStation
{
[TestClass]
public class GasStation
{
[TestMethod]
public void Test()
{
//var gas = new int[] { 1, 2, 3, 4, 5 };
//var cost = new int[] { 3, 4, 5, 1, 2 };
//Assert.AreEqual(3, CanCompleteCircuit(gas, cost));
//var gas = new int[] { 4, 5, 2, 6, 5, 3 };
//var cost = new int[] { 3, 2, 7, 3, 2, 9 };
//Assert.AreEqual(-1, CanCompleteCircuit(gas, cost));
//var gas = new int[] { 2, 3, 4 };
//var cost = new int[] { 3, 4, 3 };
//Assert.AreEqual(-1, CanCompleteCircuit(gas, cost));
}
public int CanCompleteCircuit(int[] gas, int[] cost)
{
for (var i = 0; i < gas.Length; i++)
{
if (CanCompleteCircuit(gas, cost, i, i, 0))
{
return i;
}
}
return -1;
}
public bool CanCompleteCircuit(int[] gas, int[] cost, int index, int startIndex, int gasInTank)
{
if (gasInTank == 0 && index != startIndex)
return false;
gasInTank += gas[index];
if (cost[index] > gasInTank)
return false;
var nextIndex = GetNextIndex(index, gas.Length);
if (nextIndex == startIndex)
return true;
if (CanCompleteCircuit(gas, cost, nextIndex, startIndex, gasInTank - cost[index]))
return true;
return false;
}
private static int GetNextIndex(int index, int limit)
{
return index == limit - 1 ? 0 : index + 1;
}
}
}
|
---
title: Further Study
date: 23/08/2019
---
For further study, read The Desire of Ages, chapter 54, "The Good Samaritan," and chapter 70, "The Least of These My Brethren"; and Christ's Object Lessons, chapter 21, "A Great Gulf Fixed," and chapter 27, "Who Is My Neighbour?"
"Christ breaks down the dividing wall, the selfishness, the prejudice that separates nation from nation, and teaches love for the whole human family. He lifts people out of the narrow enclosure that selfishness prescribes and abolishes all national boundaries and the artificial distinctions of society. He erases the distinction between neighbor and stranger, friend and enemy. And He teaches us to regard every needy person as our neighbor and the world as our mission field" (『希望への光』 p. 1141; 『思い煩ってはいけません』 p. 54).
"The standard of the golden rule is the true standard of Christianity; anything that falls short of it is counterfeit. A religion that leads people to set a low value on human beings, whom Christ valued so highly as to give Himself for them, or that makes them indifferent to human needs, sufferings, and rights, is a false religion. By slighting the claims of the poor, the suffering, and the sinful, we prove ourselves traitors to Christ. Christianity has so little power in the world because people profess the name of Christ while denying His character in their lives" (ibid., p. 1180; ibid., p. 180).
### Questions for Discussion
`❶ Which of the passages you studied this week is your favorite, and why?`
`❷ How do the passages in Thursday's study show what inevitably accompanies having the "truth"?`
### Summary
##### Jesus' teachings set out a distinctly different way of living for those who are citizens and ambassadors of God's kingdom. Standing on the foundation of the Old Testament, Jesus renewed and expanded its call to care for the poor and oppressed, and He emphasized that His disciples are to live as a people of compassion and mercy while they await His return.
|
import React from "react";
import PropTypes from 'prop-types';
export class PlaceholderRow extends React.Component {
constructor(props) {
super(props);
}
render() {
return (
<tr className="d-flex">
<th scope="row" className="col-first pl-4 py-3">
<div className="numberCircle">0</div>
</th>
<td className="col-second py-3">(Name...)</td>
<td className="col py-3">(Email...)</td>
</tr>
);
}
}
|
package neotypes
package cats
package object data {
final object implicits extends CatsData
}
|
from bs4 import BeautifulSoup
import requests
from datetime import datetime
import json
from pydantic import BaseModel
class Post(BaseModel):
title: str = ""
author: str = ""
url: str = ""
date: str = ""
class DevToScrap:
def __init__(self, url):
self.url_page = url
self.posts = []
def _find_posts(self):
page = requests.get(self.url_page)
soup = BeautifulSoup(page.content, "html.parser")
home = soup.find(class_="articles-list crayons-layout__content")
self.posts_array = home.find_all(class_="crayons-story")
def _format_date(self, raw_post):
raw_date = raw_post.find("a", class_="crayons-story__tertiary fs-xs").text
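# Dev.to shows current-year dates without a year (e.g. "Mar 4"), so the
# first branch appends " 21"; older dates like "Mar 4 '20" only need the
# apostrophe stripped so strptime can parse them with %y.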
new_date = (
f"{raw_date} 21"
if len(raw_date.split(" ")) <= 2
else raw_date.replace("'", "")
)
new_date = datetime.strptime(new_date, "%b %d %y").date()
self.date = new_date.strftime("%d-%m-%Y")
def _get_author(self, raw_post):
author = raw_post.find("div", class_="crayons-story__meta")
self.author = author.find("button").text.strip()
def _get_url(self, raw_url):
href = raw_url.find("a")["href"]
self.url_post = f"https://dev.to{href}"
def _get_title(self, raw_title):
self.title = raw_title.text.strip()
def _export_to_json(self, data):
with open("posts.json", "w") as outfile:
json.dump(data, outfile)
def run_scrap(self):
self._find_posts()
for post in self.posts_array:
title_link = post.find("h2", class_="crayons-story__title")
self._get_title(title_link)
self._get_url(title_link)
self._get_author(post)
self._format_date(post)
post_info = Post(
title=self.title, author=self.author, url=self.url_post, date=self.date
)
self.posts.append(post_info.dict())
self._export_to_json(self.posts)
if __name__ == "__main__":
DevToScrap(url="https://dev.to/top/week").run_scrap()
|
import argparse
import collections
import os
import numpy as np
import torch
import torch.optim as optim
import torchsummary
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from tqdm import tqdm
import skimage.io
from sklearn.metrics import f1_score
import torch.nn as nn
import torch.nn.functional as F
import config
import utils
import classification_dataset
from classification_dataset import ClassificationDataset
from logger import Logger
from config import NB_CATEGORIES
from experiments import MODELS
class SemiBalancedSampler(torch.utils.data.Sampler):
def __init__(self, data_source: ClassificationDataset, epoch_size, balanced_sampling_ratio=0.5):
super().__init__(data_source)
self.balanced_sampling_ratio = balanced_sampling_ratio
self.data_source = data_source
self.epoch_size = epoch_size
def generator(self):
# np.random.seed()
for i in range(self.epoch_size):
selection_policy = np.random.random()
if selection_policy > self.balanced_sampling_ratio:
yield np.random.randint(0, len(self.data_source))
else:
feature = np.random.randint(0, config.NB_CATEGORIES)
yield np.random.choice(self.data_source.samples_idx_by_label[feature])
def __iter__(self):
return self.generator()
def __len__(self):
return self.epoch_size
class FocalLoss(nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, inputs, target):
if not (target.size() == inputs.size()):
raise ValueError("Target size ({}) must be the same as input size ({})"
.format(target.size(), inputs.size()))
max_val = (-inputs).clamp(min=0)
loss = inputs - inputs * target + max_val + ((-max_val).exp() + (-inputs - max_val).exp()).log()
invprobs = F.logsigmoid(-inputs * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.sum(dim=1).mean()
class FocalLoss2(nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, inputs, target):
if not (target.size() == inputs.size()):
raise ValueError("Target size ({}) must be the same as input size ({})"
.format(target.size(), inputs.size()))
bce_loss = F.binary_cross_entropy_with_logits(inputs, target, reduction='none')
pt = torch.exp(-bce_loss)
f_loss = (1-pt)**self.gamma * bce_loss
return torch.mean(f_loss) * 28
class F1Loss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_pred, y_true):
y_pred = torch.exp(y_pred)
tp = torch.sum(y_true * y_pred, dim=0)
# tn = torch.sum((1 - y_true) * (1 - y_pred), axis=0)
fp = torch.sum((1 - y_true) * y_pred, dim=0)
fn = torch.sum(y_true * (1 - y_pred), dim=0)
eps = 1e-6
p = tp / (tp + fp + eps)
r = tp / (tp + fn + eps)
f1 = 2 * p * r / (p + r + eps)
f1[torch.isnan(f1)] = 0
return 1 - torch.mean(f1)
def balanced_sampling_ratio(epoch):
return np.clip(0.5 - epoch*0.025, 0.05, 1.0)
def train(model_name, fold, run=None, resume_epoch=-1):
run_str = '' if run is None else f'_{run}'
model_info = MODELS[model_name]
checkpoints_dir = f'../output/checkpoints/{model_name}{run_str}_{fold}'
tensorboard_dir = f'../output/tensorboard/{model_name}{run_str}_{fold}'
oof_dir = f'../output/oof/{model_name}{run_str}_{fold}'
os.makedirs(checkpoints_dir, exist_ok=True)
os.makedirs(tensorboard_dir, exist_ok=True)
os.makedirs(oof_dir, exist_ok=True)
print('\n', model_name, '\n')
logger = Logger(tensorboard_dir)
model = model_info.factory(**model_info.args)
model = model.cuda()
# try:
# torchsummary.summary(model, (4, 512, 512))
# print('\n', model_name, '\n')
# except:
# raise
# pass
model = torch.nn.DataParallel(model).cuda()
model = model.cuda()
dataset_train = ClassificationDataset(
fold=fold,
is_training=True,
transform=lambda x: torch.from_numpy(x),
crop_size=model_info.crop_size,
folds_split=model_info.folds_split,
**model_info.dataset_args
)
dataset_valid = ClassificationDataset(
fold=fold,
is_training=False,
transform=lambda x: torch.from_numpy(x),
crop_size=model_info.crop_size,
folds_split=model_info.folds_split,
# use_extarnal=model_info.dataset_args.get('use_extarnal', False)
)
epoch_size = 20000
model.training = True
if model_info.optimiser == 'adam':
print('using adam optimiser')
optimizer = optim.Adam(model.parameters(), lr=model_info.initial_lr)
else:
print('using sgd optimiser')
optimizer = optim.SGD(model.parameters(), lr=model_info.initial_lr, momentum=0.9, weight_decay=1e-5)
if model_info.scheduler == 'cos':
scheduler = utils.CosineAnnealingLRWithRestarts(optimizer, T_max=8, T_mult=1.2)
else:
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=16, verbose=True, factor=0.2)
print('Num training images: {}'.format(len(dataset_train)))
if resume_epoch > -1:
checkpoint = torch.load(f'{checkpoints_dir}/{resume_epoch:03}.pt')
model.module.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
data_loaders = {
'train': DataLoader(dataset_train,
num_workers=16,
batch_size=model_info.batch_size,
sampler=SemiBalancedSampler(dataset_train, epoch_size)),
'val': DataLoader(dataset_valid,
num_workers=16,
batch_size=8,
drop_last=True)
}
loss_scale = {}
if model_info.loss == 'bce':
print('using bce loss')
criterium = {
'bce': nn.BCEWithLogitsLoss()
}
loss_scale = {
'bce': 10.0
}
elif model_info.loss == 'focal_loss2':
print('using focal loss2')
criterium = {
'fl': FocalLoss2()
}
elif model_info.loss == 'focal_loss':
print('using focal loss')
criterium = {
'fl': FocalLoss()
}
elif model_info.loss == 'bce_f1':
print('using bce+f1 loss')
criterium = {
'f1': F1Loss(),
'bce': nn.BCEWithLogitsLoss()
}
loss_scale = {
'f1': 1.0,
'bce': 10.0
}
for epoch_num in range(resume_epoch+1, model_info.nb_epochs):
# scheduler.step(epoch=epoch_num)
data_loaders['train'].sampler.balanced_sampling_ratio = balanced_sampling_ratio(epoch_num)
for phase in ['train', 'val']:
epoch_results_true = []
epoch_results_output = []
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
if model_info.is_pretrained:
if epoch_num < 1:
model.module.freeze_encoder()
elif epoch_num == 1:
model.module.unfreeze_encoder()
epoch_loss_items = collections.defaultdict(list)
epoch_loss = []
data_loader = data_loaders[phase]
data_iter = tqdm(enumerate(data_loader), total=len(data_loader))
for iter_num, data in data_iter:
img = data['img'].cuda()
labels = data['labels'].cuda()
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
output = model(img)
total_loss = []
for loss_name, loss_fn in criterium.items():
loss = loss_fn(output, labels)
epoch_loss_items[loss_name].append(float(loss))
total_loss.append(loss * loss_scale.get(loss_name, 1.0))
epoch_loss.append(float(sum(total_loss)))
if phase == 'train':
sum(total_loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optimizer.step()
epoch_results_true.append(data['labels'].detach().cpu().numpy())
epoch_results_output.append(torch.sigmoid(output).detach().cpu().numpy())
data_iter.set_description(
f'{epoch_num} Loss: {np.mean(epoch_loss):1.4f}')
for loss_name, epoch_loss_item in epoch_loss_items.items():
logger.scalar_summary(f'loss_{loss_name}_{phase}', np.mean(epoch_loss_item), epoch_num)
logger.scalar_summary(f'loss_{phase}', np.mean(epoch_loss), epoch_num)
logger.scalar_summary('lr', optimizer.param_groups[0]['lr'], epoch_num) # scheduler.get_lr()[0]
epoch_results_true = np.concatenate(epoch_results_true, axis=0)
epoch_results_output = np.concatenate(epoch_results_output, axis=0)
score = np.mean([f1_score(epoch_results_true[:, i],
epoch_results_output[:, i] > 0.375,
average='binary')
for i in range(config.NB_CATEGORIES)])
logger.scalar_summary('f1_' + phase, score, epoch_num)
if phase == 'val':
scheduler.step(metrics=np.mean(epoch_loss), epoch=epoch_num)
np.save(f'{oof_dir}/{epoch_num:03}.npy', np.array([epoch_results_true, epoch_results_output]))
if epoch_num % 2 == 0:
torch.save(
{
'epoch': epoch_num,
'model_state_dict': model.module.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
},
f'{checkpoints_dir}/{epoch_num:03}.pt'
)
model.eval()
torch.save(model.state_dict(), f'{checkpoints_dir}/{model_name}_final.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('action', type=str, default='check')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--run', type=str, default='')
parser.add_argument('--fold', type=int, default=-1)
parser.add_argument('--weights', type=str, default='')
parser.add_argument('--epoch', type=int, default=-1)
parser.add_argument('--resume_weights', type=str, default='')
parser.add_argument('--resume_epoch', type=int, default=-1)
args = parser.parse_args()
action = args.action
model = args.model
fold = args.fold
if action == 'train':
try:
train(model_name=model, run=args.run, fold=args.fold, resume_epoch=args.resume_epoch)
except KeyboardInterrupt:
pass
|
using System;
using Web.Models;
namespace Web.Db
{
public partial class ContactSubmission
{
public static ContactSubmission CreateFromViewModel(ContactUsViewModel s)
{
return new ContactSubmission
{
Name = s.Name,
Email = s.Email,
Message = s.Message,
Created = DateTimeOffset.Now,
};
}
}
}
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ModelViewPresenter
{
public class Presenter
{
private readonly IView m_View;
private IModel m_Model;
public Presenter(IView view, IModel model)
{
this.m_View = view;
this.m_Model = model;
}
public void ReverseTextValue()
{
string reversed = ReverseString(m_View.TextValue);
m_Model.Reverse(reversed);
}
public void SetTextValue()
{
m_Model.Set(m_View.TextValue);
}
private static string ReverseString(string s)
{
char[] arr = s.ToCharArray();
Array.Reverse(arr);
return new string(arr);
}
}
}
|
/*
* Copyright 2007-2021, CIIC Guanaitong, Co., Ltd.
* All rights reserved.
*/
package com.ciicgat.grus.excel;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* @author chunhong.wan
* @date 2020/12/10
*/
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
public @interface ExcelColumn {
/**
* The Excel column this field maps to, zero-based.
* @return the zero-based column index
*/
int column();
/**
* The header-row (first row) name of the column in the Excel sheet.
* @return the column header name
*/
String name();
/**
* Converter pattern.
* Required for date fields; defaults to yyyy-MM-dd HH:mm:ss when omitted.
* @example "yyyy-MM-dd"
*/
String pattern() default "";
}
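// A minimal usage sketch (the DTO below is hypothetical):
//
// public class UserRow {
//
//     @ExcelColumn(column = 0, name = "Name")
//     private String name;
//
//     @ExcelColumn(column = 1, name = "Created At", pattern = "yyyy-MM-dd")
//     private java.util.Date createdAt;
// }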
|
* [The Basics](#basics)
* [Collections](#collections)
* [Operators](#operators)
* [Control Flow](#flow)
* [Scope](#scope)
* [Modules](#modules)
* [Classes](#classes)
* [Functions](#functions)
|
export * from './account';
export * from './account-pool';
export * from './account-pools';
export * from './account-registry';
|
package com.zxiang.project.business.supplyTissue.service;
import com.zxiang.project.business.supplyTissue.domain.SupplyTissue;
import java.util.List;
/**
* Tissue refill record service layer
*
* @author ZXiang
* @date 2018-09-09
*/
public interface ISupplyTissueService
{
/**
* Query a tissue refill record
*
* @param supplyTissueId tissue refill record ID
* @return the tissue refill record
*/
public SupplyTissue selectSupplyTissueById(Integer supplyTissueId);
/**
* Query the list of tissue refill records
*
* @param supplyTissue query criteria
* @return list of tissue refill records
*/
public List<SupplyTissue> selectSupplyTissueList(SupplyTissue supplyTissue);
/**
* Add a tissue refill record
*
* @param supplyTissue the record to add
* @return result
*/
public int insertSupplyTissue(SupplyTissue supplyTissue);
/**
* Update a tissue refill record
*
* @param supplyTissue the record to update
* @return result
*/
public int updateSupplyTissue(SupplyTissue supplyTissue);
/**
* Delete tissue refill records
*
* @param ids IDs of the records to delete
* @return result
*/
public int deleteSupplyTissueByIds(String ids);
}
|
using System;
using System.Drawing;
using System.Runtime.InteropServices;
using System.Windows.Forms;
namespace MetaCopy {
public partial class MiniMode : Form {
private MetaCore mainForm;
private const int WM_NCHITTEST = 0x84;
private const int HT_CAPTION = 0x2;
public const int WM_NCLBUTTONDOWN = 0xA1;
[DllImport("user32.dll")]
public static extern int SendMessage(IntPtr hWnd, int Msg, int wParam, int lParam);
[DllImportAttribute("user32.dll")]
public static extern bool ReleaseCapture();
public MiniMode() {
InitializeComponent();
}
public void setMain(MetaCore form)
{
this.mainForm = form;
}
protected override void WndProc(ref Message m) {
base.WndProc(ref m);
if (m.Msg == WM_NCHITTEST)
m.Result = (IntPtr)(HT_CAPTION);
}
private void onPanelMouseMove(object sender, MouseEventArgs e) {
if (e.Button == MouseButtons.Left) {
ReleaseCapture();
SendMessage(Handle, WM_NCLBUTTONDOWN, HT_CAPTION, 0);
}
}
void panel_DragEnter(object sender, DragEventArgs e) {
if (e.Data.GetDataPresent(DataFormats.FileDrop)) e.Effect = DragDropEffects.Copy;
}
private void panel_DragDrop(object sender, DragEventArgs e) {
mainForm.panel1_DragDrop(sender, e);
if(mainForm.autoCheck.Checked) mainForm.doCopy(this, null);
}
private void doMaximize(object sender, EventArgs e) {
Hide();
mainForm.Show();
}
private void onCopyBtn(object sender, EventArgs e) {
mainForm.doCopy(mainForm, e);
}
public void copyStart()
{
btnCopy.Text = "copying";
btnCopy.BackColor = Color.FromArgb(255, 27, 221, 151);
btnCopy.Enabled = false;
}
public void copyEnd()
{
btnCopy.Text = "COPY";
btnCopy.Enabled = true;
btnCopy.BackColor = Color.FromArgb(255, 242, 208, 59);
}
}
}
|
// ysoftman
// readcloser test
package main
import (
"bytes"
"fmt"
"io/ioutil"
)
func main() {
//////////////////////////
// For plain slices, assignment creates a reference; only copy duplicates the data.
// https://blog.golang.org/go-slices-usage-and-internals
a := []byte{}
a = append(a, 1, 2, 3, 4, 5)
fmt.Println("a:", a)
b := a
c := a[0:]
d := make([]byte, 10, 10)
copy(d, a)
a[2] = 10
a[3] = 11
a[4] = 12
fmt.Println("a:", a)
fmt.Println("b:", b)
fmt.Println("c:", c)
fmt.Println("d:", d)
//////////////////////////
// bytes buffer test
bb1 := new(bytes.Buffer)
bb1.WriteString("ysoftman")
// Value copy.
// Strictly speaking, bytes.Buffer's buf is a slice, so without copy the data
// is referenced rather than copied. But buf cannot be indexed directly:
// manipulating bb1 only moves bb1's own offset, not the underlying data, so
// bb2 and bb3 each keep their own struct offsets and are unaffected.
bb2 := bytes.NewBuffer(bb1.Bytes())
bb3 := bb1.Bytes()
bb1.Reset()
fmt.Println("bb1:", bb1.String())
fmt.Println("bb2:", bb2.String())
fmt.Println("bb3:", string(bb3))
//////////////////////////
// creating and using a readcloser
lemon := new(bytes.Buffer)
lemon.WriteString("lemon")
fmt.Println("lemon:", lemon.String())
// Create an io.ReadCloser.
// NopCloser()'s Close() just returns nil without touching the buffer data,
// so even when ReadAll moves rc's offset, the buffer shared with lemon is
// unaffected.
rc := ioutil.NopCloser(bytes.NewBuffer(lemon.Bytes()))
fmt.Println("lemon:", lemon.String())
// ReadAll moves the buffer's read offset to EOF, so the contents can no
// longer be read through rc.
bytes1, err1 := ioutil.ReadAll(rc)
if err1 == nil {
fmt.Println("ReadAll(rc):", string(bytes1))
}
bytes2, err2 := ioutil.ReadAll(rc)
if err2 == nil {
fmt.Println("ReadAll(rc):", string(bytes2))
}
// Only rc's offset moved to EOF, so lemon's value is preserved.
fmt.Println("lemon:", lemon.String())
// A new reader must be created from the bytes.
rc2 := ioutil.NopCloser(bytes.NewBuffer(bytes1))
bytes3, err3 := ioutil.ReadAll(rc2)
if err3 == nil {
fmt.Println("ReadAll(rc2):", string(bytes3))
}
}
|
/**
* @file tvinpaint.c
* @brief Total variation regularized inpainting demo for IPOL
* @author Pascal Getreuer <[email protected]>
*
*
* Copyright (c) 2011-2012, Pascal Getreuer
* All rights reserved.
*
* This program is free software: you can use, modify and/or
* redistribute it under the terms of the simplified BSD License. You
* should have received a copy of this license along this program. If
* not, see <http://www.opensource.org/licenses/bsd-license.html>.
*/
/**
* @mainpage
* @verbinclude readme.txt
*/
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "num.h"
#include "tvreg.h"
#include "imageio.h"
/** @brief Display intensities in the range [0,DISPLAY_SCALING] */
#define DISPLAY_SCALING 255
/** @brief Quality for writing JPEG images */
#define JPEGQUALITY 95
#ifdef NUM_SINGLE
#define IMAGEIO_NUM (IMAGEIO_SINGLE)
#else
#define IMAGEIO_NUM (IMAGEIO_DOUBLE)
#endif
/** @brief struct representing an image */
typedef struct
{
/** @brief Float image data */
num *Data;
/** @brief Image width */
int Width;
/** @brief Image height */
int Height;
/** @brief Number of channels */
int NumChannels;
} image;
/** @brief Print program explanation and usage */
void PrintHelpMessage()
{
puts(
"Total variation regularized inpainting IPOL demo, P. Getreuer, 2012\n\n"
"Syntax: tvinpaint <D> <lambda> <input> <inpainted>\n");
puts("where <D>, <input>, and <inpainted> are "
READIMAGE_FORMATS_SUPPORTED " images.\n");
puts("Example:\n"
" tvinpaint mountains-D.bmp 1e3 mountains-f.bmp inpainted.bmp\n");
}
int Inpaint(image u, image f, image D, num Lambda);
num ComputeRmse(image f, image u);
void ThresholdD(image D, num Lambda);
int IsGrayscale(image f);
int main(int argc, char **argv)
{
const char *InputFile, *DomainFile, *OutputFile;
image f = {NULL, 0, 0, 0}, u = {NULL, 0, 0, 0}, D = {NULL, 0, 0, 0};
num Lambda;
int Status = 1;
if(argc != 5)
{
PrintHelpMessage();
return 0;
}
/* Read command line arguments */
DomainFile = argv[1];
Lambda = (num)atof(argv[2]);
InputFile = argv[3];
OutputFile = argv[4];
/* Read the input images */
if(!(f.Data = (num *)ReadImage(&f.Width, &f.Height, InputFile,
IMAGEIO_RGB | IMAGEIO_PLANAR | IMAGEIO_NUM)) ||
!(D.Data = (num *)ReadImage(&D.Width, &D.Height, DomainFile,
IMAGEIO_RGB | IMAGEIO_PLANAR | IMAGEIO_NUM)))
goto Catch;
else if(f.Width != D.Width || f.Height != D.Height)
{
fprintf(stderr, "Size mismatch: D is %dx%d but f is %dx%d\n",
D.Width, D.Height, f.Width, f.Height);
goto Catch;
}
f.NumChannels = IsGrayscale(f) ? 1 : 3;
u = f;
/* Allocate space for the inpainted image */
if(!(u.Data = (num *)Malloc(sizeof(num) * ((size_t)f.Width)
* ((size_t)f.Height) * f.NumChannels)))
{
fprintf(stderr, "Memory allocation failed.\n");
goto Catch;
}
if(!Inpaint(u, f, D, Lambda))
{
fprintf(stderr, "Failure!\n");
goto Catch;
}
/* Write inpainted image */
if(!WriteImage(u.Data, u.Width, u.Height, OutputFile,
((u.NumChannels == 1) ? IMAGEIO_GRAYSCALE : IMAGEIO_RGB)
| IMAGEIO_PLANAR | IMAGEIO_NUM, JPEGQUALITY))
fprintf(stderr, "Error writing to \"%s\".\n", OutputFile);
Status = 0;
Catch:
if(u.Data)
Free(u.Data);
if(D.Data)
Free(D.Data);
if(f.Data)
Free(f.Data);
return Status;
}
/**
* @brief TV regularized inpainting
* @param u inpainted image (output)
* @param f given noisy image
* @param D the inpainting domain
* @param Lambda the fidelity weight
* @return 1 on success, 0 on failure
*
* This wrapper routine sets up the inpainting problem. The actual
* split Bregman computation is performed in TvRestore().
*/
int Inpaint(image u, image f, image D, num Lambda)
{
tvregopt *Opt = NULL;
const long NumPixels = ((long)f.Width) * ((long)f.Height);
num *Red = D.Data;
num *Green = D.Data + NumPixels;
num *Blue = D.Data + 2*NumPixels;
long n, k;
int Success = 0;
if(!(Opt = TvRegNewOpt()))
{
fprintf(stderr, "Memory allocation failed\n");
return 0;
}
memcpy(u.Data, f.Data, sizeof(num)*f.Width*f.Height*f.NumChannels);
/* Convert the mask into a spatially-varying lambda */
for(n = 0; n < NumPixels; n++)
if(0.299*Red[n] + 0.587*Green[n] + 0.114*Blue[n] > 0.5)
{
D.Data[n] = 0; /* Inside of the inpainting domain */
/* Set u = 0.5 within D */
for(k = 0; k < u.NumChannels; k++)
u.Data[n + k*NumPixels] = 0.5;
}
else
D.Data[n] = Lambda; /* Outside of the inpainting domain */
TvRegSetVaryingLambda(Opt, D.Data, D.Width, D.Height);
TvRegSetMaxIter(Opt, 10000);
TvRegSetTol(Opt, (num)1e-5);
/* TvRestore performs the split Bregman inpainting */
if(!TvRestore(u.Data, f.Data, f.Width, f.Height, f.NumChannels, Opt))
{
fprintf(stderr, "Error in computation.\n");
goto Catch;
}
Success = 1;
Catch:
TvRegFreeOpt(Opt);
return Success;
}
/** @brief Test whether image is grayscale */
int IsGrayscale(image f)
{
const long NumPixels = ((long)f.Width) * ((long)f.Height);
const num *Red = f.Data;
const num *Green = f.Data + NumPixels;
const num *Blue = f.Data + 2*NumPixels;
long n;
for(n = 0; n < NumPixels; n++)
if(Red[n] != Green[n] || Red[n] != Blue[n])
return 0;
return 1;
}
|
package expo.modules.kotlin
import expo.modules.kotlin.modules.Module
class ModuleRegistry : Iterable<ModuleHolder> {
private val registry = mutableMapOf<String, ModuleHolder>()
fun register(module: Module) {
val holder = ModuleHolder(module)
registry[holder.name] = holder
}
fun register(provider: ModulesProvider) = apply {
provider.getModulesList().forEach { type ->
val module = type.newInstance()
register(module)
}
}
fun hasModule(name: String): Boolean = registry.containsKey(name)
fun getModule(name: String): Module? = registry[name]?.module
fun getModuleHolder(name: String): ModuleHolder? = registry[name]
override fun iterator(): Iterator<ModuleHolder> = registry.values.iterator()
}
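// A minimal usage sketch (MyModule is a hypothetical Module subclass; the
// holder name is assumed to derive from the module definition):
//
// val registry = ModuleRegistry()
// registry.register(MyModule())
// check(registry.hasModule("MyModule"))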
|
require "record_accessors/version"
module RecordAccessors
def self.included(base)
base.class_eval do
class << self
def available_attributes
@available_attributes ||= []
end
alias_method :attr_accessor_without_tracking, :attr_accessor
def attr_accessor(*names)
available_attributes.concat(names)
attr_accessor_without_tracking(*names)
end
end
end
end
def hash_from_attributes
self.class.available_attributes.inject({}) do |h, key|
h[key] = self.send(key)
h
end
end
def initialize_from_attributes(attrs = {})
attrs.each do |name, value|
if respond_to?("#{name}=")
send("#{name}=", value)
end
end
end
end
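# A minimal usage sketch (the Record class and its attributes are
# hypothetical). Including RecordAccessors makes attr_accessor record each
# attribute name, so instances can round-trip through a Hash:
#
#   class Record
#     include RecordAccessors
#     attr_accessor :name, :email
#   end
#
#   r = Record.new
#   r.initialize_from_attributes(name: "Ada", email: "ada@example.com")
#   r.hash_from_attributes #=> { name: "Ada", email: "ada@example.com" }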
|
// The MIT License (MIT)
//
// Copyright (c) Andrew Armstrong/FacticiusVir 2019
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// This file was automatically generated and should not be edited directly.
using System;
using System.Runtime.InteropServices;
namespace SharpVk.Interop
{
/// <summary>
///
/// </summary>
[StructLayout(LayoutKind.Sequential)]
public unsafe partial struct GraphicsPipelineCreateInfo
{
/// <summary>
/// The type of this structure.
/// </summary>
public SharpVk.StructureType SType;
/// <summary>
/// Null or an extension-specific structure.
/// </summary>
public void* Next;
/// <summary>
/// A bitmask of PipelineCreateFlagBits controlling how the pipeline
/// will be generated, as described below.
/// </summary>
public SharpVk.PipelineCreateFlags Flags;
/// <summary>
/// The number of entries in the pStages array.
/// </summary>
public uint StageCount;
/// <summary>
/// An array of size stageCount structures of type
/// PipelineShaderStageCreateInfo describing the set of the shader
/// stages to be included in the graphics pipeline.
/// </summary>
public SharpVk.Interop.PipelineShaderStageCreateInfo* Stages;
/// <summary>
/// An instance of the PipelineVertexInputStateCreateInfo structure.
/// </summary>
public SharpVk.Interop.PipelineVertexInputStateCreateInfo* VertexInputState;
/// <summary>
/// An instance of the PipelineInputAssemblyStateCreateInfo structure
/// which determines input assembly behavior.
/// </summary>
public SharpVk.Interop.PipelineInputAssemblyStateCreateInfo* InputAssemblyState;
/// <summary>
/// An instance of the PipelineTessellationStateCreateInfo structure,
/// or Null if the pipeline does not include a tessellation control
/// shader stage and tessellation evaluation shader stage.
/// </summary>
public SharpVk.Interop.PipelineTessellationStateCreateInfo* TessellationState;
/// <summary>
/// An instance of the PipelineViewportStateCreateInfo structure, or
/// Null if the pipeline has rasterization disabled.
/// </summary>
public SharpVk.Interop.PipelineViewportStateCreateInfo* ViewportState;
/// <summary>
/// An instance of the PipelineRasterizationStateCreateInfo structure.
/// </summary>
public SharpVk.Interop.PipelineRasterizationStateCreateInfo* RasterizationState;
/// <summary>
/// An instance of the PipelineMultisampleStateCreateInfo, or Null if
/// the pipeline has rasterization disabled.
/// </summary>
public SharpVk.Interop.PipelineMultisampleStateCreateInfo* MultisampleState;
/// <summary>
/// An instance of the PipelineDepthStencilStateCreateInfo structure,
/// or Null if the pipeline has rasterization disabled or if the
/// subpass of the render pass the pipeline is created against does not
/// use a depth/stencil attachment.
/// </summary>
public SharpVk.Interop.PipelineDepthStencilStateCreateInfo* DepthStencilState;
/// <summary>
/// An instance of the PipelineColorBlendStateCreateInfo structure, or
/// Null if the pipeline has rasterization disabled or if the subpass
/// of the render pass the pipeline is created against does not use any
/// color attachments.
/// </summary>
public SharpVk.Interop.PipelineColorBlendStateCreateInfo* ColorBlendState;
/// <summary>
/// A pointer to PipelineDynamicStateCreateInfo and is used to indicate
/// which properties of the pipeline state object are dynamic and can
/// be changed independently of the pipeline state. This can be Null,
/// which means no state in the pipeline is considered dynamic.
/// </summary>
public SharpVk.Interop.PipelineDynamicStateCreateInfo* DynamicState;
/// <summary>
/// The description of binding locations used by both the pipeline and
/// descriptor sets used with the pipeline.
/// </summary>
public SharpVk.Interop.PipelineLayout Layout;
/// <summary>
/// A handle to a render pass object describing the environment in
/// which the pipeline will be used; the pipeline must only be used
/// with an instance of any render pass compatible with the one
/// provided.
/// </summary>
public SharpVk.Interop.RenderPass RenderPass;
/// <summary>
/// The index of the subpass in the render pass where this pipeline
/// will be used.
/// </summary>
public uint Subpass;
/// <summary>
/// A pipeline to derive from.
/// </summary>
public SharpVk.Interop.Pipeline BasePipelineHandle;
/// <summary>
/// An index into the pCreateInfos parameter to use as a pipeline to
/// derive from.
/// </summary>
public int BasePipelineIndex;
}
}
|
using MediatR;
namespace StEn.FinCalcR.WinUi.Events
{
public class HintEvent : INotification
{
public HintEvent(string message)
{
this.Message = message;
}
public string Message { get; }
}
}
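// A minimal usage sketch (assumes an injected MediatR IMediator instance):
//
//     await mediator.Publish(new HintEvent("Value saved."));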
|
// test_bit_array.cpp
#include <ulib/utility/bit_array.h>
#ifndef U_HTTP2_DISABLE
# include <ulib/utility/http2.h>
#endif
int U_EXPORT main(int argc, char** argv)
{
U_ULIB_INIT(argv);
U_TRACE(5, "::main(%d,%p)", argc, argv)
UBitArray addrmask;
uint32_t i, nbits = addrmask.getNumBits();
U_ASSERT_EQUALS( addrmask.count(), 0 )
for (i = 0; i < nbits; ++i)
{
U_ASSERT_EQUALS( addrmask[i], false )
}
addrmask.setAll();
U_ASSERT_EQUALS( addrmask.count(), nbits )
for (i = 0; i < nbits; ++i)
{
U_ASSERT( addrmask[i] )
}
addrmask.clearAll();
U_ASSERT_EQUALS( addrmask.count(), 0 )
for (i = 0; i < nbits; ++i)
{
U_ASSERT_EQUALS( addrmask[i], false )
}
addrmask.setAll();
addrmask.set(1024);
++nbits;
U_ASSERT_EQUALS( addrmask.count(), nbits )
for (i = 0; i < nbits; ++i)
{
U_ASSERT( addrmask[i] )
}
for (nbits = addrmask.getNumBits(); i < nbits; ++i)
{
U_ASSERT_EQUALS( addrmask[i], false )
}
#if defined(DEBUG) && !defined(U_HTTP2_DISABLE)
UHTTP2::testHpackDynTbl();
#endif
}
|
package np.core;
import java.io.*;
public class IO {
public static String LoadString(InputStream stream) {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
String line;
StringBuilder buffer = new StringBuilder();
while((line = reader.readLine()) != null) {
buffer.append(line).append('\n');
}
return buffer.toString();
} catch(Exception ex) {
ex.printStackTrace();
return "";
}
}
public static String LoadString(String path) {
try {
return LoadString(new FileInputStream(path));
} catch(Exception ex) {
InputStream stream = IO.class.getResourceAsStream(path);
if(stream == null) {
stream = IO.class.getClassLoader().getResourceAsStream(path);
}
if(stream != null) {
return LoadString(stream);
} else {
return null;
}
}
}
}
|
import { Component, OnInit } from "@angular/core";
import { TvListService } from "~/services/tvlist.service";
import { RouterExtensions } from "nativescript-angular/router";
import { MiscService } from "~/services/misc.service";
@Component({
selector: "ns-landing",
templateUrl: "./explore.component.html",
styleUrls: ["./explore.component.scss"]
})
export class ExploreComponent implements OnInit {
imagesLength = 12;
tvs: any[];
constructor(private tvListService: TvListService,
public miscService: MiscService,
private routerExtensions: RouterExtensions) { }
ngOnInit() {
this.tvListService.allLinksLoaded.subscribe(response => {
if (!response)
return;
console.log(this.tvListService.tvLinks.length);
this.tvs = [];
for (let i = 0; i < this.imagesLength; i++) {
var tvLinks = this.tvListService.tvLinks;
var randomNumber = Math.floor(Math.random() * tvLinks.length);
this.tvs.push(tvLinks[randomNumber])
}
});
}
getSrc(index) {
let tv = this.tvs[index - 1];
if (!tv)
return "";
// console.log(tv);
return tv.logo;
}
imageTapped(index) {
var tvModel = this.tvs[index - 1];
this.openPlayer(tvModel);
}
openPlayer(item) {
this.miscService.openPlayer(item);
}
viewAll() {
this.routerExtensions.navigate(["home", "search"], {
clearHistory: true
});
}
}
|
-- | This module is used to implement a wrapper program for propellor
-- distribution.
--
-- Distributions should install this program into PATH.
-- (Cabal builds it as dist/build/propellor/propellor).
--
-- This is not the propellor main program (that's config.hs).
-- This bootstraps ~/.propellor/config.hs, builds it if
-- it's not already built, and runs it.
--
-- If ./config.hs exists and looks like a propellor config file,
-- it instead builds and runs in the current working directory.
module Propellor.Wrapper (runWrapper) where
import Propellor.DotDir
import Propellor.Message
import Propellor.Bootstrap
import Utility.Monad
import Utility.Directory
import Utility.FileMode
import Utility.Process
import Utility.Process.NonConcurrent
import Utility.FileSystemEncoding
import System.Environment (getArgs)
import System.Exit
import System.Posix
import Data.List
import Control.Monad.IfElse
import Control.Applicative
import Prelude
runWrapper :: IO ()
runWrapper = withConcurrentOutput $ do
useFileSystemEncoding
go =<< getArgs
where
go ["--init"] = interactiveInit
go args = ifM configInCurrentWorkingDirectory
( buildRunConfig args
, ifM (doesDirectoryExist =<< dotPropellor)
( do
checkRepoUpToDate
changeWorkingDirectory =<< dotPropellor
buildRunConfig args
, error "Seems that ~/.propellor/ does not exist. To set it up, run: propellor --init"
)
)
buildRunConfig :: [String] -> IO ()
buildRunConfig args = do
unlessM (doesFileExist "propellor") $ do
buildPropellor Nothing
putStrLn ""
putStrLn ""
(_, _, _, pid) <- createProcessNonConcurrent (proc "./propellor" args)
exitWith =<< waitForProcessNonConcurrent pid
configInCurrentWorkingDirectory :: IO Bool
configInCurrentWorkingDirectory = ifM (doesFileExist "config.hs")
( do
-- This is a security check to avoid using the current
-- working directory as the propellor configuration
-- if it's not owned by the user, or is world-writable,
-- or group writable. (Some umasks may make directories
-- group writable, but typical ones do not.)
s <- getFileStatus "."
uid <- getRealUserID
if fileOwner s /= uid
then unsafe "you don't own the current directory"
else if checkMode groupWriteMode (fileMode s)
then unsafe "the current directory is group writable"
else if checkMode otherWriteMode (fileMode s)
then unsafe "the current directory is world-writable"
else ifM mentionspropellor
( return True
, notusing "it does not seem to be a propellor config file"
)
, return False
)
where
unsafe s = notusing (s ++ ". This seems unsafe.")
notusing s = error $ "Not using ./config.hs because " ++ s
mentionspropellor = ("Propellor" `isInfixOf`) <$> readFile "config.hs"
|
subroutine takemolec(kk,infoonly,molinquire,indexanswer)
*
* to add new molecules, just fill in with name as in Tsuji molecular file
* starting with the first ' ' at the end of data block molinpresmo.
* BPz 10/10-95
*
* this routine is to be used after a call to jon,
* -if eqmol has been called also-, in order to get the pressures
* one needs placed in the common 'fullequilibrium'.
* This is the routine to change if one wants more and/or other
* molecular pressures to be kept.
* The values in the common fullequilibrium can then be used for
* computation of opacities, printing, etc.
* kk is the depth point to be addressed.
* 020890 BPlez
*
include 'spectrum.inc'
include 'tsuji.par'
integer maxim
parameter (maxim=1000)
logical tsuswitch,tsuji,first
character*20 molinpresmo(maxmol),nametryck(maxmol)
doubleprecision parptsuji,xmettryck,xiontryck,partryck
doubleprecision presneutral,presion,presion2,presion3
common /tsuji/ tsuji,tsuswitch,nattsuji,nmotsuji,
& parptsuji(maxim+400)
common /fullequilibrium/ partryck(ndp,maxmol),
& xmettryck(ndp,maxmet),xiontryck(ndp,maxmet),nametryck
common /orderedpress/ presneutral(ndp,100),presion(ndp,100),
& presion2(ndp,100),presion3(ndp,100)
COMMON/CARC3/F1P,F3P,F4P,F5P,HNIC,PRESMO(30)
common/ci1/fl2(5),parco(45),parq(180),shxij(5),tparf(4),
& xiong(16,5),eev,enamn,sumh,xkbol,nj(16),iel(16),
& nel,summ
* common from eqmol
logical switer,infoonly
character*20 mol(maxim),molinquire
integer nelem(5,maxim),natom(5,maxim),mmax(maxim),nelemx(100),
& nmol,
& nmetal,nimax,marcsnelemx(20),marcsnelemj,j,atindex(20),
& molindex(maxmol),index,indexanswer
real exponent(maxim),g0(100),g1(100),g2(100),g3(100),sumpress
doubleprecision IP(100),KP(100),uiidui(100),eps,fp(100),
& ppmol(maxim),apm(maxim),c(maxim,5),p(100),ccomp(100),
& ipp(100),ippp(100),d00(maxim),qmol(maxim),
& reducedmass15(maxim)
character*20 molcode(maxim)
COMMON/COMFH1/C,NELEM,NATOM,MMAX,PPMOL,d00,qmol,APM,MOL,IP,
& ipp,ippp,g0,g1,g2,g3,CCOMP,exponent,reducedmass15,
& UIIDUI,P,FP,KP,eps,NELEMX,NIMAX,NMETAL,NMOL,switer,
& molcode
real abund(16),anjon(16,5),h(5),part(16,5),dxi,f1,f2,f3,
& f4,f5,xkhm,xmh,xmy
common/ci5/abund,anjon,h,part,dxi,f1,f2,f3,f4,f5,xkhm,xmh,xmy
save atindex,molindex,first
data molinpresmo/'H - ','H H ',
& 'H H + ','H O H ',
& 'O H ','C H ',
& 'C O ','C N ',
& 'C C ','N N ',
& 'O O ','N O ',
& 'N H ','C C H H ',
& 'H C N ','C C H ',
& ' ','H S ',
& 'SiH ','C C C H ',
& 'C C C ','C S ',
& 'SiC ','SiC C ',
& 'N S ','SiN ',
& 'SiO ','S O ',
& 'S S ','SiS ',
& 'TiO ','V O ',
& 'ZrO ','MgH ',
& 'CaH ','H F ',
& 'SiO ','H Cl ',
& 'FeH ','SiH ',
& 'N O ','C H H H H ',
& 'AlH ','CrH ',
& 'LaO ','TiH ',
& 'Y O ','NaH ',
& ' ',' ',
& ' ',' ',
& ' ',' ',
& ' ',' ',
& ' ',' ',
& ' ',' '/
data first/.true./
data marcsnelemx/ 1, 2, 6, 7, 8, 10, 11, 12, 13, 14, 16, 19,
& 20, 24, 26, 28, 21, 22, 23, 25/
data molindex/maxmol*0/
* store the metals and ions
* The 16 first are indexed like in jon:
* H, He, C, N, O, Ne, Na, Mg, Al, Si, S, K, Ca, Cr, Fe, Ni
* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
* and then:
* Sc, Ti, V, Mn
* 17 18 19 20
*
cc if(nattsuji.gt.maxmet) stop 'takemolec: maxmet too small'
if (infoonly) then
* get index for molecular pressure in partryck for molecule molinquire
*
indexanswer=0
do j=1,maxmol
if (molinquire.eq.molinpresmo(j)) then
indexanswer=j
endif
enddo
if (indexanswer.eq.0) then
print*,'takemolec infoonly: molec. not implemented',molinquire
print*,' See and complete data block molinpresmo!'
cccc print*,' complete also atomdata table with ID and IP'
stop
endif
return
endif
if (first) then
do i=1,100
do j=1,ndp
presneutral(j,i)=-1.0
presion(j,i)=-1.0
presion2(j,i)=-1.0
presion3(j,i)=-1.0
enddo
enddo
* find index for atomic and molecular pressures
do i=1,nmetal
nelemi=nelemx(i)
do j=1,20
marcsnelemj=marcsnelemx(j)
if (nelemi.eq.marcsnelemj) then
atindex(j)=i
endif
enddo
enddo
* the indexes from 1 to 30 for partryck correspond to
* the ones in presmo (common carc3). See data block molinpresmo.
* 31 is TiO, 32 is VO, 33 is ZrO, 34 is MgH., 35 is CaH , 36 is HF
* ( compatible with new bsyn !!!)
*********************************************************************
do i=1,maxmol
if (molinpresmo(i).ne.' ') then
j=1
do while (molinpresmo(i).ne.mol(j).and.j.lt.nmol)
j=j+1
enddo
if (molinpresmo(i).eq.mol(j)) then
molindex(i)=4*nmetal+j
else
* this is when we reach the last molecule in Tsuji's list
* without finding the right molecule. we flag with negative pressure.
molindex(i)=4*nmetal+nmol+2
endif
else
* partial pressure=0.0
molindex(i)=4*nmetal+nmol+1
endif
enddo
if (4*nmetal+nmol+2.gt.maxim+400) stop
& 'takemolec; maxim dim. too small! ERROR!'
if (nmol+2.gt.maxim) stop
& 'takemolec; maxim dim. too small! ERROR!'
parptsuji(4*nmetal+nmol+1) = 1.e-31
parptsuji(4*nmetal+nmol+2) = -1.0
mol(nmol+1)='zero pressure'
mol(nmol+2)='not existing'
endif
*
do j=1,20
xmettryck(kk,j)=parptsuji(atindex(j))
xiontryck(kk,j)=parptsuji(atindex(j)+nmetal)
enddo
* Set partition functions and ionisation fractions to the values used
* in eqmol_pe/die_pe. Part(16,5) and anjon(16,5) are used in detabs for
* the computation of continuous opacities.
cc print*,'takemolec old part, new part for atoms for depth',kk
do j=1,16
if (first) then
if ((nj(j).eq.4.and.g3(marcsnelemx(j)).eq.0.).or.
& (nj(j).gt.4) ) print*,
& 'WARNING, takemolec: number of ionisation stages considered',
& ' for',marcsnelemx(j),
& ' lower than requested in input file jonabs.dat!'
endif
index=atindex(j)
nelemi=nelemx(index)
cc print*,nelemi,marcsnelemx(j),' =?'
marcsnelemj=marcsnelemx(j)
cc print*,part(j,1),part(j,2),part(j,3),part(j,4)
cc print*,g0(marcsnelemj),g1(marcsnelemj),g2(marcsnelemj),
cc & g3(marcsnelemj)
part(j,1)=g0(marcsnelemj)
part(j,2)=g1(marcsnelemj)
part(j,3)=g2(marcsnelemj)
part(j,4)=g3(marcsnelemj)
sumpress=parptsuji(index)+
& parptsuji(index+nmetal)+
& parptsuji(index+2*nmetal)+
& parptsuji(index+3*nmetal)
anjon(j,1)=parptsuji(index)/sumpress
anjon(j,2)=parptsuji(index+nmetal)/sumpress
anjon(j,3)=parptsuji(index+2*nmetal)/sumpress
anjon(j,4)=parptsuji(index+3*nmetal)/sumpress
enddo
*
* now, the pressures indexed with the atomic number
* The arrays are initially set to -1.0 to allow selection later in bsyn of the
* elements not treated in eqmol. NOTE: does not work for
* presion3 not treated in eqmol, and which is 0.0 after next loop.
do i=1,nmetal
nelemi=nelemx(i)
presneutral(kk,nelemi)=parptsuji(i)
presion(kk,nelemi)=parptsuji(i+nmetal)
presion2(kk,nelemi)=parptsuji(i+2*nmetal)
presion3(kk,nelemi)=parptsuji(i+3*nmetal)
enddo
* and the molecules
do i=1,maxmol
partryck(kk,i)=parptsuji(molindex(i))
if (first) then
nametryck(i)=mol(molindex(i)-4*nmetal)
endif
enddo
first=.false.
***************************** debug*******************
ccc if (kk.eq.1) print*,'same?',(partryck(kk,i),i=1,35)
***************************** debug*******************
return
end
|
# coding: utf-8
require 'term/table'
RSpec.describe IO do
it '#table draws a table' do
t = IO.table << ['中', 2] << ['a', 'b']
expect(t.to_s).to eq "┌--┬-┐\n│中│2│\n├--┼-┤\n│a │b│\n└--┴-┘\n"
end
end
|
package com.chen.ddd.interfaces.http.handle;
import com.chen.ddd.core.common.exception.DomainRuntimeException;
import com.chen.ddd.core.common.exception.NotExistException;
import com.chen.ddd.interfaces.http.exception.NotLoginException;
import com.chen.ddd.interfaces.http.result.R;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpStatus;
import org.springframework.http.converter.HttpMessageConversionException;
import org.springframework.validation.BindException;
import org.springframework.validation.BindingResult;
import org.springframework.validation.FieldError;
import org.springframework.web.bind.MethodArgumentNotValidException;
import org.springframework.web.bind.MissingServletRequestParameterException;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.servlet.NoHandlerFoundException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.validation.ConstraintViolation;
import javax.validation.ConstraintViolationException;
import java.util.IllegalFormatException;
import java.util.List;
import java.util.Set;
/**
* Exception handling
*
* @author chen
* @since 2018/11/3 0:17.
*/
@Slf4j
@Order(Ordered.HIGHEST_PRECEDENCE)
@ControllerAdvice
public class ExceptionHandle {
/**
* Entity-not-found exception
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(NotExistException.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
NotExistException exception) {
log.warn("不存在异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Not-logged-in exception
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(NotLoginException.class)
@ResponseBody
public R notLoginException(HttpServletRequest request, HttpServletResponse response,
NotLoginException exception) {
log.warn("未登录异常", exception);
return R.fail(R.NOT_LOGIN_FAIL_CODE, exception.getLocalizedMessage(), null);
}
/**
* Domain runtime exception
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(DomainRuntimeException.class)
@ResponseBody
public R doMainRuntimeException(HttpServletRequest request, HttpServletResponse response,
DomainRuntimeException exception) {
log.warn("领域运行时异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Illegal argument exception
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(IllegalArgumentException.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
IllegalArgumentException exception) {
log.warn("非法参数异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Illegal state exception
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(IllegalStateException.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
IllegalStateException exception) {
log.warn("非法状态异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Illegal format exception
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(IllegalFormatException.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
IllegalFormatException exception) {
log.warn("非法格式异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Parameter validation failure
* {@link javax.validation.Valid} {@link org.springframework.validation.annotation.Validated}
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(MethodArgumentNotValidException.class)
@ResponseBody
public R methodArgumentNotValidException(HttpServletRequest request, HttpServletResponse response,
MethodArgumentNotValidException exception) {
log.warn("方法参数验证错误异常", exception);
BindingResult bindingResult = exception.getBindingResult();
List<FieldError> fieldErrors = bindingResult.getFieldErrors();
return R.fail(fieldErrors.get(0).getDefaultMessage());
}
/**
* Parameter validation failure
* {@link javax.validation.Valid} {@link org.springframework.validation.annotation.Validated}
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(BindException.class)
@ResponseBody
public R bindException(HttpServletRequest request, HttpServletResponse response,
BindException exception) {
log.warn("方法参数验证错误异常", exception);
BindingResult bindingResult = exception.getBindingResult();
List<FieldError> fieldErrors = bindingResult.getFieldErrors();
return R.fail(fieldErrors.get(0).getDefaultMessage());
}
/**
* Parameter validation failure
* {@link org.springframework.validation.annotation.Validated}
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(ConstraintViolationException.class)
@ResponseBody
public R constraintViolationException(HttpServletRequest request, HttpServletResponse response,
ConstraintViolationException exception) {
log.warn("方法参数验证错误异常", exception);
Set<ConstraintViolation<?>> constraintViolationSet = exception.getConstraintViolations();
String msg = null;
if (CollectionUtils.isNotEmpty(constraintViolationSet)) {
msg = constraintViolationSet.stream()
.findFirst()
.map(ConstraintViolation::getMessage)
.orElse(null);
}
return R.fail(msg);
}
/**
* Missing request parameter
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(MissingServletRequestParameterException.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
MissingServletRequestParameterException exception) {
log.warn("缺少请求参数", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Message conversion exception (e.g. malformed JSON)
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(HttpMessageConversionException.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
HttpMessageConversionException exception) {
log.warn("消息转换异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* 404 error (no handler found).
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(NoHandlerFoundException.class)
@ResponseBody
public R noHandlerFoundException(HttpServletRequest request, HttpServletResponse response,
NoHandlerFoundException exception) {
log.warn("资源不存在异常", exception);
return R.fail(exception.getLocalizedMessage());
}
/**
* Uncaught exception (fallback handler)
*
* @param request request
* @param response response
* @param exception exception
* @return error response
*/
@ResponseStatus(code = HttpStatus.OK)
@ExceptionHandler(Exception.class)
@ResponseBody
public R exception(HttpServletRequest request, HttpServletResponse response,
Exception exception) {
log.error("出现未拦截的异常", exception);
return R.error(exception.getLocalizedMessage());
}
}
|
#! /bin/sh
find ../../loom/graphics -name "*.[ch]" -o -name "*.cpp" > ./files.txt
files=$(cat files.txt)
for item in $files ; do
dn=$(dirname "$item")
mkdir -p "out/$dn"
./uncrustify -f "$item" -c default.cfg > "out/$item"
done
|
import { Redis } from "ioredis";
import faker from "faker";
import { connect } from "../../src/redis/connect";
import { disconnect } from "../../src/redis/disconnect";
import { clear } from "../../src/redis/clear";
const mocks = require("../../mocks");
describe("redis/clear", () => {
const pattern = String(faker.random.number({ min: 1000 }));
let redis: Redis;
beforeAll(async () => {
redis = await connect({
uri: mocks.connection.REDIS_URI,
database: faker.random.word(),
});
});
afterAll(async () => {
await disconnect(redis);
});
beforeEach(async () => {
await redis.flushall();
await feed(redis, pattern);
});
it("should remove all items successfully", async () => {
const preKeys = await redis.keys("*");
expect(preKeys.length).toBeTruthy();
await clear(redis);
const keys = await redis.keys("*");
expect(keys.length).toBe(0);
});
it("should remove all items WITH pattern successfully", async () => {
const keyPrefix = redis.options.keyPrefix;
const preKeys = await redis.keys("*");
expect(preKeys.length).toBeTruthy();
await clear(redis, pattern);
const keys = await redis.keys(`${keyPrefix}*`);
expect(keys.length).toBeTruthy();
const deletedKeys = await redis.keys([keyPrefix, pattern, "*"].join(""));
expect(deletedKeys.length).toBeFalsy();
});
it("should do notthing if we couldn't found any items", async () => {
const keyPrefix = redis.options.keyPrefix;
const preKeys = await redis.keys("*");
expect(preKeys.length).toBeTruthy();
await clear(redis, faker.random.uuid());
const keys = await redis.keys([keyPrefix, pattern, "*"].join(""));
expect(keys.length).toBeTruthy();
});
it("should clear all items if both key prefix and pattern were not used", async () => {
const redis = await connect({ uri: mocks.connection.REDIS_URI });
await redis.flushall();
await feed(redis, pattern);
const preKeys = await redis.keys("*");
expect(preKeys.length).toBeTruthy();
await clear(redis);
const keys = await redis.keys("*");
expect(keys.length).toBe(0);
await disconnect(redis);
});
});
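// Seeds the test database with a mix of hashes, sets, and plain string keys;
// roughly a third of the keys are prefixed with `pattern`.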
async function feed(redis: Redis, pattern: string) {
const count = faker.random.number({ min: 300, max: 600 });
const promises: Promise<any>[] = [];
for (let i = 0; i < count; i++) {
if (i % 2 === 0) {
promises.push(
redis.hset(faker.random.uuid(), "username", faker.internet.userName())
);
continue;
}
if (i % 3 === 0) {
promises.push(redis.sadd(faker.internet.userName(), faker.random.uuid()));
continue;
}
promises.push(redis.set([pattern, faker.random.uuid()].join("/"), 1));
}
  // Resolve only after every queued command has actually executed.
  return Promise.all(promises);
}
|
include defs
# docant
#
# Similar to cant(name), but precedes the message with the name
# of the program that was running when the file could not be
# opened. Helpful in a pipeline to verify which program was not
# able to open a file.
#
subroutine docant(name)
character name(ARB), prog(FILENAMESIZE)
integer length
integer getarg
length = getarg(0, prog, FILENAMESIZE)
if (length != EOF) {
call putlin(prog, STDERR)
call putch(COLON, STDERR)
call putch(BLANK, STDERR)
}
call cant(name)
return
end
|
import type { Fn, Fn0, Fn2 } from "@thi.ng/api";
export type Timestamp = number | bigint;
export type TimingResult<T> = [T, number];
export interface BenchmarkOpts {
/**
* Benchmark title (only used if `print` enabled)
*/
title: string;
/**
* Number of iterations
*
* @defaultValue 1000
*/
iter: number;
/**
* Number of calls per iteration, i.e. total number of iterations will be
* `iter * size`.
*
* @defaultValue 1
*/
size: number;
/**
* Number of warmup iterations (not included in results).
*
* @defaultValue 10
*/
warmup: number;
/**
* Result formatter
*
* @defaultValue FORMAT_DEFAULT
*/
format: BenchmarkFormatter;
/**
     * If false, all output will be suppressed.
*
* @defaultValue true
*/
output: boolean;
}
export type OptsWithoutTitle = Omit<BenchmarkOpts, "title">;
export interface BenchmarkSuiteOpts extends OptsWithoutTitle {}
export interface BenchmarkResult {
title: string;
/**
* Number of iterations
*/
iter: number;
/**
* Number of calls per iteration
*/
size: number;
/**
* Total execution time for all runs (in ms)
*/
total: number;
/**
* Mean execution time (in ms)
*/
mean: number;
/**
* Median execution time (in ms)
*/
median: number;
/**
* Min execution time (in ms)
*/
min: number;
/**
* Max execution time (in ms)
*/
max: number;
/**
* First quartile execution time (in ms). I.e. 25% of all runs were
* faster/equal to this measurement.
*/
q1: number;
/**
* Third quartile execution time (in ms). I.e. 25% of all runs were
* equal/slower than this measurement.
*/
q3: number;
/**
* Standard deviation (in percent)
*/
sd: number;
}
export interface BenchmarkFormatter {
/**
* Called once before the benchmark suite runs any benchmarks.
*/
prefix: Fn0<string>;
/**
* Called once for each given benchmark in the suite. Receives benchmark
* options.
*/
start: Fn<BenchmarkOpts, string>;
/**
* Called once per benchmark, just after warmup. Receives warmup time taken
* (in milliseconds) and benchmark opts.
*/
warmup: Fn2<number, BenchmarkOpts, string>;
/**
* Called once per benchmark with collected result.
*/
result: Fn<BenchmarkResult, string>;
/**
* Called once after all benchmarks have run. Receives array of all results.
*/
total: Fn<BenchmarkResult[], string>;
/**
* Called at the very end of the benchmark suite. Useful if a format
* requires any form of final suffix.
*/
suffix: Fn0<string>;
}
export interface Benchmark {
/**
* Benchmark title
*/
title: string;
/**
* Benchmark function. Will be called `size` times per `iter`ation (see
* {@link BenchmarkOpts}).
*/
fn: Fn0<void>;
/**
* Optional & partial benchmark specific option overrides (merged with opts
* given to suite)
*/
opts?: Partial<OptsWithoutTitle>;
}
export const FLOAT = (x: number) => x.toFixed(2);
export const EMPTY = () => "";
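// A minimal formatter sketch (illustrative, not part of this module): it wires
// the hooks above together using only FLOAT/EMPTY and the documented fields.
export const FORMAT_MINIMAL: BenchmarkFormatter = {
    prefix: EMPTY,
    start: (opts) => `benchmarking "${opts.title}" (${opts.iter} x ${opts.size} calls)...`,
    warmup: (t, opts) => `${opts.warmup} warmup runs took ${FLOAT(t)} ms`,
    result: (res) => `${res.title}: mean ${FLOAT(res.mean)} ms, sd ${FLOAT(res.sd)}%`,
    total: (results) => `${results.length} benchmark(s) done`,
    suffix: EMPTY,
};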
|
import React from "react";
import { graphql, PageProps } from "gatsby";
import styled from "styled-components";
import Layout from "@/components/layout.tsx";
import Header from "@/components/header/header.tsx";
import Description from "@/components/description/description.tsx";
import Features from "@/components/features/features.tsx";
import DownloadSection from "@/components/download-section/download-section.tsx";
import FAQs from "@/components/faqs/faqs.tsx";
import Footer from "@/components/footer/footer.tsx";
import SEO from "@/components/seo";
import device from "@/utils/media";
const GridContainer = styled.div`
display: grid;
grid-template-rows: repeat(5, min-content);
row-gap: 12rem;
margin-top: 9.5rem;
p {
line-height: 1.45;
}
padding: 0rem 2.7rem;
@media ${device.tablet} {
padding: 0rem 3rem;
margin-top: 9.8rem;
}
@media ${device.laptop} {
padding: 0rem 5rem;
margin-top: 11.8rem;
}
`;
type QueryData = {
site: {
siteMetadata: {
title: string;
};
};
};
const IndexPage: React.FC<PageProps<QueryData>> = ({ data }) => (
<Layout>
<SEO title={data.site.siteMetadata.title} />
<Header />
<GridContainer>
<Description />
<Features />
<DownloadSection />
<FAQs />
</GridContainer>
<Footer />
</Layout>
);
export const query = graphql`
query {
site {
siteMetadata {
title
}
}
}
`;
export default IndexPage;
|
package com.example.sharingang.ui.adapters
import android.content.Context
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.ImageView
import android.widget.TextView
import androidx.lifecycle.LifecycleCoroutineScope
import androidx.navigation.findNavController
import androidx.recyclerview.widget.RecyclerView
import com.bumptech.glide.Glide
import com.example.sharingang.R
import com.example.sharingang.database.repositories.UserRepository
import com.example.sharingang.models.User
import com.example.sharingang.ui.fragments.ChatsFragment
import com.example.sharingang.ui.fragments.ChatsFragmentDirections
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
/**
* UserAdapter takes care of adapting a list of users into a Recycler View.
*
* @property context the context
* @property users the list of users we are adapting
*/
class UserAdapter(
private val context: Context, private var users: MutableList<User>,
private val userRepository: UserRepository, private val currentUserId: String,
private val lifecycleScope: LifecycleCoroutineScope,
private val attachedFragment: ChatsFragment
) :
RecyclerView.Adapter<UserAdapter.ViewHolder>() {
init {
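        // Start empty regardless of the constructor argument; data arrives via submitList().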
users = mutableListOf()
}
/**
* ViewHolder holds the fields of a user element inside a View.
*
* @param userEntryView the designed View for a user entry
*/
class ViewHolder(userEntryView: View) : RecyclerView.ViewHolder(userEntryView) {
var username: TextView = userEntryView.findViewById(R.id.chatPartnerUsername)
var imageView: ImageView = userEntryView.findViewById(R.id.chatPartnerPic)
var indicator: TextView = userEntryView.findViewById(R.id.numUnread)
}
override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder {
val view: View = LayoutInflater.from(context).inflate(R.layout.user_entry, parent, false)
return ViewHolder(view)
}
override fun onBindViewHolder(holder: ViewHolder, position: Int) {
if (position < users.size) {
val user: User = users[position]
holder.username.text = user.name
Glide.with(context).load(user.profilePicture).into(holder.imageView)
setupActions(holder, user)
lifecycleScope.launch(Dispatchers.Main) {
displayNumUnread(holder, user.id!!)
userRepository.setupConversationRefresh(currentUserId, user.id) {
if (attachedFragment.isAdded) {
displayNumUnread(holder, user.id)
}
}
}
}
}
override fun getItemCount(): Int {
return users.size
}
private fun displayNumUnread(holder: ViewHolder, targetId: String) {
lifecycleScope.launch(Dispatchers.Main) {
val nUnread = userRepository.getNumUnread(userId = currentUserId, with = targetId)
holder.indicator.text = nUnread.toString()
holder.indicator.visibility = if (nUnread == 0L) View.GONE else View.VISIBLE
}
}
/**
* Updates the current list of users based on new incoming data.
*
* @param newData the incoming data
*/
fun submitList(newData: List<User>) {
users.clear()
users.addAll(newData)
notifyDataSetChanged()
}
private fun setupActions(holder: ViewHolder, user: User) {
holder.itemView.setOnClickListener { view ->
val partnerPicture = user.profilePicture
view.findNavController().navigate(
ChatsFragmentDirections.actionChatsFragmentToMessageFragment(
user.id!!, user.name, partnerPicture
)
)
}
}
}
|
/*jshint node:true, esversion:6*/
'use strict';
const grpc = require('grpc');
const etcd = require('etcd3-rpc');
const client = new etcd.Watch('localhost:2379', grpc.credentials.createInsecure());
const stream = client.watch();
stream.on('data', function (data) {
const id = data.watch_id;
console.log('Created:', data);
stream.removeAllListeners('data');
stream.on('data', function (data) {
data.events.forEach(function (ev) {
console.log([ev.type, ev.kv.key.toString(), ev.kv.value.toString()]);
});
});
setTimeout(function () {
stream.on('data', function (data) {
console.log('Cancelled:', data);
stream.end();
client.close();
});
stream.write({ cancel_request: { watch_id: id } });
}, 1000);
});
stream.on('error', function (err) {
throw err;
});
const ALL = Buffer.from('\0');
stream.write({ create_request: { key: ALL, range_end: ALL } });
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _path = require('path');
var _paths = require('./paths');
// import webpack from 'webpack';
exports.default = {
entry: {
app: [(0, _path.join)(_paths.SRC_PATH, 'client/index.js')]
// vendor: [
// 'react',
// 'react-dom',
// 'react-helmet',
// 'react-redux',
// 'react-router',
// 'leaflet',
// ],
}
};
|
<?php
/**
* This file is part of the GordyAnsell GreenFedora PHP framework.
*
* (c) Gordon Ansell <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
declare(strict_types=1);
namespace GreenFedora\Router;
use GreenFedora\Router\RouteInterface;
use GreenFedora\Router\RouteMatcherInterface;
use GreenFedora\Router\Exception\InvalidArgumentException;
/**
* Single route.
*
* @author Gordon Ansell <[email protected]>
*/
class Route implements RouteInterface
{
/**
* Route pattern.
* @var string|null
*/
protected $pattern = null;
/**
* Route target.
* @var string|null
*/
protected $target = null;
/**
* Namespaced class.
* @var string|null
*/
protected $namespacedClass = null;
/**
* Parameters.
* @var array
*/
protected $parameters = [];
    /**
     * Route matcher.
     * @var RouteMatcherInterface
     */
    protected $routeMatcher = null;
/**
* Constructor.
*
* @param string $pattern Route pattern.
* @param string $target Route target.
* @param RouteMatcherInterface $routeMatcher Route matcher.
* @return void
*/
public function __construct(string $pattern, string $target, RouteMatcherInterface $routeMatcher)
{
$this->pattern = $pattern;
$this->target = $target;
$this->routeMatcher = $routeMatcher;
}
/**
* See if the route matches.
*
* @param string|null $raw Pattern to match.
* @return bool True if it matches, else false.
* @throws InvalidArgumentException
*/
public function match(?string $raw = null) : bool
{
$result = $this->routeMatcher->match($this->pattern, $raw);
if (!$result) {
return false;
}
if ($this->routeMatcher->hasParameters()) {
$this->parameters = $this->routeMatcher->getParameters();
}
return true;
}
/**
* Return the pattern.
*
* @return string Pattern.
*/
public function getPattern(): string
{
return $this->pattern;
}
/**
* Return the target.
*
* @return string Target.
*/
public function getTarget(): string
{
return $this->target;
}
/**
* Is this a special pattern?
*
* @return bool
*/
public function isSpecial(): bool
{
        return '_' == $this->pattern[0] && '_' == $this->pattern[-1];
}
/**
* Get the namespaced class.
*
* @return string Full class name.
*/
public function getNamespacedClass(): string
{
return $this->namespacedClass;
}
    /**
     * Set namespaced class.
     *
     * @param string|null $prefix Class prefix.
     * @return void
     */
    public function setNamespacedClass(string $prefix = null)
{
$class = '';
        if ('\\' != $this->target[0]) {
if ($prefix) {
$class = $prefix;
}
}
$class = '\\' . trim($class, '\\') . '\\' . trim($this->target, '\\');
$this->namespacedClass = $class;
}
/**
* Get the parameters.
*
* @return array
*/
public function getParameters(): array
{
return $this->parameters;
}
/**
* Do we have parameters?
*
* @return bool
*/
public function hasParameters(): bool
{
return (count($this->parameters) > 0) ? true : false;
}
}
|
using System.Collections.Generic;
using System.Threading.Tasks;
namespace CrossX.Abstractions.Async
{
public static class AsyncExtensions
{
public static Sequence AsSequence(this Task task)
{
return Sequence.Agregate(GetSequence(task, 0));
}
        public static Sequence AsSequence(this IEnumerable<Sequence> sequences)
        {
            return Sequence.Agregate(sequences);
        }
public static Sequence AsSequence(this Task task, int checkIntervalInMs = 0)
{
return Sequence.Agregate(GetSequence(task, checkIntervalInMs));
}
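        // Polls the task until it completes, yielding a frame-wait when ms == 0
        // and a timed wait otherwise; the final Wait() rethrows any task exception.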
private static IEnumerable<Sequence> GetSequence(Task task, int ms)
{
while (!task.IsCompleted)
{
var seconds = ms / 1000.0;
yield return ms == 0 ? Sequence.WaitForNextFrame() : Sequence.WaitForSeconds(seconds);
}
task.Wait();
}
}
}
|
class ContactFilterParamsValidator < ActiveModel::EachValidator
def validate_each(record, attribute, value)
return if value.blank?
return if valid?(value)
record.errors.add(attribute, :invalid)
end
private
def valid?(value)
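    # Executing the filter (via `.any?`) forces the query to run, so malformed
    # params raise ActiveRecord::StatementInvalid and are rescued below.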
Filter::Resource::Contact.new(
{ association_chain: Contact.all },
value
).resources.any?
true
rescue ActiveRecord::StatementInvalid
false
end
end
|
section .text
global _start
_start:
; we need some help!
section .data
; template:
; mov edx,LENGTH
; mov ecx,MSG
; mov ebx,1
; mov eax,4
; int 0x80
; end file:
; mov eax,1
; int 0x80
|
require 'spec_helper'
describe "Checkout", js: true do
let!(:country) { create(:country, states_required: true) }
let!(:state) { create(:state, country: country) }
let!(:shipping_method) { create(:shipping_method) }
let!(:stock_location) { create(:stock_location) }
let!(:product) { create(:product, name: "RoR Mug", price: 19.99) }
let!(:payment_method) { create(:check_payment_method) }
let!(:zone) { create(:zone) }
let!(:user) { create(:user, email: '[email protected]', password: '123456') }
before do
product.master.stock_items.update_all(count_on_hand: 1)
end
scenario "add to cart and go to checkout page" do
add_mug_to_cart
checkout
expect(page.find('h1')).to have_content("Checkout")
end
scenario "checkout new address and new credit card" do
add_mug_to_cart
checkout
within "#billing-address" do
fill_in "first-name", with: "John"
fill_in "last-name", with: "Smith"
fill_in "address", with: "123 Main St."
fill_in "city", with: "Montgomery"
select "United States of America", from: "country"
select "Alabama", from: "state"
fill_in "zip", with: "12345"
fill_in "phone", with: "1231231234"
end
fill_in "number", with: "4111111111111111"
select "6 - June", from: "month"
select Date.today.year+1, from: "year"
fill_in "cvc", with: "123"
page.find(:css, "#confirm").click
expect(page.find('h1')).to have_content("Confirm")
page.find(:css, "#complete").click
sleep(0.1) while page.evaluate_script("document.querySelector('.loading')")
expect(page.find('h1')).to have_content("Order Submitted")
end
def checkout
sleep 2
page.find(:css, '.cart-link').click
within('#cart-aside') do
page.find(:css, '.checkout-button a').click
end
within :css, 'form[name=signinForm]' do
fill_in "email", with: "[email protected]"
fill_in "password", with: "123456"
click_on "Sign in"
end
end
def add_mug_to_cart
visit sprangular_engine.root_path(anchor: "!/products/#{product.slug}")
wait_for_loading
page.find(:css, '.add-to-cart').click
end
end
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceIotDeviceUpgradeappCreateModel(object):
def __init__(self):
self._remark = None
self._sn = None
self._target_app_id = None
self._target_app_version = None
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def sn(self):
return self._sn
@sn.setter
def sn(self, value):
if isinstance(value, list):
self._sn = list()
for i in value:
self._sn.append(i)
@property
def target_app_id(self):
return self._target_app_id
@target_app_id.setter
def target_app_id(self, value):
self._target_app_id = value
@property
def target_app_version(self):
return self._target_app_version
@target_app_version.setter
def target_app_version(self, value):
self._target_app_version = value
def to_alipay_dict(self):
params = dict()
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.sn:
if isinstance(self.sn, list):
for i in range(0, len(self.sn)):
element = self.sn[i]
if hasattr(element, 'to_alipay_dict'):
self.sn[i] = element.to_alipay_dict()
if hasattr(self.sn, 'to_alipay_dict'):
params['sn'] = self.sn.to_alipay_dict()
else:
params['sn'] = self.sn
if self.target_app_id:
if hasattr(self.target_app_id, 'to_alipay_dict'):
params['target_app_id'] = self.target_app_id.to_alipay_dict()
else:
params['target_app_id'] = self.target_app_id
if self.target_app_version:
if hasattr(self.target_app_version, 'to_alipay_dict'):
params['target_app_version'] = self.target_app_version.to_alipay_dict()
else:
params['target_app_version'] = self.target_app_version
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceIotDeviceUpgradeappCreateModel()
if 'remark' in d:
o.remark = d['remark']
if 'sn' in d:
o.sn = d['sn']
if 'target_app_id' in d:
o.target_app_id = d['target_app_id']
if 'target_app_version' in d:
o.target_app_version = d['target_app_version']
return o
|
package cn.codethink.xiaoming.concurrent;
import cn.chuanwise.common.concurrent.Promise;
/**
 * Bot-related asynchronous result.
 *
 * @author Chuanwise
 *
 * @see cn.chuanwise.common.concurrent.Promise
 */
public interface BotPromise<T>
extends BotTask, Promise<T> {
}
|
# frozen_string_literal: true
namespace :profile do
desc "Profile Template match memory allocations"
task :template_match_memory do
require "memory_profiler"
require "addressable/template"
start_at = Time.now.to_f
template = Addressable::Template.new("https://example.com/{?one,two,three}")
report = MemoryProfiler.report do
30_000.times do
template.match(
"https://example.com/?one=one&two=floo&three=me"
)
end
end
end_at = Time.now.to_f
print_options = { scale_bytes: true, normalize_paths: true }
puts "\n\n"
if ENV["CI"]
      report.pretty_print(**print_options)
else
t_allocated = report.scale_bytes(report.total_allocated_memsize)
t_retained = report.scale_bytes(report.total_retained_memsize)
puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)"
puts "Total retained: #{t_retained} (#{report.total_retained} objects)"
puts "Took #{end_at - start_at} seconds"
FileUtils.mkdir_p("tmp")
report.pretty_print(to_file: "tmp/memprof.txt", **print_options)
end
end
desc "Profile URI parse memory allocations"
task :memory do
require "memory_profiler"
require "addressable/uri"
if ENV["IDNA_MODE"] == "pure"
Addressable.send(:remove_const, :IDNA)
load "addressable/idna/pure.rb"
end
start_at = Time.now.to_f
report = MemoryProfiler.report do
30_000.times do
Addressable::URI.parse(
"https://google.com/stuff/../?with_lots=of¶ms=asdff#!stuff"
).normalize
end
end
end_at = Time.now.to_f
print_options = { scale_bytes: true, normalize_paths: true }
puts "\n\n"
if ENV["CI"]
report.pretty_print(**print_options)
else
t_allocated = report.scale_bytes(report.total_allocated_memsize)
t_retained = report.scale_bytes(report.total_retained_memsize)
puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)"
puts "Total retained: #{t_retained} (#{report.total_retained} objects)"
puts "Took #{end_at - start_at} seconds"
FileUtils.mkdir_p("tmp")
report.pretty_print(to_file: "tmp/memprof.txt", **print_options)
end
end
end
|
use pyo3::PyResult;
/// The Protocol that is currently active.
pub enum SelectedProtocol {
/// The HTTP/1.x protocol handler.
H1,
}
/// Defines the two states of a protocol's switch status: either switch to
/// the selected protocol, or don't switch at all.
pub enum SwitchStatus {
SwitchTo(SelectedProtocol),
NoSwitch,
}
/// Defines the required methods for making a protocol switchable.
pub trait Switchable {
/// Invoked just after the socket has been read to give the
/// chance for the protocol to be switched.
fn switch_protocol(&mut self) -> PyResult<SwitchStatus>;
}
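/// A minimal sketch of a `Switchable` implementation (illustrative only;
/// the `upgrade_requested` flag is an assumption, not part of this module).
pub struct SketchHandler {
    upgrade_requested: bool,
}
impl Switchable for SketchHandler {
    fn switch_protocol(&mut self) -> PyResult<SwitchStatus> {
        if self.upgrade_requested {
            // Hand the connection over to the HTTP/1.x handler.
            Ok(SwitchStatus::SwitchTo(SelectedProtocol::H1))
        } else {
            Ok(SwitchStatus::NoSwitch)
        }
    }
}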
|
using Microsoft.Extensions.DependencyInjection;
using Xunit;
namespace HttpTracker.SQLServer.Tests
{
public class HttpTrackerSQLServerProvider_Test : TestBase
{
[Fact]
public void GetConnection()
{
var provider = Services.BuildServiceProvider().GetRequiredService<IDbConnectionProvider>();
var connection = provider.Connection;
Assert.NotNull(connection);
}
}
}
|
package web
import (
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
)
const (
logKeyRequestID = "requestId"
logKeyModule = "module"
)
// LogContext wraps gin's Context for logging and attaches a unique request ID to each log entry.
type LogContext struct {
Ctx *gin.Context
Module string
Logger *logrus.Logger
}
func (c *LogContext) entry() (entry *logrus.Entry) {
if c.Logger != nil {
entry = logrus.NewEntry(c.Logger)
} else {
entry = logrus.NewEntry(logrus.StandardLogger())
}
if c.Ctx != nil {
if requestID := c.Ctx.GetString(contextKeyRequestID); requestID != "" {
			entry = entry.WithField(logKeyRequestID, requestID)
}
}
if c.Module != "" {
entry = entry.WithField(logKeyModule, c.Module)
}
return entry
}
// Info logs at the Info level.
func (c *LogContext) Info(args ...interface{}) {
c.entry().Info(args...)
}
// Infof logs a formatted message at the Info level.
func (c *LogContext) Infof(format string, args ...interface{}) {
c.entry().Infof(format, args...)
}
// Warn logs at the Warn level.
func (c *LogContext) Warn(args ...interface{}) {
c.entry().Warn(args...)
}
// Warnf logs a formatted message at the Warn level.
func (c *LogContext) Warnf(format string, args ...interface{}) {
c.entry().Warnf(format, args...)
}
// Error logs at the Error level.
func (c *LogContext) Error(args ...interface{}) {
c.entry().Error(args...)
}
// Errorf logs a formatted message at the Error level.
func (c *LogContext) Errorf(format string, args ...interface{}) {
c.entry().Errorf(format, args...)
}
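// A minimal usage sketch (illustrative, not part of this package):
//
//	logCtx := &LogContext{Ctx: c, Module: "auth"}
//	logCtx.Infof("user %s logged in", name)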
|
module ManageUserSupport
include MasterSupport
def list_valid_attribs
res = []
i = -1
if User.count > 0
i = User.order(id: :desc).first.id
end
(1..5).each do |l|
res << {
email: "tst-euser-#{i+l}@testmanage.com",
disabled: false
}
end
(1..5).each do |l|
res << {
email: "dis-tst-euser-#{i+l}@testmanage.com",
disabled: true
}
end
res
end
def list_invalid_attribs
[
{
email: nil
}
]
end
def list_invalid_update_attribs
[
{
email: nil
}
]
end
def new_attribs
@new_attribs = {
disabled: true
}
end
def create_item att=nil, admin=nil
att ||= valid_attribs
att[:current_admin] = admin||@admin
@manage_user = User.create! att
end
end
|
<?php
namespace FSVendor\WPDesk\Forms\Serializer;
use FSVendor\WPDesk\Forms\Serializer;
class NoSerialize implements \FSVendor\WPDesk\Forms\Serializer
{
public function serialize($value)
{
return $value;
}
public function unserialize($value)
{
return $value;
}
}
|
module FioAPI
class Base
# Allow ruby object to be initialized with params
#
# == Parameters:
# hash::
# Hash where key is attribute and value is new attribute value
#
# == Returns:
# New object with prefilled attributes
#
def initialize(*hash)
hash.first.each { |k, v| send("#{k}=", v) } if hash.length == 1 && hash.first.is_a?(Hash)
end
end
end
|
import os
from django.shortcuts import render
from django.views import View
from django.urls import reverse
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.conf import settings
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import (
CustomUserSignupSerializer,
CustomUserLoginSerializer
)
from .authentication import ExternalJWTAuthentication, UserAPIAuthentication
from jwt import decode as jwt_decode
from .models import CustomUser, BlackListedTokens
from lordoftherings.models import Favorite, Character, Quote
class SignupAPIView(APIView):
"""Signup new user"""
serializer_class = CustomUserSignupSerializer
#authentication_classes = (ExternalJWTAuthentication,)
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
if not user:
return Response(
{'status': False, 'message': 'User already exists'},
status=status.HTTP_200_OK,
)
user_id = user.id
return Response(
{
'status': True,
'message': 'User created successfully.',
'data': {'user_id': user_id},
},
status=status.HTTP_201_CREATED,
)
class LoginAPIView(APIView):
"""Login user"""
serializer_class = CustomUserLoginSerializer
#authentication_classes = (ExternalJWTAuthentication,)
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
if not user:
return Response(
{'status': False, 'message': 'Invalid user account'},
status=status.HTTP_200_OK,
)
if user['user'].check_password(user['password']):
token = user['user'].generate_jwt_token()
return Response(
{
'status': True,
'message': 'User logged in successfully.',
'data': {'auth-token': token},
},
status=status.HTTP_200_OK,
)
else:
return Response(
{
'status': False,
'message': 'Unable to log in, Invalid User.',
},
status=status.HTTP_200_OK,
)
class LogoutAPIView(APIView):
"""Logout end user"""
#authentication_classes = (UserAPIAuthentication,)
def get(self, request):
auth = request.headers['X-FORWARDED-USER'].split()
token = auth[1]
try:
payload = jwt_decode(token, settings.JWT_SECRET_KEY, algorithms=[settings.JWT_ALGO])
user = CustomUser.objects.get(id=payload.get('id'))
except CustomUser.DoesNotExist:
return Response(
{
'status': False,
'message': 'Unable to logout: Invalid User session.',
},
status=status.HTTP_200_OK,
)
try:
BlackListedTokens.objects.get(token=token)
return Response(
{
'status': False,
'message': 'User already logged out.',
},
status=status.HTTP_200_OK,
)
except BlackListedTokens.DoesNotExist:
log_out = BlackListedTokens(token=token)
log_out.save()
return Response(
{
'status': True,
'message': 'User logged out successfully.',
},
status=status.HTTP_200_OK,
)
except Exception as e:
return Response(
{
'status': False,
'message': 'Trouble logging out user, try again later. {}'.format(e),
},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class UserFavoritesAPIView(APIView):
#authentication_classes = (ExternalJWTAuthentication,)
def get(self, request, user_id):
try:
fav = Favorite.objects.get(user=user_id)
except Favorite.DoesNotExist:
return Response({'empty_results_errors': ['Favorites Not Found']}, status=status.HTTP_404_NOT_FOUND)
characters = []
quotes = []
for char in fav.character.all():
characters.append(Character.objects.get(id=char.id))
for quo in fav.quote.all():
quotes.append(Quote.objects.get(id=quo.id))
data = []
data.append({
'id':fav.id,
'user':fav.user.id,
'characters':characters,
'quotes':quotes
})
return Response(
{
                'status': True,
'data': data,
},
status=status.HTTP_200_OK,
)
|
package js_test
import (
"testing"
"github.com/sensu/sensu-go/js"
"github.com/sensu/sensu-go/types"
"github.com/sensu/sensu-go/types/dynamic"
)
func BenchmarkCheckEval(b *testing.B) {
check := types.FixtureCheck("foo")
for i := 0; i < b.N; i++ {
synth := dynamic.Synthesize(check)
params := map[string]interface{}{
"check": synth,
}
_, _ = js.Evaluate("check.status == 0", params, nil)
}
}
func BenchmarkMetricsEval(b *testing.B) {
metrics := types.FixtureMetrics()
for i := 0; i < b.N; i++ {
synth := dynamic.Synthesize(metrics)
params := map[string]interface{}{
"metrics": synth,
}
_, _ = js.Evaluate("metrics.points.length > 0", params, nil)
}
}
|
# Non-Raspberry Pi OS images
In theory there is no reason this could not mount an arbitrary disk image's partitions.
However, this has knowledge of the specific partitions, e.g. the first partition is the
boot partition and should mount to `/boot`, while the second is the root partition,
and no other partitions are examined.
This could be disabled with a `--flat` command-line option. This could mount each
partition under /data/mnt, i.e. a Raspberry Pi boot image would look like this:
* `/data/mnt/boot`
* `/data/mnt/rootfs`
(The partitions have the labels "boot" and "rootfs").
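A minimal sketch of what `--flat` mounting might look like (assuming loop
devices via `losetup -P`; `mount_flat` and the exact commands are illustrative,
not existing code):
```python
import pathlib
import subprocess

def mount_flat(image: str, root: str = "/data/mnt") -> None:
    """Mount every partition of `image` under `root`, named by its label."""
    # Attach the image to a loop device; -P asks the kernel to scan partitions.
    loop = subprocess.check_output(
        ["losetup", "--find", "--show", "-P", image], text=True
    ).strip()
    part = 1
    while pathlib.Path(f"{loop}p{part}").exists():
        dev = f"{loop}p{part}"
        # Prefer the filesystem label (e.g. "boot", "rootfs") as the mount name.
        probe = subprocess.run(
            ["blkid", "-s", "LABEL", "-o", "value", dev],
            capture_output=True, text=True,
        )
        label = probe.stdout.strip() or f"p{part}"
        target = pathlib.Path(root) / label
        target.mkdir(parents=True, exist_ok=True)
        subprocess.check_call(["mount", dev, str(target)])
        part += 1
```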
The `partition-size` command would need to be modified to specifically look for the
last partition as the one to resize, or be modified to use `dd` to actually move later
partitions to make room before resizing the filesystem.
As there are other partition layouts in use (e.g. NOOBS), this may be worth the effort.
|
var dir_4d8456d82305c4d30846a28617d1ad4b =
[
[ "onosproject", "dir_722c769f378eac8c3f9b59d51141f76a.html", "dir_722c769f378eac8c3f9b59d51141f76a" ]
];
|
{-----------------------------------------------------------------------------
--
-- Module       :  Dependency and other codes
--
-- | The codes for the TinT parser for Italian.
-- Name of POS tagset: ISST-TANL tagset,
--   from http://www.italianlp.it/docs/ISST-TANL-POStagset.pdf
-- Model used: TinT
-----------------------------------------------------------------------------}
--{-# OPTIONS_GHC -F -pgmF htfpp #-}
{-# LANGUAGE MultiParamTypeClasses
, ScopedTypeVariables
, FlexibleContexts
, OverloadedStrings
, TypeSynonymInstances
, FlexibleInstances
, DeriveAnyClass
, DeriveGeneric
#-}
module NLP.Corpora.ItalianTinT (module NLP.Corpora.ItalianTinT
-- , module NLP.Corpora.Conll
-- , ErrOrVal (..)
)
where
import GHC.Generics
import Data.Serialize (Serialize)
import qualified Data.Text as T
import Data.Text (Text)
import Data.Utilities
--import Test.Framework
import Test.QuickCheck.Arbitrary (Arbitrary(..))
import Test.QuickCheck.Gen (elements)
--import Uniform.Zero
--import Uniform.Strings
--import Uniform.Error
import Data.Text as T (replace)
import Text.Read (readEither)
--import qualified NLP.Corpora.Conll as Conll
import qualified NLP.Types.Tags as NLPtypes
--import NLP.Corpora.Conll
--import NLP.Corpora.Conll as Conll
--type PosTagEng = Conll.Tag -- renames the ConllTag
--instance CharChains2 PosTagEng Text
data POStagTinT = -- the definitions are in http://www.italianlp.it/docs/ISST-TANL-POStagset.pdf
START | -- START tag, used in training.
END | --END tag, used in training.
A | -- felice
AP | -- nostro
B | -- domani
BplusPC | -- eccolo
BN | -- non
CC | -- e
CS | -- che
DD | -- quel
DE | -- che
DI | -- ogni
DQ | -- che
DR | -- cui
E | -- a
EplusRD | -- dalla
EA |
FB | -- -
FC | -- ;
FF | -- -
FS | -- ?
I | -- Oh
N | -- Sei
NO | -- ultima
PC | -- ti
PCplusPC | -- gliele
PD | -- quello
PE | -- Noi
PI | -- tutto
PP | -- mio
PQ | -- Che
PR | -- Che
RD | -- il -- RD?
RI | -- una
S | -- nutrice
SP | -- FULVIA
SW | -- grand'
T | -- tutti
V | -- vedere
VplusPC | -- avervi
VplusPCplusPC | -- occuparsene
VA | -- è
VAplusPC | -- averlo
VM | -- volevo
VMplusPC | -- poterci
VMplusPCplusPC | -- sferrarsene
X | -- FINE -- residual class
TinTunk -- other -- conflicts possible!
deriving (Read, Show, Ord, Eq, Generic, Enum, Bounded)
{- additional information available
"index": 53,
"word": "pregiudizi",
"originalText": "pregiudizi",
"lemma": "pregiudizio",
"characterOffsetBegin": 267,
"characterOffsetEnd": 277,
"pos": "S",
"featuresText": "Gender\u003dMasc|Number\u003dPlur",
"ner": "O",
"full_morpho": "pregiudizi pregiudizio+n+m+plur",
"selected_morpho": "pregiudizio+n+m+plur",
"guessed_lemma": false,
"features": {
"Gender": [
"Masc"
],
"Number": [
"Plur"
]
-}
instance NLPtypes.POStags POStagTinT where
--parseTag :: Text -> PosTag
parseTag txt = case readTag txt of
Left _ -> NLPtypes.tagUNK
Right t -> t
tagUNK = TinTunk
tagTerm = showTag
startTag = START
endTag = END
isDt tag = tag `elem` [RD]
instance Arbitrary POStagTinT where
arbitrary = elements [minBound ..]
instance Serialize POStagTinT
readTag :: Text -> ErrOrVal POStagTinT
--readTag "#" = Right Hash
--readTag "$" = Right Dollar
--readTag "(" = Right Op_Paren
--readTag ")" = Right Cl_Paren
--readTag "''" = Right CloseDQuote
--readTag "``" = Right OpenDQuote
--readTag "," = Right Comma
--readTag "." = Right Term
--readTag ":" = Right Colon
readTag txt =
let normalized = replaceAll tagTxtPatterns (T.toUpper txt)
in (readOrErr normalized)
-- | Order matters here: the patterns are applied top-to-bottom when
-- parsing tags, and in reverse order when generating them.
tagTxtPatterns :: [(Text, Text)]
tagTxtPatterns = [ ("$", "dollar")
, ("+", "plus")
]
reversePatterns :: [(Text, Text)]
reversePatterns = map (\(x,y) -> (y,x)) tagTxtPatterns
showTag :: POStagTinT -> Text
--showTag Hash = "#"
--showTag Op_Paren = "("
--showTag Cl_Paren = ")"
--showTag CloseDQuote = "''"
--showTag OpenDQuote = "``"
--showTag Dollar = "$"
--showTag Comma = ","
--showTag Term = "."
--showTag Colon = ":"
showTag tag = replaceAll reversePatterns (s2t $ show tag)
--replaceAll :: [(Text, Text)] -> (Text -> Text)
--replaceAll patterns = foldl (.) id (map (uncurry T.replace) patterns)
--readTag :: Text -> ErrOrVal POStagTinT
--readTag txt = maybe2errorP . read . t2s $ txt
--
--maybe2errorP :: Maybe a -> ErrOrVal a
--maybe2errorP Nothing = Left "readTag POStagTinT 34232"
--maybe2errorP (Just a) = Right a
--readOrErr :: Read a => Text -> Either Text a
--readOrErr t = case (readEither (t2s t)) of
-- Left msg -> Left (s2t msg)
-- Right a -> Right a
--instance CharChains2 POStagTinT String where
-- show' = show
--instance CharChains2 POStagTinT Text where
-- show' = s2t . show
--
--instance Zeros POStagTinT where zero = NLPtypes.tagUNK
----type Unk = Conll.Unk
|
//===-- common.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "common.h"
#include "atomic_helpers.h"
namespace scudo {
uptr PageSizeCached;
uptr getPageSize();
uptr getPageSizeSlow() {
PageSizeCached = getPageSize();
CHECK_NE(PageSizeCached, 0);
return PageSizeCached;
}
// Fatal internal map() or unmap() error (potentially OOM related).
void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
outputRaw("Scudo ERROR: internal map or unmap failure");
if (OutOfMemory)
outputRaw(" (OOM)");
outputRaw("\n");
die();
}
} // namespace scudo
|
package com.tickr.tickr.api.utils
import java.io.IOException
import retrofit2.Response
import retrofit2.Retrofit
/**
* Created by bry1337 on 25/01/2018.
*
* @author [email protected]
*/
class RetrofitException(builder: Builder) : RuntimeException(builder.message, builder.exception) {
/** The request URL which produced the error. */
val url: String?
/** Response object containing status code, headers, body, etc. */
val response: Response<*>?
/** The event kind which triggered this error. */
val kind: Kind?
/** The Retrofit this request was executed on */
val retrofit: Retrofit?
init {
this.url = builder.url
this.response = builder.response
this.kind = builder.kind
this.retrofit = builder.retrofit
}
/**
* HTTP response body converted to specified `type`. `null` if there is no
* response.
*
* @throws IOException if unable to convert the body to the specified `type`.
*/
@Throws(IOException::class)
fun <T> getErrorBodyAs(type: Class<T>): T? {
if (response == null || response.errorBody() == null) {
return null
}
val converter = retrofit!!.responseBodyConverter<T>(type, arrayOfNulls(0))
return converter.convert(response.errorBody())
}
/** Identifies the event kind which triggered a [RetrofitException]. */
enum class Kind {
/** An [IOException] occurred while communicating to the server. */
NETWORK,
/** A non-200 HTTP status code was received from the server. */
HTTP,
/**
* An internal error occurred while attempting to execute a request. It is best practice to
* re-throw this exception so your application crashes.
*/
UNEXPECTED
}
class Builder(internal val message: String?, internal val exception: Throwable?) {
internal var url: String? = null
internal var response: Response<*>? = null
internal var kind: Kind? = null
internal var retrofit: Retrofit? = null
fun setUrl(url: String?): Builder {
this.url = url
return this
}
fun setResponse(response: Response<*>?): Builder {
this.response = response
return this
}
fun setKind(kind: Kind): Builder {
this.kind = kind
return this
}
fun setRetrofit(retrofit: Retrofit?): Builder {
this.retrofit = retrofit
return this
}
fun build(): RetrofitException {
return RetrofitException(this)
}
}
companion object {
fun httpError(url: String, response: Response<*>, retrofit: Retrofit): RetrofitException {
val message = response.code().toString() + " " + response.message()
return Builder(message, null).setUrl(url)
.setResponse(response)
.setKind(Kind.HTTP)
.setRetrofit(retrofit)
.build()
}
fun networkError(exception: IOException): RetrofitException {
return Builder(exception.message, exception).setUrl(null)
.setResponse(null)
.setKind(Kind.NETWORK)
.setRetrofit(null)
.build()
}
fun unexpectedError(exception: Throwable): RetrofitException {
return Builder(exception.message, exception).setUrl(null)
.setResponse(null)
.setKind(Kind.UNEXPECTED)
.setRetrofit(null)
.build()
}
}
}
|
//
// AppDelegate+CJAppDelegateCategory.h
// sliderViewcontroller
//
// Created by ccj on 2017/1/16.
//  Copyright © 2017 ccj. All rights reserved.
//
#import "AppDelegate.h"
/**********************Purpose: screen-size adaptation*****************
 Usage:
 cjW(width)
 cjH(height)
 cjX(x)
 cjY(y)
 cjSize(size)
 *******************************************************************/
@interface AppDelegate (CJAutoSize)
-(CGFloat)autoRealHorizonSize:(CGFloat)estimateHorizonSize;
-(CGFloat)autoRealVerticalSize:(CGFloat)estimateVerticalSize;
-(CGFloat)autoRealSize:(CGFloat)estimateSize;
#define cjW(width) [(AppDelegate *)[UIApplication sharedApplication].delegate autoRealHorizonSize:width]
#define cjH(height) [(AppDelegate *)[UIApplication sharedApplication].delegate autoRealVerticalSize:height]
#define cjX(x) [(AppDelegate *)[UIApplication sharedApplication].delegate autoRealHorizonSize:x]
#define cjY(y) [(AppDelegate *)[UIApplication sharedApplication].delegate autoRealVerticalSize:y]
#define cjSize(size) [(AppDelegate *)[UIApplication sharedApplication].delegate autoRealSize:size]
@end
|
package dora.db.table
import android.database.SQLException
import android.database.sqlite.SQLiteDatabase
import dora.db.Orm
import dora.db.OrmLog
import dora.db.Transaction
import dora.db.constraint.*
import dora.db.dao.DaoFactory.removeDao
import dora.db.exception.ConstraintException
import dora.db.type.DataType
import dora.db.type.BooleanType
import dora.db.type.ByteArrayType
import dora.db.type.ByteType
import dora.db.type.CharType
import dora.db.type.ClassType
import dora.db.type.DoubleType
import dora.db.type.FloatType
import dora.db.type.IntType
import dora.db.type.LongType
import dora.db.type.ShortType
import dora.db.type.SqlType
import dora.db.type.StringType
import java.lang.reflect.Field
import java.lang.reflect.Modifier
import java.util.*
object TableManager {
private val A = 'A'
private val Z = 'Z'
private val CREATE_TABLE = "CREATE TABLE"
private val ALTER_TABLE = "ALTER TABLE"
private val DROP_TABLE = "DROP TABLE"
private val IF_NOT_EXISTS = "IF NOT EXISTS"
private val IF_EXISTS = "IF EXISTS"
private val ADD_COLUMN = "ADD COLUMN"
private val AUTO_INCREMENT = "AUTOINCREMENT"
private val SPACE = " "
private val SINGLE_QUOTES = "\'"
private val UNIQUE = "UNIQUE"
private val DEFAULT = "DEFAULT"
private val CHECK = "CHECK"
private val NOT_NULL = "NOT NULL"
private val PRIMARY_KEY = "PRIMARY KEY"
private val LEFT_PARENTHESIS = "("
private val RIGHT_PARENTHESIS = ")"
private val COMMA = ","
private val SEMICOLON = ";"
private val UNDERLINE = "_"
private val TABLE_NAME_HEADER = "t$UNDERLINE"
    fun <T : OrmTable> getTableName(tableClass: Class<T>): String {
        val table = tableClass.getAnnotation(Table::class.java)
        return if (table != null) {
            table.value
        } else {
            generateTableName(tableClass.simpleName)
        }
    }
fun getColumnName(field: Field): String {
val columnName: String
val id = field.getAnnotation(Id::class.java)
val column = field.getAnnotation(Column::class.java)
columnName = when {
id != null -> {
OrmTable.INDEX_ID
}
column != null -> {
column.value
}
else -> {
val fieldName = field.name
generateColumnName(fieldName)
}
}
return columnName
}
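    // e.g. "UserInfo" -> "t_user_info" (an illustrative example of the mapping below)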
fun generateTableName(className: String): String {
val sb = StringBuilder()
for (i in className.indices) {
if (className[i] in A..Z && i != 0) {
sb.append(UNDERLINE)
}
sb.append(className[i].toString().toLowerCase(Locale.ENGLISH))
}
        return TABLE_NAME_HEADER + sb.toString().toLowerCase(Locale.ENGLISH)
}
fun generateColumnName(fieldName: String): String {
val sb = StringBuilder()
for (i in fieldName.indices) {
if (fieldName[i] in A..Z && i != 0) {
sb.append(UNDERLINE)
}
sb.append(fieldName[i].toString().toLowerCase(Locale.ENGLISH))
}
        return sb.toString().toLowerCase(Locale.ENGLISH)
}
private val declaredDataTypes: List<DataType>
get() {
val dataTypes: MutableList<DataType> = arrayListOf()
dataTypes.add(BooleanType.INSTANCE)
dataTypes.add(ByteType.INSTANCE)
dataTypes.add(ShortType.INSTANCE)
dataTypes.add(IntType.INSTANCE)
dataTypes.add(LongType.INSTANCE)
dataTypes.add(FloatType.INSTANCE)
dataTypes.add(DoubleType.INSTANCE)
dataTypes.add(CharType.INSTANCE)
dataTypes.add(StringType.INSTANCE)
dataTypes.add(ClassType.INSTANCE)
return dataTypes
}
private fun matchDataType(fieldType: Class<*>): DataType {
val dataTypes: List<DataType> = declaredDataTypes
for (dataType in dataTypes) {
if (dataType.matches(fieldType)) {
return dataType
}
}
return ByteArrayType.INSTANCE
}
private fun <A : Annotation> checkColumnConstraint(field: Field, annotationType: Class<A>): Boolean {
val annotation = field.getAnnotation(annotationType)
return annotation != null
}
private fun <A : Annotation, V> getColumnConstraintValue(field: Field, annotationType: Class<A>,
valueType: Class<V>): V? {
var value: V? = null
val annotation = field.getAnnotation(annotationType)
if (Default::class.java.isAssignableFrom(annotationType)) {
value = (annotation as Default).value as V
}
if (Check::class.java.isAssignableFrom(annotationType)) {
value = (annotation as Check).value as V
}
if (PrimaryKey::class.java.isAssignableFrom(annotationType)) {
value = (annotation as PrimaryKey).value as V
}
return value
}
private class ColumnBuilder {
private var builder: StringBuilder
private var field: Field
var isPrimaryKey = false
constructor(field: Field) {
this.field = field
builder = StringBuilder()
}
constructor(str: String?, field: Field) {
this.field = field
builder = StringBuilder(str)
}
private fun append(str: String): ColumnBuilder {
builder.append(str)
return this
}
fun buildColumnUnique(): ColumnBuilder {
if (checkColumnConstraint(field, Unique::class.java)) {
builder.append(SPACE).append(UNIQUE)
}
return this
}
fun buildColumnDefault(): ColumnBuilder {
if (checkColumnConstraint(field, Default::class.java)) {
val value = getColumnConstraintValue(field, Default::class.java, String::class.java)!!
try {
val number = value.toLong()
builder.append(SPACE).append(DEFAULT)
.append(SPACE).append(SINGLE_QUOTES).append(number).append(SINGLE_QUOTES)
} catch (e: NumberFormatException) {
builder.append(SPACE).append(DEFAULT)
.append(SPACE).append(SINGLE_QUOTES).append(value).append(SINGLE_QUOTES)
}
}
return this
}
fun buildColumnCheck(): ColumnBuilder {
if (checkColumnConstraint(field, Check::class.java)) {
val value = getColumnConstraintValue(field, Check::class.java, String::class.java)!!
builder.append(SPACE).append(CHECK).append(LEFT_PARENTHESIS)
.append(value).append(RIGHT_PARENTHESIS)
}
return this
}
fun buildColumnNotNull(): ColumnBuilder {
if (checkColumnConstraint(field, NotNull::class.java)) {
builder.append(SPACE).append(NOT_NULL)
}
return this
}
fun buildColumnPrimaryKey(): ColumnBuilder {
if (checkColumnConstraint(field, Id::class.java)) {
isPrimaryKey = true
builder.append(SPACE).append(PRIMARY_KEY).append(SPACE).append(AUTO_INCREMENT)
} else if (checkColumnConstraint(field, PrimaryKey::class.java)) {
isPrimaryKey = true
builder.append(SPACE).append(PRIMARY_KEY)
val assignType = getColumnConstraintValue(field, PrimaryKey::class.java,
AssignType::class.java)!!
                // AssignType.BY_MYSELF adds no extra keyword.
                if (assignType == AssignType.AUTO_INCREMENT) {
                    builder.append(SPACE).append(AUTO_INCREMENT)
                }
}
return this
}
fun build(): String {
return builder.toString()
}
}
private fun createColumnBuilder(field: Field): ColumnBuilder {
val dataType: DataType = matchDataType(field.type)
val sqlType: SqlType = dataType.sqlType
var columnType: String = sqlType.name
val convert: Convert? = field.getAnnotation(Convert::class.java)
if (convert != null) {
            // Match the data type again using the columnType declared by @Convert
columnType = matchDataType(convert.columnType.java).sqlType.name
}
val columnName = getColumnName(field)
val fieldBuilder = ColumnBuilder(columnName + SPACE + columnType, field)
fieldBuilder.buildColumnUnique()
.buildColumnDefault()
.buildColumnCheck()
.buildColumnNotNull()
.buildColumnPrimaryKey()
return fieldBuilder
}
private fun <T : OrmTable> _createTable(tableClass: Class<T>, db: SQLiteDatabase) {
val tableName = getTableName(tableClass)
val fields = tableClass.declaredFields
val sqlBuilder = StringBuilder(CREATE_TABLE + SPACE + IF_NOT_EXISTS + SPACE
+ tableName + LEFT_PARENTHESIS) //table header
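        // Builds e.g. "CREATE TABLE IF NOT EXISTS t_user(_id INTEGER PRIMARY KEY AUTOINCREMENT, ...);"
        // (illustrative; the actual columns come from the table class's fields)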
var hasPrimaryKey = false
for (field in fields) {
field.isAccessible = true
val ignore = field.getAnnotation(Ignore::class.java)
if (ignore != null || field.modifiers and Modifier.STATIC != 0) {
continue
}
val fieldBuilder = createColumnBuilder(field)
if (fieldBuilder.isPrimaryKey) {
hasPrimaryKey = true
}
sqlBuilder.append(fieldBuilder.build()).append(COMMA)
}
if (!hasPrimaryKey) {
throw ConstraintException("Lack valid primary key.")
}
try {
val sql = sqlBuilder.deleteCharAt(sqlBuilder.length - 1).append(RIGHT_PARENTHESIS)
.append(SEMICOLON).toString()
OrmLog.d(sql)
db.execSQL(sql)
} catch (e: SQLException) {
e.message?.let { OrmLog.i(it) }
}
removeDao(tableClass)
}
private fun <T : OrmTable> _upgradeTable(tableClass: Class<T>, db: SQLiteDatabase) {
val tableName = getTableName(tableClass)
val fields = tableClass.declaredFields
for (field in fields) {
field.isAccessible = true
val ignore = field.getAnnotation(Ignore::class.java)
if (ignore != null || field.modifiers and Modifier.STATIC != 0) {
continue
}
try {
val sql = (ALTER_TABLE + SPACE + tableName + SPACE + ADD_COLUMN + SPACE
+ createColumnBuilder(field).build() + SEMICOLON)
OrmLog.d(sql)
db.execSQL(sql)
} catch (e: SQLException) {
e.message?.let { OrmLog.i(it) }
}
}
removeDao(tableClass)
}
private fun <T : OrmTable> _dropTable(tableClass: Class<T>, db: SQLiteDatabase) {
val tableName = getTableName(tableClass)
        val sql = DROP_TABLE + SPACE + IF_EXISTS + SPACE + tableName + SEMICOLON
OrmLog.d(sql)
db.execSQL(sql)
removeDao(tableClass)
}
fun <T : OrmTable> createTable(tableClass: Class<T>) {
if (Orm.isPrepared()) {
_createTable(tableClass, Orm.getDB())
}
}
fun <T : OrmTable> upgradeTable(tableClass: Class<T>) {
if (Orm.isPrepared()) {
_upgradeTable(tableClass, Orm.getDB())
}
}
fun <T : OrmTable> dropTable(tableClass: Class<T>) {
if (Orm.isPrepared()) {
_dropTable(tableClass, Orm.getDB())
}
}
fun <T : OrmTable> recreateTable(tableClass: Class<T>) {
if (Orm.isPrepared()) {
Transaction.execute(tableClass) {
_dropTable(tableClass, db)
_createTable(tableClass, db)
}
}
}
}
|
package levenshtein
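// IterativeCache computes the Levenshtein distance between l.s1 and l.s2
// using the classic two-row dynamic-programming scheme: prev holds row i-1
// of the full DP matrix and cur holds row i, so memory stays O(len(s2))
// instead of O(len(s1) * len(s2)).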
func (l Levenshtein) IterativeCache() int {
prev := make([]int, len(l.s2)+1)
for i := 0; i <= len(l.s2); i++ {
prev[i] = i
}
cur := make([]int, len(l.s2)+1)
for i := 1; i <= len(l.s1); i++ {
cur[0] = i
for j := 1; j <= len(l.s2); j++ {
eq := 1
if l.s1[i-1] == l.s2[j-1] {
eq = 0
}
cur[j] = minFromThree(
cur[j-1]+1,
prev[j]+1,
prev[j-1]+eq,
)
}
copy(prev, cur)
}
	// After the final copy, prev mirrors cur, and it already holds the answer
	// when s1 is empty (the loop never runs), so return prev rather than cur.
	return prev[len(l.s2)]
}
|
// tag::create-archive-with-base-plugin-example[]
plugins {
base
}
version = "1.0.0"
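// With the base plugin applied, this archive is written to build/distributions/
// as <project-name>-1.0.0.zip (name derived from the project name and version).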
tasks.register<Zip>("packageDistribution") {
from(layout.buildDirectory.dir("toArchive")) {
exclude("**/*.pdf")
}
from(layout.buildDirectory.dir("toArchive")) {
include("**/*.pdf")
into("docs")
}
}
// end::create-archive-with-base-plugin-example[]
|
/*
* Copyright (C) 2013 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okhttp3
import java.io.IOException
import java.net.Proxy
import java.security.cert.X509Certificate
import java.time.Duration
import mockwebserver3.MockResponse
import mockwebserver3.MockWebServer
import mockwebserver3.SocketPolicy
import okhttp3.Headers.Companion.headersOf
import okhttp3.MediaType.Companion.toMediaType
import okhttp3.RequestBody.Companion.toRequestBody
import okhttp3.TestUtil.assertSuppressed
import okhttp3.internal.DoubleInetAddressDns
import okhttp3.internal.connection.RealConnection
import okhttp3.internal.connection.RealConnection.Companion.IDLE_CONNECTION_HEALTHY_NS
import okhttp3.internal.http.RecordingProxySelector
import okhttp3.testing.Flaky
import okhttp3.testing.PlatformRule
import okhttp3.tls.internal.TlsUtil.localhost
import okio.BufferedSink
import org.assertj.core.api.Assertions.assertThat
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.Timeout
import org.junit.jupiter.api.extension.RegisterExtension
import org.junit.jupiter.api.fail
@Timeout(30)
class CallKotlinTest(
val server: MockWebServer
) {
@JvmField @RegisterExtension val platform = PlatformRule()
@JvmField @RegisterExtension val clientTestRule = OkHttpClientTestRule().apply {
recordFrames = true
recordSslDebug = true
}
private var client = clientTestRule.newClient()
private val handshakeCertificates = localhost()
@BeforeEach
fun setup() {
platform.assumeNotBouncyCastle()
}
@Test
fun legalToExecuteTwiceCloning() {
server.enqueue(MockResponse().setBody("abc"))
server.enqueue(MockResponse().setBody("def"))
val request = Request(server.url("/"))
val call = client.newCall(request)
val response1 = call.execute()
val cloned = call.clone()
val response2 = cloned.execute()
assertThat("abc").isEqualTo(response1.body.string())
assertThat("def").isEqualTo(response2.body.string())
}
@Test
@Flaky
fun testMockWebserverRequest() {
enableTls()
server.enqueue(MockResponse().setBody("abc"))
val request = Request.Builder().url(server.url("/")).build()
val response = client.newCall(request).execute()
response.use {
assertEquals(200, response.code)
assertEquals("CN=localhost",
(response.handshake!!.peerCertificates.single() as X509Certificate).subjectDN.name)
}
}
private fun enableTls() {
client = client.newBuilder()
.sslSocketFactory(
handshakeCertificates.sslSocketFactory(), handshakeCertificates.trustManager)
.build()
server.useHttps(handshakeCertificates.sslSocketFactory())
}
@Test
fun testHeadAfterPut() {
class ErringRequestBody : RequestBody() {
override fun contentType(): MediaType {
return "application/xml".toMediaType()
}
override fun writeTo(sink: BufferedSink) {
sink.writeUtf8("<el")
sink.flush()
throw IOException("failed to stream the XML")
}
}
class ValidRequestBody : RequestBody() {
override fun contentType(): MediaType {
return "application/xml".toMediaType()
}
override fun writeTo(sink: BufferedSink) {
sink.writeUtf8("<element/>")
sink.flush()
}
}
server.enqueue(MockResponse().apply {
setResponseCode(201)
})
server.enqueue(MockResponse().apply {
setResponseCode(204)
})
server.enqueue(MockResponse().apply {
setResponseCode(204)
})
val endpointUrl = server.url("/endpoint")
var request = Request.Builder()
.url(endpointUrl)
.header("Content-Type", "application/xml")
.put(ValidRequestBody())
.build()
// 201
client.newCall(request).execute()
request = Request.Builder()
.url(endpointUrl)
.head()
.build()
// 204
client.newCall(request).execute()
request = Request.Builder()
.url(endpointUrl)
.header("Content-Type", "application/xml")
.put(ErringRequestBody())
.build()
try {
client.newCall(request).execute()
fail("test should always throw exception")
} catch (_: IOException) {
// NOTE: expected
}
request = Request.Builder()
.url(endpointUrl)
.head()
.build()
client.newCall(request).execute()
var recordedRequest = server.takeRequest()
assertEquals("PUT", recordedRequest.method)
recordedRequest = server.takeRequest()
assertEquals("HEAD", recordedRequest.method)
recordedRequest = server.takeRequest()
assertThat(recordedRequest.failure).isNotNull()
recordedRequest = server.takeRequest()
assertEquals("HEAD", recordedRequest.method)
}
@Test
fun staleConnectionNotReusedForNonIdempotentRequest() {
// Capture the connection so that we can later make it stale.
var connection: RealConnection? = null
client = client.newBuilder()
.addNetworkInterceptor(Interceptor { chain ->
connection = chain.connection() as RealConnection
chain.proceed(chain.request())
})
.build()
server.enqueue(MockResponse().setBody("a")
.setSocketPolicy(SocketPolicy.SHUTDOWN_OUTPUT_AT_END))
server.enqueue(MockResponse().setBody("b"))
val requestA = Request(server.url("/"))
val responseA = client.newCall(requestA).execute()
assertThat(responseA.body.string()).isEqualTo("a")
assertThat(server.takeRequest().sequenceNumber).isEqualTo(0)
// Give the socket a chance to become stale.
connection!!.idleAtNs -= IDLE_CONNECTION_HEALTHY_NS
Thread.sleep(250)
val requestB = Request(
url = server.url("/"),
body = "b".toRequestBody("text/plain".toMediaType()),
)
val responseB = client.newCall(requestB).execute()
assertThat(responseB.body.string()).isEqualTo("b")
assertThat(server.takeRequest().sequenceNumber).isEqualTo(0)
}
/** Confirm suppressed exceptions that occur while connecting are returned. */
@Test fun connectExceptionsAreReturnedAsSuppressed() {
val proxySelector = RecordingProxySelector()
proxySelector.proxies.add(Proxy(Proxy.Type.HTTP, TestUtil.UNREACHABLE_ADDRESS_IPV4))
proxySelector.proxies.add(Proxy.NO_PROXY)
server.shutdown()
client = client.newBuilder()
.proxySelector(proxySelector)
.readTimeout(Duration.ofMillis(100))
.connectTimeout(Duration.ofMillis(100))
.build()
val request = Request(server.url("/"))
try {
client.newCall(request).execute()
fail("")
} catch (expected: IOException) {
expected.assertSuppressed {
val suppressed = it.single()
assertThat(suppressed).isInstanceOf(IOException::class.java)
assertThat(suppressed).isNotSameAs(expected)
}
}
}
/** Confirm suppressed exceptions that occur after connecting are returned. */
@Test fun httpExceptionsAreReturnedAsSuppressed() {
server.enqueue(MockResponse().setSocketPolicy(SocketPolicy.DISCONNECT_AT_START))
server.enqueue(MockResponse().setSocketPolicy(SocketPolicy.DISCONNECT_AT_START))
client = client.newBuilder()
.dns(DoubleInetAddressDns()) // Two routes so we get two failures.
.build()
val request = Request(server.url("/"))
try {
client.newCall(request).execute()
fail("")
} catch (expected: IOException) {
expected.assertSuppressed {
val suppressed = it.single()
assertThat(suppressed).isInstanceOf(IOException::class.java)
assertThat(suppressed).isNotSameAs(expected)
}
}
}
@Test
fun responseRequestIsLastRedirect() {
server.enqueue(
MockResponse()
.setResponseCode(302)
.addHeader("Location: /b")
)
server.enqueue(MockResponse())
val request = Request(server.url("/"))
val call = client.newCall(request)
val response = call.execute()
assertThat(response.request.url.encodedPath).isEqualTo("/b")
assertThat(response.request.headers).isEqualTo(headersOf())
}
}
|
#![cfg_attr(not(feature = "std"), no_std)]
use core::cmp::PartialEq;
use core::convert::From;
use core::ops::Neg;
use core::ops::{Add, AddAssign};
use core::ops::{Div, DivAssign};
use core::ops::{Index, IndexMut};
use core::ops::{Mul, MulAssign};
use core::ops::{Sub, SubAssign};
use core::slice::SliceIndex;
use serde::{Serialize, Deserialize};
#[macro_use]
pub mod sparse;
#[cfg_attr(test, macro_use)]
extern crate alloc;
use alloc::vec::{IntoIter, Vec};
/// A [`Polynomial`] is just a vector of coefficients. Each coefficient corresponds to a power of
/// `x` in increasing order. For example, the following polynomial is equal to 4x^2 + 3x - 9.
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// // Construct polynomial 4x^2 + 3x - 9
/// let mut a = poly![-9, 3, 4];
/// assert_eq!(a[0], -9);
/// assert_eq!(a[1], 3);
/// assert_eq!(a[2], 4);
/// # }
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Polynomial<T>(Vec<T>);
impl<T> Polynomial<T> {
/// Create a new, empty, instance of a polynomial.
pub fn new() -> Polynomial<T> {
Polynomial(Vec::<T>::new())
}
/// Adds a new coefficient to the [`Polynomial`], in the next highest order position.
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let mut a = poly![-8, 2, 4];
/// a.push(7);
/// assert_eq!(a, poly![-8, 2, 4, 7]);
/// # }
/// ```
pub fn push(&mut self, value: T) {
self.0.push(value);
}
/// Removes the highest order coefficient from the [`Polynomial`].
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let mut a = poly![-8, 2, 4];
/// assert_eq!(a.pop().unwrap(), 4);
/// assert_eq!(a, poly![-8, 2]);
/// # }
/// ```
pub fn pop(&mut self) -> Option<T> {
self.0.pop()
}
/// Calculates the degree of a [`Polynomial`].
///
/// The following polynomial is of degree 2: (4x^2 + 2x - 8)
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let a = poly![-8, 2, 4];
/// assert_eq!(a.degree(), 2);
/// # }
/// ```
pub fn degree(&self) -> usize
where
T: Sub<T, Output = T> + Eq + Copy,
{
let mut deg = self.0.len();
for _ in 0..self.0.len() {
deg -= 1;
            // Generic non-zero test: `self[deg] - self[deg]` computes T's zero
            if self[deg] != self[deg] - self[deg] {
break;
}
}
deg
}
/// Evaluate a [`Polynomial`] for some value `x`.
///
/// The following example evaluates the polynomial (4x^2 + 2x - 8) for x = 3.
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let a = poly![-8, 2, 4];
/// assert_eq!(a.eval(3).unwrap(), 34);
/// # }
/// ```
pub fn eval<X>(&self, x: X) -> Option<T>
where
T: AddAssign + Copy,
X: MulAssign + Mul<T, Output = T> + Copy,
{
if self.0.len() == 0 {
None
} else {
let mut p = x; // running power of `x`
let mut res = self[0];
for i in 1..self.0.len() {
res += p * self[i];
p *= x;
}
Some(res)
}
}
pub fn iter(&self) -> impl Iterator<Item = &T> {
self.0.iter()
}
pub fn into_iter(self) -> impl IntoIterator<Item = T, IntoIter = IntoIter<T>> {
self.0.into_iter()
}
}
impl<T> From<Vec<T>> for Polynomial<T> {
fn from(v: Vec<T>) -> Self {
Polynomial(v)
}
}
impl<T> Into<Vec<T>> for Polynomial<T> {
fn into(self) -> Vec<T> {
self.0
}
}
impl<T, I: SliceIndex<[T]>> Index<I> for Polynomial<T> {
type Output = I::Output;
fn index(&self, index: I) -> &Self::Output {
&self.0[index]
}
}
impl<T, I: SliceIndex<[T]>> IndexMut<I> for Polynomial<T> {
fn index_mut(&mut self, index: I) -> &mut Self::Output {
&mut self.0[index]
}
}
/// Add two [`Polynomial`]s.
///
/// The following example adds two polynomials:
/// (4x^2 + 2x - 8) + (x + 1) = (4x^2 + 3x - 7)
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let a = poly![-8, 2, 4];
/// let b = poly![1, 1];
/// assert_eq!(a + b, poly![-7, 3, 4]);
/// # }
/// ```
impl<T: Add<Output = T>> Add for Polynomial<T>
where
T: Add + Copy + Clone,
{
type Output = Self;
fn add(mut self, other: Self) -> Self::Output {
self += other;
self
}
}
impl<T> AddAssign for Polynomial<T>
where
T: Add<Output = T> + Copy,
{
fn add_assign(&mut self, rhs: Self) {
let min_len = if self.0.len() < rhs.0.len() {
self.0.len()
} else {
rhs.0.len()
};
if self.0.len() == min_len {
for i in min_len..rhs.0.len() {
self.push(rhs[i]);
}
}
for i in 0..min_len {
self[i] = self[i] + rhs[i];
}
}
}
/// Subtract two [`Polynomial`]s.
///
/// The following example subtracts two polynomials:
/// (4x^2 + 2x - 8) - (x + 1) = (4x^2 + x - 9)
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let a = poly![-8, 2, 4];
/// let b = poly![1, 1];
/// assert_eq!(a - b, poly![-9, 1, 4]);
/// # }
/// ```
impl<T: Sub<Output = T>> Sub for Polynomial<T>
where
T: Sub + Neg<Output = T> + Copy + Clone,
{
type Output = Self;
fn sub(self, other: Self) -> Self::Output {
let mut diff = self.clone();
diff -= other;
diff
}
}
impl<T> SubAssign for Polynomial<T>
where
T: Sub<Output = T> + Neg<Output = T> + Copy,
{
fn sub_assign(&mut self, rhs: Self) {
let min_len = if self.0.len() < rhs.0.len() {
self.0.len()
} else {
rhs.0.len()
};
if self.0.len() == min_len {
for i in min_len..rhs.0.len() {
self.push(-rhs[i]);
}
}
for i in 0..min_len {
self[i] = self[i] - rhs[i];
}
}
}
/// Multiply a [`Polynomial`] by some value.
///
/// The following example multiplies a polynomial (4x^2 + 2x - 8) by 2:
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let p = poly![-8, 2, 4] * 2;
/// assert_eq!(p, poly![-16, 4, 8]);
/// # }
/// ```
impl<T> Mul<T> for Polynomial<T>
where
T: MulAssign + Copy,
{
type Output = Self;
fn mul(self, rhs: T) -> Self::Output {
let mut prod = self.clone();
prod *= rhs;
prod
}
}
impl<T> MulAssign<T> for Polynomial<T>
where
T: MulAssign + Copy,
{
fn mul_assign(&mut self, rhs: T) {
for i in 0..self.0.len() {
self[i] *= rhs;
}
}
}
/// Multiply two [`Polynomial`]s.
///
/// The following example multiplies two polynomials:
/// (4x^2 + 2x - 8) * (x + 1) = (4x^3 + 6x^2 - 6x - 8)
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let a = poly![-8, 2, 4];
/// let b = poly![1, 1];
/// assert_eq!(a * b, poly![-8, -6, 6, 4]);
/// # }
/// ```
impl<T> Mul for Polynomial<T>
where
T: Mul<Output = T> + AddAssign + Sub<Output = T>,
T: Copy + Clone,
{
type Output = Self;
fn mul(self, rhs: Self) -> Self::Output {
let mut new = self.clone();
new *= rhs;
new
}
}
impl<T> MulAssign for Polynomial<T>
where
T: Mul<Output = T> + AddAssign + Sub<Output = T>,
T: Copy + Clone,
{
fn mul_assign(&mut self, rhs: Self) {
let orig = self.clone();
// One of the vectors must be non-empty
if self.0.len() > 0 || rhs.0.len() > 0 {
// Since core::num does not provide the `Zero()` trait
// this hack lets us calculate zero from any generic
let zero = if self.0.len() > 0 {
self[0] - self[0]
} else {
rhs[0] - rhs[0]
};
// Clear `self`
for i in 0..self.0.len() {
self.0[i] = zero;
}
// Resize vector with size M + N - 1
self.0.resize(self.0.len() + rhs.0.len() - 1, zero);
// Calculate product
for i in 0..orig.0.len() {
for j in 0..rhs.0.len() {
self[i + j] += orig[i] * rhs[j];
}
}
}
}
}
/// Divide a [`Polynomial`] by some value.
///
/// The following example divides a polynomial (4x^2 + 2x - 8) by 2:
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let p = poly![-8, 2, 4] / 2;
/// assert_eq!(p, poly![-4, 1, 2]);
/// # }
/// ```
impl<T> Div<T> for Polynomial<T>
where
T: DivAssign + Copy,
{
type Output = Self;
fn div(self, rhs: T) -> Self::Output {
let mut prod = self.clone();
prod /= rhs;
prod
}
}
impl<T> DivAssign<T> for Polynomial<T>
where
T: DivAssign + Copy,
{
fn div_assign(&mut self, rhs: T) {
for i in 0..self.0.len() {
self[i] /= rhs;
}
}
}
impl<T> PartialEq for Polynomial<T>
where
T: Sub<T, Output = T> + Eq + Copy,
{
fn eq(&self, other: &Self) -> bool {
let degree = self.degree();
if degree != other.degree() {
return false;
}
for i in 0..=degree {
if self[i] != other[i] {
return false;
}
}
true
}
}
impl<T> Eq for Polynomial<T> where T: Sub<T, Output = T> + Eq + Copy {}
/// Creates a [`Polynomial`] from a list of coefficients in ascending order.
///
/// This is a wrapper around the `vec!` macro, to instantiate a polynomial from
/// a vector of coefficients.
///
/// `poly!` allows `Polynomial`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Polynomial`] containing a given list of coefficients:
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let p = poly![1, 2, 3]; // 3x^2 + 2x + 1
/// assert_eq!(p[0], 1);
/// assert_eq!(p[1], 2);
/// assert_eq!(p[2], 3);
/// # }
/// ```
///
/// - Create a [`Polynomial`] from a given coefficient and size:
///
/// ```
/// # #[macro_use] extern crate polynomials;
/// # fn main() {
/// let p = poly![1; 3]; // x^2 + x + 1
/// assert_eq!(p, poly![1, 1, 1]);
/// # }
/// ```
#[macro_export]
macro_rules! poly {
($($args:tt)*) => (
$crate::Polynomial::from(vec![$($args)*])
);
}
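// Illustrative end-to-end sketch combining the macro with arithmetic and
// evaluation (not part of the crate's API surface):
//
// let p = (poly![1, 1] * poly![1, 1]) + poly![0, 0, 1]; // (x + 1)^2 + x^2
// assert_eq!(p, poly![1, 2, 2]);
// assert_eq!(p.eval(2).unwrap(), 13);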
#[cfg(test)]
mod tests {
use super::*;
use sparse::SparsePolynomial;
#[test]
fn degree() {
assert_eq!(poly![8, 6, 2, 3].degree(), 3);
assert_eq!(poly![8, 6, 2, 3].degree(), 3);
assert_eq!(poly![0, 0, 6, 2, 3].degree(), 4);
assert_eq!(poly![0, 0].degree(), 0);
assert_eq!(poly![0, 99].degree(), 1);
assert_eq!(poly![99, 0].degree(), 0);
}
#[test]
fn eval() {
assert_eq!(poly![1, 1, 1, 1].eval(1).unwrap(), 4);
assert_eq!(poly![-2, -2, -2, -2].eval(1).unwrap(), -8);
assert_eq!(poly![100, 0, 0, 0].eval(9).unwrap(), 100);
assert_eq!(poly![0, 1, 0, 0].eval(9).unwrap(), 9);
assert_eq!(poly![0, 0, -1, 0].eval(9).unwrap(), -81);
assert_eq!(poly![0, -9, 0, 40].eval(2).unwrap(), 302);
}
#[test]
fn iter() {
assert_eq!(poly![0, -9, 0, 40].iter().sum::<isize>(), 31);
}
#[test]
fn add() {
let a = poly![-200, 6, 2, 3, 53, 0, 0]; // Higher order 0s should be ignored
let b = poly![-1, -6, -7, 0, 1000];
let c = poly![-201, 0, -5, 3, 1053];
assert_eq!(a.clone() + b.clone(), c);
assert_eq!(b + a, c);
}
#[test]
fn add_assign() {
let mut a = poly![-200, 6, 2, 3, 53, 0, 0]; // Higher order 0s should be ignored
let b = poly![-1, -6, -7, 0, 1000];
let c = poly![-201, 0, -5, 3, 1053];
a += b;
assert_eq!(a, c);
let mut a = poly![1]; // Low degree should be expanded
let b = poly![0, 1];
let c = poly![1, 1];
a += b;
assert_eq!(a, c);
}
#[test]
fn sub() {
let a = poly![-200, 6, 2, 3, 53, 0, 0]; // Higher order 0s should be ignored
let b = poly![-1, -6, -7, 0, 1000];
let c = poly![-199, 12, 9, 3, -947];
let d = poly![199, -12, -9, -3, 947];
assert_eq!(a.clone() - b.clone(), c);
assert_eq!(b - a, d);
}
#[test]
fn sub_assign() {
let mut a = poly![-200, 6, 2, 3, 53, 0, 0]; // Higher order 0s should be ignored
let b = poly![-1, -6, -7, 0, 1000];
let c = poly![-199, 12, 9, 3, -947];
a -= b;
assert_eq!(a, c);
let mut a = poly![1]; // Low degree should be expanded
let b = poly![0, 1];
let c = poly![1, -1];
a -= b;
assert_eq!(a, c);
}
#[test]
fn mul() {
let a = poly![1, 0, 0]; // Higher order 0s should be ignored
let b = poly![0];
let c = poly![0];
assert_eq!(a * b, c);
let a = poly![-7];
let b = poly![4];
let c = poly![-28];
assert_eq!(a * b, c);
let a = poly![0, 1];
let b = poly![4];
let c = poly![0, 4];
assert_eq!(a * b, c);
let a = poly![0, -1];
let b = poly![0, 1];
let c = poly![0, 0, -1];
assert_eq!(a * b, c);
}
#[test]
fn mul_assign() {
let mut a = poly![1, 0, 0]; // Higher order 0s should be ignored
let b = poly![0];
let c = poly![0];
a *= b;
assert_eq!(a, c);
let mut a = poly![-7];
let b = poly![4];
let c = poly![-28];
a *= b;
assert_eq!(a, c);
let mut a = poly![0, 1];
let b = poly![4];
let c = poly![0, 4];
a *= b;
assert_eq!(a, c);
let mut a = poly![0, -1];
let b = poly![0, 1];
let c = poly![0, 0, -1];
a *= b;
assert_eq!(a, c);
}
#[test]
fn mul_by_value() {
let a = poly![1, 2, 3];
let b = poly![2, 4, 6];
assert_eq!(a * 2, b);
let mut a = poly![1, 2, 3];
let b = poly![2, 4, 6];
a *= 2;
assert_eq!(a, b);
}
#[test]
fn div_by_value() {
let a = poly![2, 4, 6];
let b = poly![1, 2, 3];
assert_eq!(a / 2, b);
let mut a = poly![2, 4, 6];
let b = poly![1, 2, 3];
a /= 2;
assert_eq!(a, b);
}
#[test]
fn equality() {
let a = poly![1, 0];
let b = poly![-1, 0];
assert!(a != b);
}
#[test]
fn sparse_degree() {
assert_eq!(SparsePolynomial::from(vec![(0,8), (1,6), (100,2), (5,3)]).degree(), 100);
assert_eq!(SparsePolynomial::from(vec![(0,8), (5,3)]).degree(), 5);
assert_eq!(SparsePolynomial::from(vec![(0,8)]).degree(), 0);
}
#[test]
fn sparse_add() {
let mut a = SparsePolynomial::from(vec![(0,1),(1,1)]);
let mut b = SparsePolynomial::from(vec![(0,1),(1,1)]);
let mut c = a + b;
assert_eq!(SparsePolynomial::from(vec![(0,2), (1,2)]), c);
a = SparsePolynomial::from(vec![(0,-1),(1,1)]);
b = SparsePolynomial::from(vec![(0,1),(1,1)]);
c = a + b;
assert_eq!(SparsePolynomial::from(vec![(1,2)]), c);
}
#[test]
fn sparse_mul() {
let a = SparsePolynomial::from(vec![(0,1),(1,1)]);
let b = SparsePolynomial::from(vec![(0,1),(1,1)]);
let c = a * b;
assert_eq!(SparsePolynomial::from(vec![(0,1),(1,2),(2,1)]), c);
}
#[test]
fn sparse_mul_high_degree() {
let a = SparsePolynomial::from(vec![(0,-1),(12,1)]);
let b = SparsePolynomial::from(vec![(12,9),(15,1),(100,3)]);
let c = a * b;
// checked on wolfram alpha
assert_eq!(SparsePolynomial::from(vec![(12,-9),(15,-1),(24,9),(27,1),(100,-3),(112,3)]), c);
}
#[test]
fn sparse_eval() {
let a = SparsePolynomial::from(vec![(0,-1),(12,1)]);
// checked on wolfram alpha
let mut y = a.eval(1);
assert_eq!(y, 0.into());
y = a.eval(2);
assert_eq!(y, 4_095.into());
y = a.eval(3);
assert_eq!(y, 531_440.into());
y = a.eval(4);
assert_eq!(y, 16_777_215.into());
y = a.eval(5);
assert_eq!(y, 244_140_624.into());
}
}
|
/**
******************************************************************************
* @file inet_hal.h
* @author Matthew McGowan
* @version V1.0.0
* @date 25-Sept-2014
* @brief Internet APIs
******************************************************************************
Copyright (c) 2013-2015 Particle Industries, Inc. All rights reserved.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, see <http://www.gnu.org/licenses/>.
******************************************************************************
*/
#ifndef DNS_HAL_H
#define DNS_HAL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdbool.h>
#include "static_assert.h"
#if PLATFORM_ID>=4 && PLATFORM_ID<=8
#define HAL_IPv6 1
#else
#define HAL_IPv6 0
#endif
#if HAL_IPv6
typedef struct __attribute__((__packed__)) _HAL_IPAddress_t {
union {
uint32_t ipv4;
uint32_t ipv6[4];
};
    uint8_t v; // 4 for IPv4, 6 for IPv6
} HAL_IPAddress;
STATIC_ASSERT(HAL_IPAddress_size, sizeof(HAL_IPAddress)==17);
#else
typedef struct __attribute__((__packed__)) _HAL_IPAddress_t {
union {
uint32_t ipv4;
};
} HAL_IPAddress;
STATIC_ASSERT(HAL_IPAddress_size, sizeof(HAL_IPAddress)==4);
#endif
inline bool is_ipv4(const HAL_IPAddress* address)
{
#if HAL_IPv6
return address->v==4;
#else
return true;
#endif
}
typedef struct __attribute__((__packed__)) _NetworkConfig_t {
HAL_IPAddress aucIP; // byte 0 is MSB, byte 3 is LSB
HAL_IPAddress aucSubnetMask; // byte 0 is MSB, byte 3 is LSB
HAL_IPAddress aucDefaultGateway; // byte 0 is MSB, byte 3 is LSB
HAL_IPAddress aucDHCPServer; // byte 0 is MSB, byte 3 is LSB
HAL_IPAddress aucDNSServer; // byte 0 is MSB, byte 3 is LSB
uint8_t uaMacAddr[6];
} NetworkConfig;
STATIC_ASSERT(NetworkConfig_size, sizeof(NetworkConfig)==sizeof(HAL_IPAddress)*5+6);
typedef uint32_t network_interface_t;
/**
*
* @param hostname buffer to receive the hostname
* @param hostnameLen length of the hostname buffer
* @param out_ip_addr The ip address in network byte order.
* @return
*/
int inet_gethostbyname(const char* hostname, uint16_t hostnameLen, HAL_IPAddress* out_ip_addr,
network_interface_t nif, void* reserved);
/**
*
 * @param address The IP address. MSB..LSB [0..3]
* @param nTries
* @return >0 on success. 0 on timeout? <0 on error.
*/
int inet_ping(const HAL_IPAddress* address, network_interface_t nif, uint8_t nTries,
void* reserved);
#ifdef __cplusplus
}
#endif
#endif /* DNS_HAL_H */
|
"""
3D version of Res2Net (v1b) encoder with a UNet-like decoder.
Adapted from their official git repo:
https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net_v1b.py
"""
import pathlib
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
if __name__ == '__main__':
import sys
curr_path = pathlib.Path(__file__).parent.absolute()
sys.path.append(str(curr_path.parent.parent.parent))
from lib.nets.basemodel import BaseModel
from lib.nets.component_factories import NormFactory3d, ActFactory
__all__ = ['Res2Net', 'res2net50_v1b', 'res2net101_v1b']
class Bottle2neck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
baseWidth=26, scale=4, stype='normal',
norm='batchnorm', act='relu'):
""" Constructor
Args:
inplanes: input channel dimensionality
planes: output channel dimensionality
stride: conv stride. Replaces pooling layer.
downsample: None when stride = 1
            baseWidth: basic width of conv3x3
            scale: number of scales.
            stype: 'normal': normal set. 'stage': first block of a new stage.
"""
super(Bottle2neck, self).__init__()
self.norm = NormFactory3d(norm)
self.act = ActFactory(act)
width = int(math.floor(planes * (baseWidth / 64.0)))
self.conv1 = nn.Conv3d(inplanes, width*scale, kernel_size=1, bias=False)
self.bn1 = self.norm.create(width*scale)
if scale == 1:
self.nums = 1
else:
self.nums = scale -1
if stype == 'stage':
self.pool = nn.AvgPool3d(kernel_size=3, stride=stride, padding=1)
convs = []
bns = []
for i in range(self.nums):
convs.append(nn.Conv3d(width, width, kernel_size=3, stride=stride,
padding=1, bias=False))
bns.append(self.norm.create(width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = nn.Conv3d(width*scale, planes * self.expansion,
kernel_size=1, bias=False)
self.bn3 = self.norm.create(planes * self.expansion)
self.relu = self.act.create(inplace=True)
self.downsample = downsample
self.stype = stype
self.scale = scale
self.width = width
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
for i in range(self.nums):
if i==0 or self.stype=='stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp)
sp = self.relu(self.bns[i](sp))
if i==0:
out = sp
else:
out = torch.cat((out, sp), 1)
if self.scale != 1 and self.stype=='normal':
out = torch.cat((out, spx[self.nums]),1)
elif self.scale != 1 and self.stype=='stage':
out = torch.cat((out, self.pool(spx[self.nums])),1)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class UpBlock(nn.Module):
"""
Upsamples via linear interpolation.
"""
def __init__(self, side_in_dim, bot_in_dim, out_dim,
norm='batchnorm', act='relu'):
self.norm = NormFactory3d(norm)
self.act = ActFactory(act)
super(UpBlock, self).__init__()
self.side_conv = nn.Conv3d(side_in_dim, out_dim, 1, bias=False)
self.side_norm = self.norm.create(out_dim)
self.side_act = self.act.create()
self.bot_conv = nn.Conv3d(bot_in_dim, out_dim, 1, bias=False)
self.bot_norm = self.norm.create(out_dim)
self.bot_act = self.act.create()
self.final_conv = nn.Conv3d(out_dim, out_dim, 3, padding=1,
bias=False)
self.final_norm = self.norm.create(out_dim)
self.final_act = self.act.create()
def forward(self, side_in, bot_in):
"""
Args:
side_in (tensor): side input
up_in (tensor): activated input from below that needs upsampling
Returns:
out (tensor): activated features
"""
# process input from below (upsample by 2x)
bot = F.interpolate(bot_in, scale_factor=2, mode='trilinear',
align_corners=True)
bot = self.bot_act(self.bot_norm(self.bot_conv(bot)))
side = self.side_act(self.side_norm(self.side_conv(side_in)))
agg = bot + side
out = self.final_act(self.final_norm(self.final_conv(agg)))
return out
class DeepSupBlock(nn.Module):
def __init__(self, num_upsamples, in_channels, out_channels,
use_deconv=True):
super().__init__()
assert num_upsamples >= 1
self.num_upsamples = num_upsamples
self.in_channels = in_channels
self.out_channels = out_channels
self.use_deconv = use_deconv
modules_dict = OrderedDict()
base_channels = max(out_channels, 16)
for i in range(num_upsamples):
scale_out_dims = base_channels * 2 ** (self.num_upsamples - i - 1)
if i == 0:
modules_dict['in_conv1'] = nn.Conv3d(in_channels,
scale_out_dims, kernel_size=3, padding=1, bias=False)
modules_dict['in_bn1'] = nn.BatchNorm3d(scale_out_dims)
modules_dict['in_act1'] = nn.ReLU()
in_channels = scale_out_dims
if self.use_deconv:
modules_dict[f'scale{i+1}_up'] = nn.ConvTranspose3d(in_channels,
scale_out_dims, kernel_size=4, stride=2, padding=1,
bias=False)
modules_dict[f'scale{i+1}_bn'] = nn.BatchNorm3d(scale_out_dims)
modules_dict[f'scale{i+1}_act'] = nn.ReLU()
else:
modules_dict[f'scale{i+1}_up'] = nn.Upsample(scale_factor=2,
mode='trilinear', align_corners=True)
if i == num_upsamples - 1:
modules_dict['final_conv'] = nn.Conv3d(scale_out_dims,
out_channels, kernel_size=1, bias=True)
in_channels = scale_out_dims
self.decoder = nn.Sequential(modules_dict)
def forward(self, x):
return self.decoder(x)
class Res2UNet3d(BaseModel):
def __init__(self, block, layers, baseWidth=26, scale=4,
in_channels=1, num_classes=2, prelinear_dropout=0,
norm='batchnorm', act='relu', deep_sup=True):
self.prelinear_dropout = prelinear_dropout
self.deep_sup = deep_sup
self.num_classes = num_classes
self.norm = NormFactory3d(norm)
self.act = ActFactory(act)
super().__init__()
self.inplanes = 64
self.baseWidth = baseWidth
self.scale = scale
self.conv1 = nn.Sequential(
nn.Conv3d(in_channels, 32, 3, 2, 1, bias=False),
self.norm.create(32),
self.act.create(inplace=True),
nn.Conv3d(32, 32, 3, 1, 1, bias=False),
self.norm.create(32),
self.act.create(inplace=True),
nn.Conv3d(32, 64, 3, 1, 1, bias=False)
)
self.bn1 = self.norm.create(64)
self.relu = self.act.create()
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.up16 = UpBlock(1024, 2048, 256, norm=norm, act=act)
self.up8 = UpBlock(512, 256, 128, norm=norm, act=act)
self.up4 = UpBlock(256, 128, 64, norm=norm, act=act)
self.up2 = UpBlock(64, 64, 64, norm=norm, act=act)
self.up2_conv = nn.Conv3d(64, 32, 3, 1, 1, bias=False)
self.up2_norm = self.norm.create(32)
self.up2_act = self.act.create()
self.final_conv = nn.Conv3d(32, num_classes, 1, bias=True)
if self.deep_sup:
self.up2_deepsup = DeepSupBlock(1, 64, num_classes)
self.up4_deepsup = DeepSupBlock(2, 64, num_classes)
self.up8_deepsup = DeepSupBlock(3, 128, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
tot_params, tot_tparams = self.param_counts
print(f'💠 Res2UNet_3d model initiated with n_classes={num_classes}, \n'
f' layers={layers}, base-width={baseWidth}, scale={scale}, \n'
f' in_chans={in_channels}, deep_sup={self.deep_sup}, '
f'norm={norm}, act={act}, \n'
f' params={tot_params:,}, trainable_params={tot_tparams:,}.')
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool3d(kernel_size=stride, stride=stride,
ceil_mode=True, count_include_pad=False),
nn.Conv3d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=1, bias=False),
self.norm.create(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride,
downsample=downsample, stype='stage',
baseWidth=self.baseWidth, scale=self.scale,
norm=self.norm, act=self.act))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth,
scale=self.scale, norm=self.norm, act=self.act))
return nn.Sequential(*layers)
def forward(self, x):
# Encoder path
x2 = self.conv1(x)
x2 = self.bn1(x2)
x2 = self.relu(x2)
x4 = self.maxpool(x2)
x4 = self.layer1(x4) # 4x down, activated
x8 = self.layer2(x4) # 8x down, activated
x16 = self.layer3(x8) # 16x down, activated
x32 = self.layer4(x16) # 32x down, activated
if self.prelinear_dropout > 0:
x32 = F.dropout(x32, p=self.prelinear_dropout,
training=self.training)
# Decoder path
out16 = self.up16(x16, x32)
out8 = self.up8(x8, out16)
out4 = self.up4(x4, out8)
out2 = self.up2(x2, out4)
out = self.up2_act(self.up2_norm(self.up2_conv(out2)))
out = F.interpolate(out, scale_factor=2, mode='trilinear',
align_corners=True)
out = self.final_conv(out)
if self.deep_sup:
return {
'2x': self.up2_deepsup(out2),
'4x': self.up4_deepsup(out4),
'8x': self.up8_deepsup(out8),
'out': out}
return {'out': out}
def res2net50_v1b(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b model.
Res2Net-50 refers to the Res2Net-50_v1b_26w_4s.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2UNet3d(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4,
**kwargs)
return model
def res2net101_v1b(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2UNet3d(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4,
**kwargs)
return model
# def res2net50_v1b_26w_4s(pretrained=False, **kwargs):
# """Constructs a Res2Net-50_v1b_26w_4s model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
# if pretrained:
# load_state_dict(model, 'res2net50_v1b_26w_4s')
# return model
# def res2net101_v1b_26w_4s(pretrained=False, **kwargs):
# """Constructs a Res2Net-50_v1b_26w_4s model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
# if pretrained:
# load_state_dict(model, 'res2net101_v1b_26w_4s')
# return model
# def res2net152_v1b_26w_4s(pretrained=False, **kwargs):
# """Constructs a Res2Net-50_v1b_26w_4s model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = Res2Net(Bottle2neck, [3, 8, 36, 3], baseWidth=26, scale=4, **kwargs)
# if pretrained:
# load_state_dict(model, 'res2net152_v1b_26w_4s')
# return model
# def load_state_dict(model, model_key):
# print(f' * Res2Net1b loading pretrained ImageNet weights.')
# # print(model.load_state_dict(model_zoo.load_url(model_urls[model_key])))
# # My code after downloading model params
# state_dict = torch.load(model_params[model_key], map_location='cpu')
# if model.num_classes != 1000:
# del state_dict['fc.weight']
# del state_dict['fc.bias']
# print(model.load_state_dict(state_dict, strict=False))
def get_model(layers, num_classes, pretrained=True, prelinear_dropout=0):
layers = int(layers)
if layers == 50:
model = res2net50_v1b(pretrained=pretrained, num_classes=num_classes,
prelinear_dropout=prelinear_dropout)
elif layers == 101:
model = res2net101_v1b(pretrained=pretrained, num_classes=num_classes,
prelinear_dropout=prelinear_dropout)
else:
raise ValueError(f'{layers} layers is not supported right now.')
return model
if __name__ == '__main__':
device = 'cuda' if torch.cuda.is_available() else 'cpu'
images = torch.rand(1, 1, 64, 64, 64).to(device)
model = res2net50_v1b().to(device)
out = model(images)
for k, v in out.items():
print(k, v.shape, v.min(), v.max())
|
// Package console provides a simple interface for logging things to stdout & a log file
package console
import (
"fmt"
"os"
"runtime/debug"
"sync"
"time"
"github.com/fatih/color"
)
const (
// LevelDebug debug level includes debugging information and is very verbose
LevelDebug = 3
// LevelInfo informational messages for normal operation of the application
LevelInfo = 2
// LevelWarn warning messages for potential issues
LevelWarn = 1
// LevelError error messages for problems
LevelError = 0
// LevelNone no messages
LevelNone = -1
)
// Console describes a log object
type Console struct {
config Config
file *os.File
mutex *sync.Mutex
}
// Config describes the configuration for a console session
type Config struct {
// Path the path to where the log file should live.
// omit this to disable logging to a file.
Path string
// WriteLevel the log level that events must be at least before they
// are written to the log file.
WriteLevel int
// PrintLevel the log level that events must be at least before they
// are written to console.
PrintLevel int
}
// New creates a new console instance with the provided config.
func New(config Config) (*Console, error) {
	c := Console{
		config: config,
	}
	if config.Path == "" {
		return &c, nil
	}
	logFile, err := newFile(config.Path)
if err != nil {
return nil, err
}
c.file = logFile
c.mutex = &sync.Mutex{}
return &c, nil
}
func newFile(logPath string) (*os.File, error) {
return os.OpenFile(logPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
}
// Close closes the log file
func (l *Console) Close() {
if l.file != nil {
l.file.Close()
}
}
// Rotate retires the current log file, moving it into destinationDir under a name with today's date, and opens a fresh log file
func (l *Console) Rotate(destinationDir string) error {
if l.file == nil {
return nil
}
l.mutex.Lock()
defer l.mutex.Unlock()
destFileName := destinationDir + "/log." + time.Now().Format("2006-01-02")
l.Close()
l.file = nil
if err := os.Rename(l.config.Path, destFileName); err != nil {
fmt.Printf("Error rotating log file: %s\n", err.Error())
return err
}
newFile, err := newFile(l.config.Path)
if err != nil {
fmt.Printf("Error rotating log file: %s\n", err.Error())
return err
}
l.file = newFile
return nil
}
func (l *Console) write(message string) {
if l.file == nil {
return
}
l.mutex.Lock()
if l.file != nil {
_, err := l.file.WriteString(time.Now().Format(time.RFC3339) + " " + message + "\n")
if err != nil {
// Try opening the file again
l.file.Close()
newFile, err := newFile(l.config.Path)
if err != nil {
fmt.Printf("Error writing to log: %s", err.Error())
} else {
l.file = newFile
}
}
}
l.mutex.Unlock()
}
// Debug print debug information to the console if verbose logging is enabled
// Safe to call with sensitive data, but verbose logging should not be enabled on production instances
func (l *Console) Debug(format string, a ...interface{}) {
if l.config.PrintLevel >= LevelDebug {
fmt.Printf("%s %s\n", color.HiBlackString("[DEBUG]"), fmt.Sprintf(format, a...))
}
if l.config.WriteLevel >= LevelDebug {
l.write("[DEBUG] " + fmt.Sprintf(format, a...))
}
}
// Info print informational message to the console
func (l *Console) Info(format string, a ...interface{}) {
if l.config.PrintLevel >= LevelInfo {
fmt.Printf("%s %s\n", color.BlueString("[INFO] "), fmt.Sprintf(format, a...))
}
if l.config.WriteLevel >= LevelInfo {
l.write("[INFO] " + fmt.Sprintf(format, a...))
}
}
// Warn print warning information to the console
func (l *Console) Warn(format string, a ...interface{}) {
if l.config.PrintLevel >= LevelWarn {
fmt.Printf("%s %s\n", color.YellowString("[WARN] "), fmt.Sprintf(format, a...))
}
if l.config.WriteLevel >= LevelWarn {
l.write("[WARN] " + fmt.Sprintf(format, a...))
}
}
// Error print error information to the console
func (l *Console) Error(format string, a ...interface{}) {
stack := string(debug.Stack())
	if l.config.PrintLevel >= LevelError {
fmt.Printf("%s %s\n%s\n", color.RedString("[ERROR]"), fmt.Sprintf(format, a...), stack)
}
	if l.config.WriteLevel >= LevelError {
l.write(fmt.Sprintf("[ERROR] %s\n%s", fmt.Sprintf(format, a...), stack))
}
}
// ErrorDesc print an error object with description
func (l *Console) ErrorDesc(desc string, err error) {
l.Error("%s: %s", desc, err.Error())
}
// Fatal print fatal error and exit the app
func (l *Console) Fatal(format string, a ...interface{}) {
fmt.Printf("%s\n", color.RedString("[FATAL] "+fmt.Sprintf(format, a...)))
l.write("[FATAL] " + fmt.Sprintf(format, a...))
os.Exit(1)
}
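// ExampleUsage is an illustrative sketch of the intended API; the path and
// levels here are arbitrary choices, not defaults.
func ExampleUsage() {
	c, err := New(Config{Path: "app.log", WriteLevel: LevelInfo, PrintLevel: LevelDebug})
	if err != nil {
		fmt.Printf("failed to open log: %s\n", err.Error())
		return
	}
	defer c.Close()
	c.Info("service started on port %d", 8080)
	c.ErrorDesc("query failed", fmt.Errorf("connection refused"))
}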
|
package io.gustavoamigo.quill.pgsql.encoding.json.play
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource
trait JsonEncoder {
this: JdbcSource[_, _] =>
import play.api.libs.json._
private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
new Encoder[T] {
override def apply(index: Int, value: T, row: PreparedStatement) = {
row.setObject(index + 1, valueToString(value), Types.OTHER)
row
}
}
implicit val jsonEncoder: Encoder[JsValue] = genericEncoder(Json.stringify)
}
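// Illustrative usage sketch (the object name, dialect, and naming strategy are
// assumptions, not part of this file): mix the trait into a Quill JDBC source
// so play-json JsValue columns are written as Postgres json values.
//
// object db extends JdbcSource[PostgresDialect, SnakeCase] with JsonEncoder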
|
// Distributed under the MIT software license, see the accompanying
// file LICENSE or http://www.opensource.org/licenses/mit-license.php.
use hex::encode;
use qrllib::rust_wrapper::shasha::shasha::sha2_256;
#[test]
fn hashing_test() {
    let input = String::from("This is a test X").into_bytes();
    let output_hashed = sha2_256(&input);
assert_eq!(input.len(), 16);
assert_eq!(output_hashed.len(), 32);
assert_eq!(encode(input), "54686973206973206120746573742058");
assert_eq!(
encode(output_hashed),
"a11609b2cc5f26619fcc865473246c9ac59861383a3c4edd2433230258afa03b",
);
}
|
using Builder.Sample;
using System;
namespace Builder
{
class Program
{
static void Main(string[] args)
{
Sample.Builder notebookBuilder = new NoteBookBuilder();
Sample.Builder gameComputerBuilder = new GameComputerBuilder();
Director director = new Director();
director.Construct(notebookBuilder);
Computer notebook = notebookBuilder.GetComputer();
notebook.Processing();
Console.WriteLine("-------------");
director.Construct(gameComputerBuilder);
Computer gameComputer = gameComputerBuilder.GetComputer();
gameComputer.Processing();
Console.Read();
}
}
}
|
/*
* Copyright (C) 2017-2019 Hazuki
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.hazuki.yuzubrowser.core.utility.extensions
import android.content.ClipData
import android.content.ClipDescription
import android.content.ClipboardManager
import android.content.Context
import android.graphics.Bitmap
import android.graphics.Point
import android.os.Build
import android.util.TypedValue
import android.view.WindowManager
import android.webkit.WebSettings
import androidx.annotation.AttrRes
import androidx.annotation.ColorInt
import androidx.annotation.ColorRes
import androidx.annotation.DimenRes
import androidx.core.content.ContextCompat
import java.io.File
@ColorInt
fun Context.getResColor(@ColorRes id: Int): Int {
return if (Build.VERSION.SDK_INT >= 23) {
resources.getColor(id, theme)
} else {
@Suppress("DEPRECATION")
resources.getColor(id)
}
}
fun Context.getThemeResId(@AttrRes id: Int): Int {
val outValue = TypedValue()
theme.resolveAttribute(id, outValue, true)
return outValue.resourceId
}
fun Context.dimension(@DimenRes id: Int): Int = resources.getDimensionPixelSize(id)
var Context.clipboardText: String
get() {
val manager = getSystemService(Context.CLIPBOARD_SERVICE) as ClipboardManager
val clip = manager.primaryClip ?: return ""
return clip.getItemAt(0).text?.toString() ?: ""
}
set(text) {
val clipData = ClipData("text_data", arrayOf(ClipDescription.MIMETYPE_TEXT_PLAIN), ClipData.Item(text))
val manager = getSystemService(Context.CLIPBOARD_SERVICE) as ClipboardManager
manager.primaryClip = clipData
}
fun Context.convertDpToPx(dp: Int): Int = (resources.displayMetrics.density * dp + 0.5f).toInt()
fun Context.convertDpToFloatPx(dp: Int): Float = resources.displayMetrics.density * dp + 0.5f
val Context.density: Float
get() = resources.displayMetrics.density
fun Context.getFakeChromeUserAgent(): String {
val ua = StringBuilder(WebSettings.getDefaultUserAgent(this))
ua.replace("; wv", "")
ua.replace("Version/4.0 ", "")
return ua.toString()
}
fun Context.readAssetsText(fileName: String): String {
return assets.open(fileName).reader().use { it.readText() }
}
fun Context.getVersionName(): String {
val info = packageManager.getPackageInfo(packageName, 0)
return info.versionName
}
fun Context.getVersionCode(): Int {
val info = packageManager.getPackageInfo(packageName, 0)
return if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
info.longVersionCode.toInt()
} else {
@Suppress("DEPRECATION")
info.versionCode
}
}
fun Context.getBitmap(drawableId: Int): Bitmap {
val drawable = ContextCompat.getDrawable(this, drawableId)!!
return drawable.getBitmap()
}
fun Context.getDisplayHeight(): Int {
val display = (getSystemService(Context.WINDOW_SERVICE) as WindowManager).defaultDisplay
val point = Point()
display.getSize(point)
return point.y
}
val Context.appCacheFile: File
get() = getDir("appcache", Context.MODE_PRIVATE)
val Context.appCacheFilePath: String
get() = appCacheFile.absolutePath
|
from django.test import TransactionTestCase
from django.apps import apps
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from care.facility.models.patient_base import DiseaseStatusEnum
from care.utils.tests.test_base import TestBase
from care.users.models import District, State
class BasePatientRegistrationMigrationTest(TransactionTestCase):
"""
Test specific migrations
Make sure that `self.migrate_from` and `self.migrate_to` are defined.
"""
migrate_from = None
migrate_to = None
@property
def app(self):
return apps.get_containing_app_config(type(self).__module__).label
def setUp(self):
super().setUp()
assert self.migrate_to and self.migrate_from, \
f'TestCase {type(self).__name__} must define migrate_to and migrate_from properties'
self.migrate_from = [(self.app, self.migrate_from)]
self.migrate_to = [(self.app, self.migrate_to)]
self.executor = MigrationExecutor(connection)
self.old_apps = self.executor.loader.project_state(self.migrate_from).apps
# revert to the original migration
self.executor.migrate(self.migrate_from)
# ensure return to the latest migration, even if the test fails
self.addCleanup(self.force_migrate)
self.setUpBeforeMigration(self.old_apps)
self.executor.loader.build_graph()
self.executor.migrate(self.migrate_to)
self.new_apps = self.executor.loader.project_state(self.migrate_to).apps
def setUpBeforeMigration(self, apps):
"""
This method may be used to create stuff before the migrations.
Something like creating an instance of an old model.
"""
pass
@property
def new_model(self):
return self.new_apps.get_model(self.app, 'PatientRegistration')
@property
def old_model(self):
return self.old_apps.get_model(self.app, 'PatientRegistration')
def force_migrate(self, migrate_to=None):
self.executor.loader.build_graph() # reload.
if migrate_to is None:
# get latest migration of current app
migrate_to = [key for key in self.executor.loader.graph.leaf_nodes() if key[0] == self.app]
self.executor.migrate(migrate_to)
class DiseaseStatusMigrationTest(BasePatientRegistrationMigrationTest):
migrate_from = '0223_merge_20210427_1419'
migrate_to = '0224_change_disease_status_from_recover_to_recovered'
def create_patient(self):
data = self.data.copy()
data.pop('medical_history', [])
data.pop('state', '')
data.pop('district', '')
return self.old_model.objects.create(**data)
def setUpBeforeMigration(self, apps):
_state = State.objects.create(name='bihar')
_district = District.objects.create(state=_state, name='dharbhanga')
self.data = TestBase.get_patient_data(state=_state, district=_district)
self.data.update({
'disease_status': DiseaseStatusEnum.RECOVERY.value,
'state_id': _state.id,
'district_id': _district.id,
})
self.patient = self.create_patient()
def test_recover_changed_to_recovered(self):
patient = self.new_model.objects.get(id=self.patient.id)
self.assertEqual(patient.disease_status, DiseaseStatusEnum.RECOVERED.value)
|
#include <glib.h>
#include <string.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
typedef struct _GPtrArrayImplementation
{
gpointer *storage;
guint logical_size;
guint physical_size;
guint ref_count;
} GPtrArrayImplementation;
GPtrArray *g_ptr_array_new(void)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)malloc(sizeof(GPtrArrayImplementation));
array->logical_size = 0;
array->physical_size = 8;
array->ref_count = 1;
array->storage = (gpointer *) malloc(array->physical_size * sizeof(gpointer));
memset(array->storage, 0, array->physical_size * sizeof(gpointer));
return (GPtrArray *)array;
}
gpointer *g_ptr_array_free(GPtrArray *arrayInterface,
gboolean free_seg)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)arrayInterface;
gpointer *storage = array->storage;
if (free_seg)
{
free(array->storage);
array->storage = storage = NULL;
array->logical_size = array->physical_size = 0;
}
--array->ref_count;
if (array->ref_count == 0)
{
free(array);
}
else
{
array->logical_size = 0;
}
return storage;
}
gpointer
g_ptr_array_index(GPtrArray *arrayInterface,
guint index_)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)arrayInterface;
if (array->logical_size <= index_)
{
return NULL;
}
return array->storage[index_];
}
void
g_ptr_array_add(GPtrArray *arrayInterface,
gpointer data)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)arrayInterface;
if (array->logical_size == array->physical_size)
{
array->physical_size += 8;
gpointer *new_storage = (gpointer *) malloc(
array->physical_size * sizeof(gpointer)
);
memcpy(new_storage, array->storage, array->logical_size * sizeof(gpointer));
free(array->storage);
array->storage = new_storage;
}
array->storage[array->logical_size] = data;
++array->logical_size;
}
gboolean
g_ptr_array_remove(GPtrArray *arrayInterface,
gpointer data)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)arrayInterface;
guint a,b;
for (a = 0; a < array->logical_size; ++a)
{
if (array->storage[a] == data)
{
for (b = a + 1; b < array->logical_size; ++b)
{
array->storage[b - 1] = array->storage[b];
}
--array->logical_size;
return TRUE;
}
}
return FALSE;
}
gpointer
g_ptr_array_remove_index(GPtrArray *arrayInterface,
guint index_)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)arrayInterface;
guint a;
if (array->logical_size <= index_)
{
return NULL;
}
gpointer p = array->storage[index_];
for (a = index_; a < array->logical_size - 1; ++a)
{
array->storage[a] = array->storage[a + 1];
}
--array->logical_size;
return p;
}
void
g_ptr_array_sort(GPtrArray *arrayInterface,
GCompareFunc compare_func)
{
GPtrArrayImplementation *array = (GPtrArrayImplementation *)arrayInterface;
qsort(array->storage, array->logical_size, sizeof(gpointer), compare_func);
}
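/*
 * Illustrative usage sketch for this reimplementation (mirrors the GLib API):
 *
 *   GPtrArray *a = g_ptr_array_new();
 *   g_ptr_array_add(a, "hello");
 *   g_ptr_array_add(a, "world");
 *   printf("%s\n", (char *)g_ptr_array_index(a, 1));  // prints "world"
 *   g_ptr_array_free(a, TRUE);
 */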
|
/**
* Tests for the Authenticator functionality.
*
* @author Sam Claus
* @version 1/17/17
* @copyright Tera Insights
*/
import { Authenticator } from '../src/Authenticator';
import { getServerKey, authenticate, verifySignature } from './TestUtils'
import { websafeBase64ToBytes } from '../src/Converters';
import mocha = require('mocha');
declare class TextEncoder {
constructor()
encode(str: string): Uint8Array
}
describe('Authenticator', () => {
it('should produce a valid authentication message', () => {
const authenticator = new Authenticator();
const message = 'A message to the server.';
return getServerKey().then(serverKey => {
return authenticator.generateKeyPair().then(() => {
return authenticator.importServerKey(serverKey).then(() => {
return authenticator.computeHMAC(message, 'utf-8').then(hmac => {
return authenticator.getPublic().then(clientKey => {
return authenticate({
hmac: hmac,
msg: message,
key: clientKey
});
});
});
});
});
});
});
it('should generate a key pair and export it, then import it and produce a valid authentication message', () => {
let authenticator = new Authenticator();
const message = 'A message to the server.';
return getServerKey().then(serverKey => {
return authenticator.generateKeyPair().then(() => {
return authenticator.exportKey(new Uint8Array([1, 0, 3, 7, 9, 8, 2, 2, 2])).then(extKey => {
authenticator = new Authenticator();
return authenticator.importKey(extKey, new Uint8Array([1, 0, 3, 7, 9, 8, 2, 2, 2])).then(() => {
return authenticator.importServerKey(serverKey).then(() => {
return authenticator.computeHMAC(message, 'utf-8').then(hmac => {
return authenticator.getPublic().then(clientKey => {
return authenticate({
hmac: hmac,
msg: message,
key: clientKey
});
});
});
});
});
});
});
});
});
});
|
export const generateId = (key?: string | number): string => {
return 'react-persistant-state-' + (key ?? generateHash(new Error().stack ?? ''))
}
const generateHash = (x: string): number => {
return x.split('').reduce((prevHash, currVal) => (((prevHash << 5) - prevHash) + currVal.charCodeAt(0)) | 0, 0)
}
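// Illustrative usage sketch: an explicit key yields a stable, predictable id,
// while the zero-argument form hashes the call site's stack trace, so the id
// stays the same across re-renders from the same location:
//
//   generateId('sidebar') // -> 'react-persistant-state-sidebar'
//   generateId()          // -> 'react-persistant-state-<stack-trace hash>'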
|
"""
Import recipes from URLs to our database
"""
import re
import json
from txpx import background, EchoProcess
from txpx.process import LineGlueProtocol
from supperfeed.build import Recipe
LineGlueProtocol.MAX_LENGTH=10000
class ImportProcess(EchoProcess):
"""
Import a recipe by loading the json data dumped by the downloader process
"""
def __init__(self, *a, **kw):
EchoProcess.__init__(self, *a, **kw)
self.linebuf = []
def outLineReceived(self, line):
if re.match(r'^/\*+/$', line):
return self.finished()
self.linebuf.append(line)
def finished(self):
data = json.loads('\n'.join(self.linebuf))
recipe = Recipe.fromLoadedData(data)
recipe.save()
self.linebuf[:] = []
def importRecipe(url):
d = background(['recipeschema', url], proto=ImportProcess)
d.addCallback(lambda ok: Recipe.objects(importedFrom=url).first())
return d
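# Illustrative usage sketch (assumes a running Twisted reactor and a connected
# MongoDB backend for the Recipe documents; the URL is a placeholder):
#
#   d = importRecipe('http://example.com/pasta-recipe')
#   d.addCallback(lambda recipe: print(recipe))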
|
Data for 20:00, September 2, 2020
Status: 200
1. Family of the girl beaten to death in the street by her ex-boyfriend speaks out
Weibo heat: 3394562
2. Hua Chunying quotes Zhong Nanshan in response to a question
Weibo heat: 2213212
3. The "sisters" look wonderful when they shine
Weibo heat: 2057909
4. Go Ahead (以家人之名)
Weibo heat: 1882778
5. Zhongyuan Festival (Ghost Festival)
Weibo heat: 1278047
6. Behind-the-scenes shots from the recording of Dunk of China (这就是灌篮)
Weibo heat: 1179158
7. First-round selection results of the Golden Eagle Awards
Weibo heat: 1161706
8. A Tianjin residential compound houses 100,000 cremation urns
Weibo heat: 1124255
9. Class suspensions lead to cancellation of 10,000-ton milk orders in South Korea
Weibo heat: 969399
10. Multiple regions call for equal treatment of part-time degrees
Weibo heat: 712200
11. The King of Thailand restores Sineenat's royal titles
Weibo heat: 705100
12. The name of Shen Teng's company
Weibo heat: 626404
13. Ning Jing's fans raise a "Mom loves you" banner
Weibo heat: 557126
14. Xinba and Zhang Yuqi
Weibo heat: 461619
15. Wenzhou reports several people swept away by waves during a seaside wedding shoot
Weibo heat: 457861
16. Incoming junior-high students told to bring their own bed boards to school
Weibo heat: 457428
17. Deng Chao reads up on parenting education
Weibo heat: 434710
18. 13,600 rule-breaking mukbang accounts handled within one month
Weibo heat: 421921
19. Foreign Ministry responds to Indian soldiers' deaths in the border clash
Weibo heat: 420028
20. Zoom shares surge, market value surpasses IBM
Weibo heat: 415017
21. Liu Shishi looks gorgeous
Weibo heat: 409074
22. Parents respond to compensation claim over boy defecating in a swimming pool
Weibo heat: 404911
23. The moon
Weibo heat: 398641
24. How wealthy people find romance
Weibo heat: 398408
25. Police officer suspended for bringing family inside a boundary-marker railing for photos
Weibo heat: 368901
26. Mulan gets a release date
Weibo heat: 365856
27. 在劫难逃 (TV drama)
Weibo heat: 364488
28. Rainbows appear over Beijing three days in a row
Weibo heat: 357810
29. Personal medical-insurance account balances exceed 800 billion yuan
Weibo heat: 329823
30. Verdict delivered in Shanghai's 330-million-yuan counterfeit LEGO case
Weibo heat: 320682
31. Jiang Zhenyu's team denies dating rumors
Weibo heat: 319565
32. Li Xingxing
Weibo heat: 265310
33. How imaginative celebrities get when naming their companies
Weibo heat: 232674
34. Love and Redemption (琉璃)
Weibo heat: 222305
35. The art style of the Mulan posters
Weibo heat: 200834
36. Liu Cixin responds to Netflix's series adaptation of The Three-Body Problem
Weibo heat: 190948
37. Fire breaks out at a seafood wholesale market in Fujian
Weibo heat: 186383
38. How funny a sudden romance can be
Weibo heat: 180742
39. A cloud shaped like a cat's paw
Weibo heat: 180632
40. Don't show off your other skills at the office
Weibo heat: 165784
41. Shenzhen enters the era of mandatory garbage sorting
Weibo heat: 160277
42. Great Escape (密室大逃脱)
Weibo heat: 155162
43. The finale of 且听凤鸣
Weibo heat: 155095
44. China urges India to withdraw all personnel who illegally crossed the line
Weibo heat: 155043
45. Song County (嵩县)
Weibo heat: 153674
46. Robert Downey Jr. confirms farewell to Marvel films
Weibo heat: 134244
47. Zhang Yuqi's studio responds
Weibo heat: 132529
48. Japan's germ-warfare archive opened to the public free of charge
Weibo heat: 131081
49. One-armed basketball boy posts video responding to the Nets' invitation
Weibo heat: 127954
50. Zhacai and fresh-meat flaky mooncakes
Weibo heat: 118623
using LinearAlgebra: Matrix
using Random, LinearAlgebra
f(X) = 3sin(X[1]/3.0) + 2cos(X[2]/2.0)
τ(n) = 1/√(10n)
mutable struct CMAES{T}
dim::Int
λ::Int
μ::Int
centroid::Array{T, 1}
c_m::T
weights::Array{T, 1}
μ_eff::T
σ::T
p_σ::Array{T, 1}
c_σ::T
d_σ::T
C::Array{T, 2}
p_c::Array{T, 1}
c_c::T
c_1::T
c_μ::T
end
# Treat each column of A as an argument and return the indices of the top 1..n values of F
function best_n(A::Array, n::Int64, F::Function)
score = zeros(size(A)[2])
for i in 1:size(A)[2]
score[i] = F(A[:, i])
end
return sortperm(score, rev=true)[1:n]
end
function test_commaES(;μ::Int64, ρ::Int64, λ::Int64, F::Function, t=100)
    if λ < μ
        throw(DomainError(λ, "λ must be at least as large as μ"))
    end
    # Initialize the parent population (x, y, σ) x μ
    P = 2 * rand(Float64, (3, μ)) .- 1
    for n in 1:t
        # Produce offspring
        perm = best_n(P[1:2, :], ρ, F)
        Pρ = copy(P[:, perm])
        # Recombination & mutation
C = zeros(3, λ)
ave_σ = sum(Pρ[3, :])/ρ
C[3, :] = ave_σ * exp.(τ(n) .* randn(λ))
ave_y = (sum(Pρ[1, :])./ρ, sum(Pρ[2, :])./ρ)
for i in 1:λ
C[1:2, i] = ave_y .+ C[3, i] .* randn(2)
end
        # Select the next parents
perm = best_n(C[1:2, :], μ, F)
P = copy(C[:, perm])
end
return P
end
function test_plusES(;μ::Int64, ρ::Int64, λ::Int64, F::Function, t=100)
    # Initialize the parent population (x, y, σ) x μ
P = 100 * randn(Float64, (3, μ))
for n in 1:t
        # Produce offspring
perm = best_n(P[1:2, :], ρ, F)
Pρ = copy(P[:, perm])
        # Recombination & mutation
C = zeros(3, λ)
ave_σ = sum(Pρ[3, :])/ρ
C[3, :] = ave_σ * exp.(τ(n) .* randn(λ))
ave_y = (sum(Pρ[1, :])./ρ, sum(Pρ[2, :])./ρ)
for i in 1:λ
C[1:2, i] = ave_y .+ C[3, i] .* randn(2)
end
        # Select the next parents
candidate = hcat(C, P)
perm = best_n(candidate[1:2, :], μ, F)
P = copy(candidate[:, perm])
end
return P
end
# size(X) == (dim, λ)
function get_fitness(X::Matrix{Float64}, f::Function)
ToReturn = zeros(Float64, size(X)[2])
for i in 1:size(X)[2]
ToReturn[i] = f(X[:, i])
end
return ToReturn
end
# Initialize the CMA-ES parameters
function init_CMAES(center::Array{T, 1}, σ::T, λ::Int) where T <: Real
    ## Initialization phase
    # Number of dimensions
dim = length(center)
    # Total number of individuals per generation λ and number of elites μ
λ = λ > 0 ? λ : round(Int, 4 + 3 * log(dim))
μ = λ÷2
    # Center of the normal distribution and its learning rate
centroid = T.(center)
c_m = one(T)
    # Rank-based weights
weights = [log(0.5(λ+1)) - log(i) for i in 1:μ]
weights = T.(weights ./ sum(weights))
μ_eff = 1 / sum(weights.^2)
    # Step size
p_σ = zeros(T, dim)
c_σ = (μ_eff + 2) / (dim + μ_eff + 5)
d_σ = 1 + 2 * max(0, sqrt((μ_eff - 1)/(dim + 1)) - 1) + c_σ
    # Covariance matrix: evolution path p and learning rates c for the rank-μ and rank-one updates
C = Matrix{T}(I, dim, dim)
p_c = zeros(T, dim)
c_c = (4 + μ_eff / dim) / (dim + 4 + 2 * μ_eff/dim)
c_1 = 2 / ((dim + 1.3f0)^2 + μ_eff)
c_μ = min(1 - c_1, 2 * (μ_eff - 2 + 1/μ_eff) / ((dim+2)^2 + μ_eff))
ToReturn = CMAES(dim, λ, μ, centroid, c_m, weights, μ_eff, σ, p_σ, c_σ, d_σ,
C, p_c, c_c, c_1, c_μ)
return ToReturn
end
# Sample new individuals
function samplePopulation(self::CMAES{T}; rng=MersenneTwister(123)) where T <: Real
    ## Sampling phase
    # Generate λ individuals with z ~ N(0, I)
Z = randn(rng, (self.dim, self.λ))
    # Eigendecompose C
Ei = eigen(self.C)
diagD = sqrt(abs.(Diagonal(Ei.values)))
B = Ei.vectors
BD = B * diagD
# y ~ N(0, C)
Y = BD * Z
# X ~ N(μ, σC)
X = self.centroid .+ self.σ * Y
return X
end
# Update the individuals and the matrix C
function update!(self::CMAES{T}, X, fitnesses, gen) where T <: Real
    ### Update phase
    """ Update the parameters of the normal distribution.
    X : population, shape == (dim, λ)
    fitnesses : fitness values
    gen : current generation number
    """
## 1. Selection and recombination
old_centroid = self.centroid
old_σ = self.σ
    # Extract the indices of the μ best fitness values
elite_indices = sortperm(fitnesses)[1:self.μ]
X_elite = X[:, elite_indices]
Y_elite = (X_elite .- old_centroid) ./ old_σ
X_w = (X_elite * self.weights)
Y_w = (Y_elite * self.weights)
    # Update the center of the normal distribution
self.centroid = (1 - self.c_m) * old_centroid .+ self.c_m * X_w
## 2. Step-size control
Ei = eigen(self.C)
diagD = sqrt(abs.(Diagonal(Ei.values)))
B = Ei.vectors
inv_diagD = inv(diagD)
# Note. B*Z == C_ * Y
C_ = B * inv_diagD * B'
new_p_σ = (1 - self.c_σ) * self.p_σ
new_p_σ += sqrt(self.c_σ * (2 - self.c_σ) * self.μ_eff) * C_ * Y_w
self.p_σ = new_p_σ
    E_normal = sqrt(self.dim) * (1 - 1/(4*self.dim) + 1/(21 * self.dim ^ 2)) # constant parameter
self.σ = self.σ * exp((self.c_σ / self.d_σ) * (sqrt(sum(self.p_σ .^ 2))/E_normal - 1))
if isinf(self.σ)
self.σ = old_σ
end
# 3. Covariance matrix adaptation (CMA)
    # Note: h_σ (Heaviside function) is used to suspend the update of C when the step size σ is large
left = sqrt(sum(self.p_σ.^2)) / sqrt(1 - (1 - self.c_σ) ^ (2 * (gen + 1)))
right = (1.4 + 2 / (self.dim + 1)) * E_normal
hσ = left < right ? 1 : 0
d_hσ = (1 - hσ) * self.c_c * (2 - self.c_c)
    # Update p_c
new_p_c = (1 - self.c_c) * self.p_c
new_p_c += hσ * sqrt(self.c_c * (2 - self.c_c) * self.μ_eff) * Y_w
self.p_c = new_p_c
    # Update C
new_C = (1 + self.c_1 * d_hσ - self.c_1 - self.c_μ) * self.C
new_C += self.c_1 * [i * j for i in self.p_c, j in self.p_c]
    # Naive implementation (see deap's cma.py for a smarter one)
wyy = zeros(T, (self.dim, self.dim))
for i in 1:self.μ
        y_i = Y_elite[:, i]
wyy .+= self.weights[i] * [y1 * y2 for y1 in y_i, y2 in y_i]
end
new_C += self.c_μ * wyy
self.C = new_C
end
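# Minimal driver sketch (illustrative). `update!` selects the *lowest* fitness
# values, so negate `f` to maximize it; λ = 0 asks `init_CMAES` to pick its
# default population size.
if abspath(PROGRAM_FILE) == @__FILE__
    cma = init_CMAES(zeros(2), 1.0, 0)
    rng = MersenneTwister(42)
    for gen in 1:200
        X = samplePopulation(cma, rng=rng)
        fitnesses = get_fitness(X, x -> -f(x))
        update!(cma, X, fitnesses, gen)
    end
    println("center ≈ ", cma.centroid, ", f(center) = ", f(cma.centroid))
end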
|
set -x
CURRENT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source $CURRENT_DIR/version.sh
if [ ! -f $working_directory/sources/ssh_wait-$ssh_wait_version-py2.py3-none-any.whl ]; then
wget -P $working_directory/sources/ $ssh_wait_version_url
fi
rm -Rf $working_directory/build/ssh-wait
mkdir -p $working_directory/build/ssh-wait
cd $working_directory/build/ssh-wait
mkdir python3-ssh-wait-$ssh_wait_version
cp $working_directory/sources/ssh_wait-$ssh_wait_version-py2.py3-none-any.whl python3-ssh-wait-$ssh_wait_version/
$(which cp) -af $root_directory/ssh-wait/ssh-wait.spec python3-ssh-wait-$ssh_wait_version/
tar cvzf python3-ssh-wait-$ssh_wait_version.tar.gz python3-ssh-wait-$ssh_wait_version
rpmbuild -ta python3-ssh-wait-$ssh_wait_version.tar.gz --define "_software_version $ssh_wait_version"
if [ $distribution == "Ubuntu" ]; then
cd /root
alien --to-deb --scripts /root/rpmbuild/RPMS/x86_64/python3-ssh-wait-*
mkdir -p /root/debbuild/DEBS/noarch/
mv *.deb /root/debbuild/DEBS/noarch/
fi
set +x
|
package com.mrb.remember.presentation.levels
import androidx.lifecycle.MutableLiveData
import com.mrb.remember.domain.interactor.GetCompletedDay
import com.mrb.remember.domain.interactor.GetHomework
import com.mrb.remember.domain.interactor.SaveCompletedDay
import com.mrb.remember.domain.interactor.UseCase
import com.mrb.remember.domain.model.Homework
import com.mrb.remember.domain.model.LeitnerDay
import com.mrb.remember.presentation.platform.BaseViewModel
import com.mrb.remember.testing.OpenForTesting
import javax.inject.Inject
@OpenForTesting
class LevelsViewModel @Inject constructor(
private val getCompletedDay: GetCompletedDay,
private val saveCompletedDay: SaveCompletedDay,
private val getHomework: GetHomework
) : BaseViewModel() {
var homework: MutableLiveData<Homework> = MutableLiveData()
var completedDay: MutableLiveData<LeitnerDay> = MutableLiveData()
fun init() {
showLoading()
getCompletedDay(UseCase.None()) {
it.either(::handleFailure, ::handleLeitnerDay)
}
}
fun setCompletedDay(day: LeitnerDay) {
saveCompletedDay(SaveCompletedDay.Params(day)) {
it.either(::handleFailure, ::handleCompletedDay)
}
}
private fun handleCompletedDay(day: LeitnerDay) {
this.completedDay.value = day
}
private fun handleLeitnerDay(leitnerDay: LeitnerDay) {
getHomework(GetHomework.Params(leitnerDay.dayNumber + 1)) {
it.either(::handleFailure, ::handleHomework)
}
}
private fun handleHomework(homework: Homework) {
hideLoading()
this.homework.value = homework
}
}
|
/*
* Copyright (C) 2018 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.moshi
import com.google.auto.common.AnnotationMirrors
import com.google.auto.service.AutoService
import com.squareup.kotlinpoet.KModifier.OUT
import com.squareup.kotlinpoet.TypeSpec
import com.squareup.kotlinpoet.TypeVariableName
import me.eugeniomarletti.kotlin.metadata.KotlinClassMetadata
import me.eugeniomarletti.kotlin.metadata.KotlinMetadataUtils
import me.eugeniomarletti.kotlin.metadata.declaresDefaultValue
import me.eugeniomarletti.kotlin.metadata.extractFullName
import me.eugeniomarletti.kotlin.metadata.isDataClass
import me.eugeniomarletti.kotlin.metadata.isPrimary
import me.eugeniomarletti.kotlin.metadata.jvm.getJvmConstructorSignature
import me.eugeniomarletti.kotlin.metadata.kotlinMetadata
import me.eugeniomarletti.kotlin.metadata.visibility
import me.eugeniomarletti.kotlin.processing.KotlinAbstractProcessor
import org.jetbrains.kotlin.serialization.ProtoBuf
import java.io.File
import javax.annotation.processing.Processor
import javax.annotation.processing.RoundEnvironment
import javax.lang.model.SourceVersion
import javax.lang.model.element.Element
import javax.lang.model.element.ElementKind
import javax.lang.model.element.ExecutableElement
import javax.lang.model.element.TypeElement
import javax.tools.Diagnostic.Kind.ERROR
/**
* An annotation processor that reads Kotlin data classes and generates Moshi JsonAdapters for them.
* This generates Kotlin code, and understands basic Kotlin language features like default values
* and companion objects.
*
* The generated class will match the visibility of the given data class (i.e. if it's internal, the
* adapter will also be internal).
*
* If you define a companion object, a jsonAdapter() extension function will be generated onto it.
* If you don't want this though, you can use the runtime [JsonClass] factory implementation.
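 *
 * Example (illustrative):
 *
 * ```
 * @JsonClass(generateAdapter = true)
 * data class Person(val name: String, val age: Int = 0)
 * ```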
*/
@AutoService(Processor::class)
class JsonClassCodeGenProcessor : KotlinAbstractProcessor(), KotlinMetadataUtils {
private val annotation = JsonClass::class.java
override fun getSupportedAnnotationTypes() = setOf(annotation.canonicalName)
override fun getSupportedSourceVersion(): SourceVersion = SourceVersion.latest()
override fun process(annotations: Set<TypeElement>, roundEnv: RoundEnvironment): Boolean {
for (type in roundEnv.getElementsAnnotatedWith(annotation)) {
val jsonClass = type.getAnnotation(annotation)
if (jsonClass.generateAdapter) {
val adapterGenerator = processElement(type) ?: continue
adapterGenerator.generateAndWrite()
}
}
return true
}
private fun processElement(element: Element): AdapterGenerator? {
val metadata = element.kotlinMetadata
if (metadata !is KotlinClassMetadata) {
errorMustBeDataClass(element)
return null
}
val classData = metadata.data
val (nameResolver, classProto) = classData
fun ProtoBuf.Type.extractFullName() = extractFullName(classData)
if (!classProto.isDataClass) {
errorMustBeDataClass(element)
return null
}
val fqClassName = nameResolver.getString(classProto.fqName).replace('/', '.')
val packageName = nameResolver.getString(classProto.fqName).substringBeforeLast('/').replace(
'/', '.')
val hasCompanionObject = classProto.hasCompanionObjectName()
// todo allow custom constructor
val protoConstructor = classProto.constructorList
.single { it.isPrimary }
val constructorJvmSignature = protoConstructor.getJvmConstructorSignature(nameResolver,
classProto.typeTable)
val constructor = classProto.fqName
.let(nameResolver::getString)
.replace('/', '.')
.let(elementUtils::getTypeElement)
.enclosedElements
.mapNotNull {
it.takeIf { it.kind == ElementKind.CONSTRUCTOR }?.let { it as ExecutableElement }
}
.first()
// TODO Temporary until jvm method signature matching is better
// .single { it.jvmMethodSignature == constructorJvmSignature }
val parameters = protoConstructor
.valueParameterList
.mapIndexed { index, valueParameter ->
val paramName = nameResolver.getString(valueParameter.name)
val nullable = valueParameter.type.nullable
val paramFqcn = valueParameter.type.extractFullName()
.replace("`", "")
.removeSuffix("?")
val actualElement = constructor.parameters[index]
val serializedName = actualElement.getAnnotation(Json::class.java)?.name
?: paramName
val jsonQualifiers = AnnotationMirrors.getAnnotatedAnnotations(actualElement,
JsonQualifier::class.java)
PropertyGenerator(
name = paramName,
serializedName = serializedName,
hasDefault = valueParameter.declaresDefaultValue,
nullable = nullable,
typeName = valueParameter.type.asTypeName(nameResolver, classProto::getTypeParameter),
unaliasedName = valueParameter.type.asTypeName(nameResolver,
classProto::getTypeParameter, true),
jsonQualifiers = jsonQualifiers)
}
val genericTypeNames = classProto.typeParameterList
.map {
val variance = it.variance.asKModifier().let {
// We don't redeclare out variance here
if (it == OUT) {
null
} else {
it
}
}
TypeVariableName(
name = nameResolver.getString(it.name),
bounds = *(it.upperBoundList
.map { it.asTypeName(nameResolver, classProto::getTypeParameter) }
.toTypedArray()),
variance = variance)
.reified(it.reified)
}.let {
if (it.isEmpty()) {
null
} else {
it
}
}
return AdapterGenerator(
fqClassName = fqClassName,
packageName = packageName,
propertyList = parameters,
originalElement = element,
hasCompanionObject = hasCompanionObject,
visibility = classProto.visibility!!,
genericTypeNames = genericTypeNames,
elements = elementUtils)
}
private fun errorMustBeDataClass(element: Element) {
messager.printMessage(ERROR,
"@${JsonClass::class.java.simpleName} can't be applied to $element: must be a Kotlin data class",
element)
}
private fun AdapterGenerator.generateAndWrite() {
val fileSpec = generateFile()
val adapterName = fileSpec.members.filterIsInstance<TypeSpec>().first().name!!
val outputDir = generatedDir ?: mavenGeneratedDir(adapterName)
fileSpec.writeTo(outputDir)
}
private fun mavenGeneratedDir(adapterName: String): File {
// Hack since the maven plugin doesn't supply `kapt.kotlin.generated` option
// Bug filed at https://youtrack.jetbrains.com/issue/KT-22783
val file = filer.createSourceFile(adapterName).toUri().let(::File)
return file.parentFile.also { file.delete() }
}
}
|
package hackerrank
import "regexp"
// BracesValidation returns array of validated results.
func BracesValidation(values []string) []string {
	var result []string
	for _, list := range values {
		status := "YES"
		var stack string
		for len(list) > 0 {
			char, listL := divideFirst(list)
			list = listL
			ok, _ := regexp.MatchString(`[(\[{]`, char)
			if ok {
				// opening brace: push onto the stack
				stack += char
				continue
			}
			// closing brace: map it to its expected opener
			var expected string
			switch char {
			case ")":
				expected = "("
			case "]":
				expected = "["
			case "}":
				expected = "{"
			}
			rest, last := "", ""
			if len(stack) > 0 {
				rest, last = divideLast(stack)
			}
			if last != expected {
				status = "NO"
				break
			}
			stack = rest
		}
		if len(stack) > 0 {
			status = "NO"
		}
		result = append(result, status)
	}
	return result
}
func divideFirst(values string) (first string, rest string) {
first, rest = string(values[0]), values[1:]
return
}
func divideLast(values string) (rest string, last string) {
	rest, last = values[:len(values)-1], string(values[len(values)-1])
return
}
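
// Example (illustrative sketch):
//
//	BracesValidation([]string{"{[()]}", "{[(])}"}) // -> ["YES", "NO"]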
|
/*
* File:
* System:
* Module:
* Author:
* Copyright:
* Source: $HeadURL: $
* Last modified by: $Author: $
* Date: $Date: $
* Version: $Revision: $
* Description:
* Preconditions:
*/
package stallone.algebra;
import stallone.api.complex.IComplexIterator;
import stallone.api.doubles.IDoubleIterator;
import stallone.api.complex.IComplexArray;
import stallone.api.doubles.IDoubleArray;
import static stallone.doubles.DoubleArrayTest.*;
/**
 * Generic implementation of array sums for real and complex operands.
*
* @author Frank Noe
*/
public class ArraySum //implements IMatrixSum
{
public IDoubleArray addToNewDense(final IDoubleArray a, final IDoubleArray b)
{
IDoubleArray target = a.copy();
sumDense(a, b, target);
return target;
}
public IDoubleArray addToNewSparse(final IDoubleArray a, final IDoubleArray b)
{
IDoubleArray target = a.copy();
sumSparse(a, b, target);
return target;
}
public void addToSparse(final IDoubleArray a, final IDoubleArray b)
{
assertEqualDimensions(a, b);
for (IDoubleIterator it = b.nonzeroIterator(); it.hasNext(); it.advance())
{
int i = it.row();
int j = it.column();
a.set(i, j, a.get(i, j) + it.get());
}
}
public void addToDense(final IDoubleArray a, final IDoubleArray b)
{
sumDense(a, b, a);
}
public void sumDense(final IDoubleArray a, final IDoubleArray b, final IDoubleArray target)
{
assertEqualDimensions(a, b);
assertEqualDimensions(a, target);
// direct access may not be efficient
// Extract some parameters for easier access
final int rowsA = a.rows();
final int colsA = a.columns();
for (int i = 0; i < rowsA; i++)
{
for (int j = 0; j < colsA; j++)
{
target.set(i, j, a.get(i, j) + b.get(i, j));
}
}
}
//@Override
public void sumSparse(final IDoubleArray a, final IDoubleArray b, final IDoubleArray target)
{
assertEqualDimensions(a, b);
assertEqualDimensions(a, target);
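        // When not summing in place, first copy a's nonzero entries into
        // target, then accumulate b's nonzeros on top of them.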
if (a != target)
{
target.zero();
for (IDoubleIterator it = a.nonzeroIterator(); it.hasNext(); it.advance())
{
target.set(it.row(), it.column(), it.get());
}
}
for (IDoubleIterator it = b.nonzeroIterator(); it.hasNext(); it.advance())
{
int i = it.row();
int j = it.column();
target.set(i, j, target.get(i, j) + it.get());
}
}
public IComplexArray addToNewDense(final IComplexArray a, final IComplexArray b)
{
IComplexArray target = a.copy();
sumDense(a, b, target);
return target;
}
public IComplexArray addToNewSparse(final IComplexArray a, final IComplexArray b)
{
IComplexArray target = a.copy();
sumSparse(a, b, target);
return target;
}
public void addToDense(final IComplexArray a, final IComplexArray b)
{
sumDense(a, b, a);
}
public void addToSparse(final IComplexArray a, final IComplexArray b)
{
assertEqualDimensions(a, b);
for (IComplexIterator it = b.nonzeroComplexIterator(); it.hasNext(); it.advance())
{
int i = it.row();
int j = it.column();
            a.set(i, j, a.getRe(i, j) + it.getRe(), a.getIm(i, j) + it.getIm());
}
}
//@Override
public void sumDense(final IComplexArray a, final IComplexArray b, final IComplexArray target)
{
assertEqualDimensions(a, b);
assertEqualDimensions(a, target);
// Extract some parameters for easier access
final int colsA = a.columns();
final int rowsA = a.rows();
        // For each row ...
        for (int i = 0; i < rowsA; i++)
        {
            // For each column
            for (int j = 0; j < colsA; j++)
            {
                target.set(i, j, a.getRe(i, j) + b.getRe(i, j), a.getIm(i, j) + b.getIm(i, j));
            }
        }
}
//@Override
public void sumSparse(final IComplexArray a, final IComplexArray b, final IComplexArray target)
{
assertEqualDimensions(a, b);
assertEqualDimensions(a, target);
if (a != target)
{
target.zero();
for (IComplexIterator it = a.nonzeroComplexIterator(); it.hasNext(); it.advance())
{
target.set(it.row(), it.column(), it.getRe(), it.getIm());
}
}
for (IComplexIterator it = b.nonzeroComplexIterator(); it.hasNext(); it.advance())
{
int i = it.row();
int j = it.column();
target.set(i, j, target.getRe(i, j) + it.getRe(), target.getIm(i, j) + it.getIm());
}
}
}
|
import unittest, json
from src import tkfinder
kazuya = {
"name": "kazuya",
"proper_name": "Kazuya",
"local_json": "kazuya.json",
"online_webpage": "http://rbnorway.org/kazuya-t7-frames",
"portrait": "https://i.imgur.com/kMvhDfU.jpg"
}
class MyTestCase(unittest.TestCase):
def test_get_commands(self):
result = tkfinder.get_commands_from("hwoarang")
self.assertIn("1, 1, 3, 3", result)
def test_get_close_moves(self):
close_moves = tkfinder.get_similar_moves("d/f+1, 2", "hwoarang")
self.assertIn("d/f+1, 3", close_moves)
def test_is_command_in_alias(self):
item = {'Alias': ["hs", "hellsweep", "Giant swing", "u/f3"]}
result = tkfinder.is_command_in_alias("hellsweep", item)
self.assertTrue(result)
result = tkfinder.is_command_in_alias("he", item)
self.assertFalse(result)
result = tkfinder.is_command_in_alias("uf3", item)
self.assertTrue(result)
def test_get_cha_name(self):
result = tkfinder.correct_character_name("hwoarang")
self.assertEqual("hwoarang", result)
result = tkfinder.correct_character_name("hwo")
self.assertEqual("hwoarang", result)
result = tkfinder.correct_character_name("kazu")
self.assertEqual(None, result)
def test_get_move_by_type(self):
self.assertIn("in rage f,n,d,d/f+1+4", tkfinder.get_by_move_type(kazuya, "Rage Drive"))
self.assertIn("d/f+2", tkfinder.get_by_move_type(kazuya, "Homing"))
def test_get_cha_move(self):
self.assertEqual("f,f,f+3", tkfinder.get_move(kazuya, "wr3")["Command"])
self.assertEqual("1,1,2", tkfinder.get_move(kazuya, "112")["Command"])
self.assertEqual("f,n,d,d/f+4,1", tkfinder.get_move(kazuya, "hs")["Command"])
self.assertEqual("f,n,d,d/f+4,1", tkfinder.get_move(kazuya, "cd41")["Command"])
self.assertEqual("f,n,d/f+2", tkfinder.get_move(kazuya, "ewgf")["Command"])
self.assertEqual("WS+1,2", tkfinder.get_move(kazuya, "ws12")["Command"])
self.assertEqual("b+2,1", tkfinder.get_move(kazuya, "b21")["Command"])
marduk = {
"name": "marduk",
"proper_name": "Marduk",
"local_json": "marduk.json",
"online_webpage": "http://rbnorway.org/marduk-t7-frames",
"portrait": "https://i.imgur.com/2OtX6nd.png"
}
self.assertEqual("d/f+3, d/f+1, 2", tkfinder.get_move(marduk, "df3df12")["Command"])
self.assertEqual("d/f+3, 1, d+2", tkfinder.get_move(marduk, "df31,d+2")["Command"])
self.assertEqual("d/f+3, 1, d+2", tkfinder.get_move(marduk, "df3,1d+2")["Command"])
self.assertEqual("d/f+3, 1, d+2", tkfinder.get_move(marduk, "df+3,1d2")["Command"])
self.assertEqual("u/b or u or u/f+3", tkfinder.get_move(marduk, "u3")["Command"])
self.assertEqual("u/b or u or u/f+3", tkfinder.get_move(marduk, "uf3")["Command"])
self.assertEqual("u/b or u or u/f+3", tkfinder.get_move(marduk, "ub3")["Command"])
leo = {
"name": "leo",
"proper_name": "Leo",
"local_json": "leo.json",
"online_webpage": "http://rbnorway.org/leo-t7-frames",
"portrait": "https://i.imgur.com/i1CO8SB.jpg"
}
self.assertEqual("WS+4, 1+2", tkfinder.get_move(leo, "ws41+2")["Command"])
self.assertEqual("b+1, 4", tkfinder.get_move(leo, "b14")["Command"])
self.assertEqual("KNK 3, 4", tkfinder.get_move(leo, "knk 34")["Command"])
self.assertEqual("KNK 1+2", tkfinder.get_move(leo, "knk 1+2")["Command"])
self.assertEqual("FC+d/f+3", tkfinder.get_move(leo, "fc df3")["Command"])
kazumi = {
"name": "kazumi",
"proper_name": "Kazumi",
"local_json": "kazumi.json",
"online_webpage": "http://rbnorway.org/kazumi-t7-frames",
"portrait": "https://i.imgur.com/ZNiaFwL.jpg"
}
self.assertEqual("b, f+2, 1, 1+2", tkfinder.get_move(kazumi, "bf211+2")["Command"])
self.assertEqual("u/f+4", tkfinder.get_move(kazumi, "uf4")["Command"])
chloe = {
"name": "lucky_chloe",
"proper_name": "Lucky Chloe",
"local_json": "lucky_chloe.json",
"online_webpage": "http://rbnorway.org/lucky-chloe-t7-frames",
"portrait": "https://i.imgur.com/iNXYpwT.jpg"
}
self.assertEqual("u/f+3(u+3 or u/b+3)", tkfinder.get_move(chloe, "uf3")["Command"])
def test_ling(self):
ling = {
"name": "xiaoyu",
"proper_name": "Xiaoyu",
"local_json": "xiaoyu.json",
"online_webpage": "http://rbnorway.org/xiaoyu-t7-frames",
"portrait": "https://i.imgur.com/zuojLtJ.jpg"
}
self.assertEqual("AOP u/b or u or u/f+3, 3", tkfinder.get_move(ling, "AOP uf33")["Command"])
self.assertEqual("AOP u/b or u or u/f+3, 3", tkfinder.get_move(ling, "AOP ub33")["Command"])
self.assertEqual("AOP u/b or u or u/f+3, 3", tkfinder.get_move(ling, "AOP u33")["Command"])
def test_move_simplifier(self):
move = "df+3, df+1, 1+2"
self.assertEqual("df3df11+2", tkfinder.move_simplifier(move))
def test_none(self):
entry1 = json.loads("[{\"Gif\": \"\"}]")
entry2 = json.loads("[{\"Gif\": \"something\"}]")
entry3 = json.loads("[{\"Gif\": null}]")
entry4 = json.loads("[{\"Test\": \"test\"}]")
self.assertTrue(not entry1[0]["Gif"])
self.assertTrue(entry2[0]["Gif"])
self.assertTrue(not entry3[0]["Gif"])
        self.assertTrue('Gif' not in entry4[0])
self.assertTrue("ws12" == "ws12")
if __name__ == '__main__':
unittest.main()
|
#ifndef TMINE_RNG_HPP
#define TMINE_RNG_HPP
#include <cstdint>
#include <functional>
namespace tmine
{
// ptr to a function that provides random numbers in a range
// min and max of range are both inclusive
using rng_func = std::function<int32_t(int32_t, int32_t)>;
// default implementation of rng_func using std::mt19937 engine
int32_t default_rng(int32_t min, int32_t max);
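
// example usage (sketch):
//   tmine::rng_func rng = tmine::default_rng;
//   int32_t roll = rng(1, 6); // six-sided die; both bounds inclusive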
} // namespace tmine
#endif
|
export { default as ArrayChip } from './ArrayChip';
export { default as DurationChip } from './DurationChip';
export {
default as HttpChip,
HttpStatusChip,
getHttpStatusCode,
} from './HttpChip';
export { default as NavigationChip } from './NavigationChip';
export { default as TitleChip } from './TitleChip';
export { default as GenericChip } from './GenericChip';
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^settings$', views.settings, name='settings'),
url(r'^settings/profile$', views.settings_profile, name='settings-profile'),
url(r'^settings/keys$', views.settings_keys, name='settings-keys'),
url(r'^(?P<username>[\w-]+)$', views.user, name='user'),
url(r'^(?P<username>[\w-]+)/(?P<project_slug>[\w-]+)$', views.project, name='project'),
]
|
/*
Copyright 2017-2019 VMware, Inc.
SPDX-License-Identifier: BSD-2-Clause
*/
package com.vmware.weathervane.workloadDriver.common.representation;
import java.util.List;
import com.vmware.weathervane.workloadDriver.common.core.Run;
import com.vmware.weathervane.workloadDriver.common.core.WorkloadStatus;
public class RunStateResponse {
private String status;
private String message;
private Run.RunState state;
private List<WorkloadStatus> workloadStati;
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
public Run.RunState getState() {
return state;
}
public void setState(Run.RunState state) {
this.state = state;
}
public List<WorkloadStatus> getWorkloadStati() {
return workloadStati;
}
public void setWorkloadStati(List<WorkloadStatus> workloadStati) {
this.workloadStati = workloadStati;
}
}
|
require 'rails_helper'
RSpec.describe DraftPicksChannel, type: :channel do
let(:league) { create :league }
it 'successfully subscribes' do
subscribe league_id: league.id
expect(subscription).to be_confirmed
end
it 'rejects a subscription if the league_id is not present' do
subscribe league_id: nil
expect(subscription).to be_rejected
end
it 'rejects a subscription if the league_id is invalid' do
subscribe league_id: 'invalid'
expect(subscription).to be_rejected
end
end
|
/**
* @file SpeciesThermoFactory.cpp
*/
// Copyright 2001 California Institute of Technology
#ifdef WIN32
#pragma warning(disable:4786)
#endif
#include "SpeciesThermoFactory.h"
#include "SpeciesThermo.h"
#include "NasaThermo.h"
#include "ShomateThermo.h"
//#include "PolyThermoMgr.h"
#include "SimpleThermo.h"
#include "GeneralSpeciesThermo.h"
#include "Mu0Poly.h"
#include "SpeciesThermoMgr.h"
#include "speciesThermoTypes.h"
#include "xml.h"
#include "ctml.h"
using namespace ctml;
namespace Cantera {
SpeciesThermoFactory* SpeciesThermoFactory::s_factory = 0;
/**
* Examine the types of species thermo parameterizations,
* and return a SpeciesThermo manager that can handle the
* parameterizations present.
*/
static void getSpeciesThermoTypes(XML_Node* node,
int& has_nasa, int& has_shomate, int& has_simple,
int &has_other) {
const XML_Node& sparray = *node;
vector<XML_Node*> sp;
// get all of the species nodes
sparray.getChildren("species",sp);
size_t n, ns = sp.size();
for (n = 0; n < ns; n++) {
XML_Node* spNode = sp[n];
if (spNode->hasChild("thermo")) {
const XML_Node& th = sp[n]->child("thermo");
if (th.hasChild("NASA")) has_nasa = 1;
if (th.hasChild("Shomate")) has_shomate = 1;
if (th.hasChild("const_cp")) has_simple = 1;
if (th.hasChild("poly")) {
if (th.child("poly")["order"] == "1") has_simple = 1;
else throw CanteraError("newSpeciesThermo",
"poly with order > 1 not yet supported");
}
if (th.hasChild("Mu0")) has_other = 1;
} else {
throw UnknownSpeciesThermoModel("getSpeciesThermoTypes:",
spNode->attrib("name"), "missing");
}
}
}
/**
* Return a species thermo manager to handle the parameterizations
* specified in a CTML phase specification.
*/
SpeciesThermo* SpeciesThermoFactory::newSpeciesThermo(XML_Node* node) {
int inasa = 0, ishomate = 0, isimple = 0, iother = 0;
try {
getSpeciesThermoTypes(node, inasa, ishomate, isimple, iother);
} catch (UnknownSpeciesThermoModel) {
iother = 1;
popError();
}
if (iother) {
writelog("returning new GeneralSpeciesThermo");
return new GeneralSpeciesThermo();
}
return newSpeciesThermo(NASA*inasa
+ SHOMATE*ishomate + SIMPLE*isimple);
}
SpeciesThermo* SpeciesThermoFactory::
newSpeciesThermo(vector<XML_Node*> nodes) {
int n = static_cast<int>(nodes.size());
int inasa = 0, ishomate = 0, isimple = 0, iother = 0;
for (int j = 0; j < n; j++) {
try {
getSpeciesThermoTypes(nodes[j], inasa, ishomate, isimple, iother);
} catch (UnknownSpeciesThermoModel) {
iother = 1;
popError();
}
}
if (iother) {
return new GeneralSpeciesThermo();
}
return newSpeciesThermo(NASA*inasa
+ SHOMATE*ishomate + SIMPLE*isimple);
}
/**
* @todo is this used?
*/
SpeciesThermo* SpeciesThermoFactory::
newSpeciesThermoOpt(vector<XML_Node*> nodes) {
int n = static_cast<int>(nodes.size());
int inasa = 0, ishomate = 0, isimple = 0, iother = 0;
for (int j = 0; j < n; j++) {
try {
getSpeciesThermoTypes(nodes[j], inasa, ishomate, isimple, iother);
} catch (UnknownSpeciesThermoModel) {
iother = 1;
popError();
}
}
if (iother) {
return new GeneralSpeciesThermo();
}
return newSpeciesThermo(NASA*inasa
+ SHOMATE*ishomate + SIMPLE*isimple);
}
SpeciesThermo* SpeciesThermoFactory::newSpeciesThermo(int type) {
switch (type) {
case NASA:
return new NasaThermo;
case SHOMATE:
return new ShomateThermo;
case SIMPLE:
return new SimpleThermo;
case NASA + SHOMATE:
return new SpeciesThermoDuo<NasaThermo, ShomateThermo>;
case NASA + SIMPLE:
return new SpeciesThermoDuo<NasaThermo, SimpleThermo>;
case SHOMATE + SIMPLE:
return new SpeciesThermoDuo<ShomateThermo, SimpleThermo>;
default:
throw UnknownSpeciesThermo(
"SpeciesThermoFactory::newSpeciesThermo",type);
return 0;
}
}
/// Check the continuity of properties at the midpoint
/// temperature.
void NasaThermo::checkContinuity(string name, double tmid, const doublereal* clow,
doublereal* chigh) {
// heat capacity
doublereal cplow = poly4(tmid, clow);
doublereal cphigh = poly4(tmid, chigh);
doublereal delta = cplow - cphigh;
if (fabs(delta/cplow) > 0.001) {
writelog("\n\n**** WARNING ****\nFor species "+name+
", discontinuity in cp/R detected at Tmid = "
+fp2str(tmid)+"\n");
writelog("\tValue computed using low-temperature polynomial: "
+fp2str(cplow)+".\n");
writelog("\tValue computed using high-temperature polynomial: "
+fp2str(cphigh)+".\n");
}
// enthalpy
doublereal hrtlow = enthalpy_RT(tmid, clow);
doublereal hrthigh = enthalpy_RT(tmid, chigh);
delta = hrtlow - hrthigh;
if (fabs(delta/hrtlow) > 0.001) {
writelog("\n\n**** WARNING ****\nFor species "+name+
", discontinuity in h/RT detected at Tmid = "
+fp2str(tmid)+"\n");
writelog("\tValue computed using low-temperature polynomial: "
+fp2str(hrtlow)+".\n");
writelog("\tValue computed using high-temperature polynomial: "
+fp2str(hrthigh)+".\n");
}
// entropy
doublereal srlow = entropy_R(tmid, clow);
doublereal srhigh = entropy_R(tmid, chigh);
delta = srlow - srhigh;
if (fabs(delta/srlow) > 0.001) {
writelog("\n\n**** WARNING ****\nFor species "+name+
", discontinuity in s/R detected at Tmid = "
+fp2str(tmid)+"\n");
writelog("\tValue computed using low-temperature polynomial: "
+fp2str(srlow)+".\n");
writelog("\tValue computed using high-temperature polynomial: "
+fp2str(srhigh)+".\n");
}
}
/**
* Install a NASA polynomial thermodynamic property
* parameterization for species k into a SpeciesThermo instance.
* This is called by method installThermoForSpecies if a NASA
* block is found in the XML input.
*/
static void installNasaThermoFromXML(string speciesName,
SpeciesThermo& sp, int k,
const XML_Node* f0ptr, const XML_Node* f1ptr) {
doublereal tmin0, tmax0, tmin1, tmax1, tmin, tmid, tmax;
const XML_Node& f0 = *f0ptr;
// default to a single temperature range
bool dualRange = false;
        // but if f1ptr is supplied, then it is a two-range
// parameterization
if (f1ptr) {dualRange = true;}
tmin0 = fpValue(f0["Tmin"]);
tmax0 = fpValue(f0["Tmax"]);
tmin1 = tmax0;
tmax1 = tmin1 + 0.0001;
if (dualRange) {
tmin1 = fpValue((*f1ptr)["Tmin"]);
tmax1 = fpValue((*f1ptr)["Tmax"]);
}
vector_fp c0, c1;
if (fabs(tmax0 - tmin1) < 0.01) {
// f0 has the lower T data, and f1 the higher T data
tmin = tmin0;
tmid = tmax0;
tmax = tmax1;
getFloatArray(f0.child("floatArray"), c0, false);
if (dualRange)
getFloatArray(f1ptr->child("floatArray"), c1, false);
else {
// if there is no higher range data, then copy c0 to c1.
c1.resize(7,0.0);
copy(c0.begin(), c0.end(), c1.begin());
}
}
else if (fabs(tmax1 - tmin0) < 0.01) {
// f1 has the lower T data, and f0 the higher T data
tmin = tmin1;
tmid = tmax1;
tmax = tmax0;
getFloatArray(f1ptr->child("floatArray"), c0, false);
getFloatArray(f0.child("floatArray"), c1, false);
}
else {
throw CanteraError("installNasaThermo",
"non-continuous temperature ranges.");
}
// The NasaThermo species property manager expects the
// coefficients in a different order, so rearrange them.
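        // Resulting layout (inferred from the copies below):
        //   c[0]        = Tmid
        //   c[1], c[2]  = low-T  a6, a7;   c[3]..c[7]   = low-T  a1..a5
        //   c[8], c[9]  = high-T a6, a7;   c[10]..c[14] = high-T a1..a5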
array_fp c(15);
c[0] = tmid;
doublereal p0 = OneAtm;
c[1] = c0[5];
c[2] = c0[6];
copy(c0.begin(), c0.begin()+5, c.begin() + 3);
c[8] = c1[5];
c[9] = c1[6];
copy(c1.begin(), c1.begin()+5, c.begin() + 10);
sp.install(speciesName, k, NASA, &c[0], tmin, tmax, p0);
}
#ifdef INCL_NASA96
/**
* Install a NASA96 polynomial thermodynamic property
* parameterization for species k into a SpeciesThermo instance.
*/
static void installNasa96ThermoFromXML(string speciesName,
SpeciesThermo& sp, int k,
const XML_Node* f0ptr, const XML_Node* f1ptr) {
doublereal tmin0, tmax0, tmin1, tmax1, tmin, tmid, tmax;
const XML_Node& f0 = *f0ptr;
bool dualRange = false;
if (f1ptr) {dualRange = true;}
tmin0 = fpValue(f0["Tmin"]);
tmax0 = fpValue(f0["Tmax"]);
tmin1 = tmax0;
tmax1 = tmin1 + 0.0001;
if (dualRange) {
tmin1 = fpValue((*f1ptr)["Tmin"]);
tmax1 = fpValue((*f1ptr)["Tmax"]);
}
vector_fp c0, c1;
if (fabs(tmax0 - tmin1) < 0.01) {
tmin = tmin0;
tmid = tmax0;
tmax = tmax1;
getFloatArray(f0.child("floatArray"), c0, false);
if (dualRange)
getFloatArray(f1ptr->child("floatArray"), c1, false);
else {
c1.resize(7,0.0);
copy(c0.begin(), c0.end(), c1.begin());
}
}
else if (fabs(tmax1 - tmin0) < 0.01) {
tmin = tmin1;
tmid = tmax1;
tmax = tmax0;
getFloatArray(f1ptr->child("floatArray"), c0, false);
getFloatArray(f0.child("floatArray"), c1, false);
}
else {
throw CanteraError("installNasaThermo",
"non-continuous temperature ranges.");
}
array_fp c(15);
c[0] = tmid;
doublereal p0 = OneAtm;
c[1] = c0[5];
c[2] = c0[6];
copy(c0.begin(), c0.begin()+5, c.begin() + 3);
c[8] = c1[5];
c[9] = c1[6];
copy(c1.begin(), c1.begin()+5, c.begin() + 10);
sp.install(speciesName, k, NASA, &c[0], tmin, tmax, p0);
}
#endif
/**
* Install a Shomate polynomial thermodynamic property
* parameterization for species k.
*/
static void installShomateThermoFromXML(string speciesName,
SpeciesThermo& sp, int k,
const XML_Node* f0ptr, const XML_Node* f1ptr) {
doublereal tmin0, tmax0, tmin1, tmax1, tmin, tmid, tmax;
const XML_Node& f0 = *f0ptr;
bool dualRange = false;
if (f1ptr) {dualRange = true;}
tmin0 = fpValue(f0["Tmin"]);
tmax0 = fpValue(f0["Tmax"]);
tmin1 = tmax0;
tmax1 = tmin1 + 0.0001;
if (dualRange) {
tmin1 = fpValue((*f1ptr)["Tmin"]);
tmax1 = fpValue((*f1ptr)["Tmax"]);
}
vector_fp c0, c1;
if (fabs(tmax0 - tmin1) < 0.01) {
tmin = tmin0;
tmid = tmax0;
tmax = tmax1;
getFloatArray(f0.child("floatArray"), c0, false);
if (dualRange)
getFloatArray(f1ptr->child("floatArray"), c1, false);
else {
c1.resize(7,0.0);
copy(c0.begin(), c0.begin()+7, c1.begin());
}
}
else if (fabs(tmax1 - tmin0) < 0.01) {
tmin = tmin1;
tmid = tmax1;
tmax = tmax0;
getFloatArray(f1ptr->child("floatArray"), c0, false);
getFloatArray(f0.child("floatArray"), c1, false);
}
else {
throw CanteraError("installShomateThermo",
"non-continuous temperature ranges.");
}
array_fp c(15);
c[0] = tmid;
doublereal p0 = OneAtm;
copy(c0.begin(), c0.begin()+7, c.begin() + 1);
copy(c1.begin(), c1.begin()+7, c.begin() + 8);
sp.install(speciesName, k, SHOMATE, &c[0], tmin, tmax, p0);
}
/**
* Install a constant-cp thermodynamic property
* parameterization for species k.
*/
static void installSimpleThermoFromXML(string speciesName,
SpeciesThermo& sp, int k,
const XML_Node& f) {
doublereal tmin, tmax;
tmin = fpValue(f["Tmin"]);
tmax = fpValue(f["Tmax"]);
if (tmax == 0.0) tmax = 1.0e30;
vector_fp c(4);
c[0] = getFloat(f, "t0", "-");
c[1] = getFloat(f, "h0", "-");
c[2] = getFloat(f, "s0", "-");
c[3] = getFloat(f, "cp0", "-");
doublereal p0 = OneAtm;
sp.install(speciesName, k, SIMPLE, &c[0], tmin, tmax, p0);
}
/**
* Install a species thermodynamic property parameterization
* for one species into a species thermo manager.
* @param k species number
* @param s XML node specifying species
* @param spthermo species thermo manager
*/
void SpeciesThermoFactory::
installThermoForSpecies(int k, const XML_Node& s,
SpeciesThermo& spthermo) {
/*
* Check to see that the species block has a thermo block
* before processing. Throw an error if not there.
*/
if (!(s.hasChild("thermo"))) {
throw UnknownSpeciesThermoModel("installSpecies",
s["name"], "<nonexistent>");
}
const XML_Node& thermo = s.child("thermo");
const vector<XML_Node*>& tp = thermo.children();
int nc = static_cast<int>(tp.size());
if (nc == 1) {
const XML_Node* f = tp[0];
if (f->name() == "Shomate") {
installShomateThermoFromXML(s["name"], spthermo, k, f, 0);
}
else if (f->name() == "const_cp") {
installSimpleThermoFromXML(s["name"], spthermo, k, *f);
}
else if (f->name() == "NASA") {
installNasaThermoFromXML(s["name"], spthermo, k, f, 0);
}
else if (f->name() == "Mu0") {
installMu0ThermoFromXML(s["name"], spthermo, k, f);
}
else {
throw UnknownSpeciesThermoModel("installSpecies",
s["name"], f->name());
}
}
else if (nc == 2) {
const XML_Node* f0 = tp[0];
const XML_Node* f1 = tp[1];
if (f0->name() == "NASA" && f1->name() == "NASA") {
installNasaThermoFromXML(s["name"], spthermo, k, f0, f1);
}
else if (f0->name() == "Shomate" && f1->name() == "Shomate") {
installShomateThermoFromXML(s["name"], spthermo, k, f0, f1);
}
else {
throw UnknownSpeciesThermoModel("installSpecies", s["name"],
f0->name() + " and "
+ f1->name());
}
}
else {
throw UnknownSpeciesThermoModel("installSpecies", s["name"],
"multiple");
}
}
}
|
import React = require('react');
const { connect } = require('react-redux');
import FlatButton from 'material-ui/FlatButton';
import Dialog from 'material-ui/Dialog';
import Map from './Map';
import { track, fetchLocation } from '../actions';
export default connect(null, { fetchLocation, track })(
class extends React.PureComponent<
{
isOpen: boolean;
latitude: number;
longitude: number;
onSubmit: Function;
onCancel: Function;
fetchLocation: Function;
track: typeof track;
},
{ latitude: number; longitude: number }
> {
constructor(props) {
super(props);
this.state = this.getInitialState();
}
getInitialState() {
return {
latitude: this.props.latitude,
longitude: this.props.longitude
};
}
handleFetchLocation = async () => {
const { latitude, longitude } = await this.props.fetchLocation();
if (Number.isFinite(latitude) && Number.isFinite(longitude)) this.setState({ latitude, longitude });
this.props.track('Location', 'location-fetch');
};
handleChange = state => this.setState(state);
handleSubmit = () => {
this.props.onSubmit(this.state);
};
handleCancel = () => {
this.setState(this.getInitialState());
this.props.onCancel();
};
componentWillUpdate(nextProps) {
if (nextProps.latitude !== this.props.latitude || nextProps.longitude !== this.props.longitude) {
this.setState({
latitude: nextProps.latitude,
longitude: nextProps.longitude
});
}
}
render() {
const { onSubmit, isOpen } = this.props;
const { latitude, longitude } = this.state;
const actions = [
<FlatButton label="Use network location" onClick={this.handleFetchLocation} style={{ float: 'left' }} />,
<FlatButton label="Cancel" onClick={this.handleCancel} />,
<FlatButton label="Submit" primary={true} onClick={this.handleSubmit} />
];
return (
<Dialog title="Select your location" actions={actions} modal open={isOpen} onRequestClose={this.handleCancel}>
<Map
containerElement={<div style={{ height: `400px` }} />}
mapElement={<div style={{ height: `100%` }} />}
latitude={latitude}
longitude={longitude}
onChange={this.handleChange}
/>
</Dialog>
);
}
}
);
|
require "payler/version"
module Payler
class << self
# Starts payler session and returns hash
# required: order_id, amount
# optional: type, product, currency, recurrent, total, template, lang, userdata, pay_page_param_*
# defaults: type=OneStep, currency=RUB, recurrent=true
#
# = Success request
# Payler.start_session({order_id: '123', amount: '100'})
# =>
# {:amount=>100,
# :session_id=>"v5aqGue1Hpw9TzGBBSkBRf02KDSv3Sfd7jhN",
# :order_id=>"123"}
#
# = Error
# Payler.start_session({order_id: '123'})
# =>
# {:error=>{:code=>20, :message=>"Некорректное значение параметра: amount"}}
def start_session(params)
params.reverse_merge!(session_params)
send_request('StartSession', params)
end
# Creates recurrent payment
# card template object with fields: recurrent_template_id
    # required: order_id, amount, recurrent_template_id
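    #
    # = Example (sketch; the template id is illustrative)
    #   Payler.repeat_pay({order_id: '124', amount: 100}, 'tmpl_abc123')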
def repeat_pay(params, card_template)
params.reverse_merge!(recurrent_params(card_template))
send_request('RepeatPay', params)
end
# Returns status of order
# required: order_id
def get_status(order_id)
send_request('GetStatus', order_id: order_id)
end
# Returns advanced status of order
# required: order_id
def get_advanced_status(order_id)
send_request('GetAdvancedStatus', order_id: order_id)
end
# Returns additional info about recurrent template
# required: recurrent_template_id
def get_template(recurrent_template_id)
send_request('GetTemplate', recurrent_template_id: recurrent_template_id)
end
# Refund payment
# required: order_id, amount
def refund(params)
params.reverse_merge!(password_params)
send_request('Refund', params)
end
    # Captures funds previously blocked on the buyer's card as part of a
    # two-step payment (the StartSession request must use type "TwoStep")
    # required: order_id, amount
    def charge(params)
      params.reverse_merge!(password_params)
      send_request('Charge', params) # method name assumed from the API naming pattern above
    end
    # Cancels the hold on funds (partial or full)
# required: order_id, amount
def retrieve(params)
params.reverse_merge!(password_params)
send_request('Retrieve', params)
end
    # Activates or deactivates a recurrent payment template
# required: recurrent_template_id, active:boolean
def activate_template(params)
send_request('ActivateTemplate', params)
end
    # Finds a payment session by its payment identifier (order_id). This method can be useful in some cases
# required: order_id
#
    # = Example of a response to a successful request:
# {
# "id": "VLaFQpI88NpCncTA1TkhlX6HtkhzwQAKhxvz",
# "created": "2015-10-26 17:11:30",
# "valid_through": "2015-10-26 17:11:30",
# "type": "OneStep",
# "order_id": "ad7ad8b4-d50e-4b68-72f4-ca1264a8fae4",
# "amount": 30000,
# "product": "el-ticket",
# "currency": "RUB",
# "pay_page_params": "{"key": "value"}",
# "userdata": "data",
# "lang": "RU",
# "recurrent": "true"
# }
def find_session(order_id)
send_request('FindSession', order_id: order_id)
end
# Performs request to payler
# Returns response hash
def send_request(api_method, params)
params.reverse_merge!(default_params)
uri = URI.parse(server_url + api_method)
req = Net::HTTP::Post.new(uri.path)
req.set_form_data(params)
https = Net::HTTP.new(uri.host, uri.port)
https.use_ssl = uri.scheme == "https"
response = https.start { |http| http.request(req) }
response_hash = JSON.parse(response.body, symbolize_names: true)
response_hash[:success] = true if response.kind_of? Net::HTTPSuccess
response_hash
end
def server_url
PAYLER['server_url']
end
def default_params
{ key: PAYLER['key'] }
end
def password_params
{ password: PAYLER['password'] }
end
def recurrent_params(card_template)
      recurrent_template_id = card_template.is_a?(String) ? card_template : card_template.recurrent_template_id
      { recurrent_template_id: recurrent_template_id }
end
def session_params
{
currency: 'RUB',
recurrent: true,
type: 'OneStep'
}
end
end
end
|
module PostsHelper
def post_belongs_to_user(post)
return false unless current_user.present?
post.blog.user_id == current_user.id
end
end
|
<?php
namespace App\Http\Controllers\HomePage;
use App\Models\CarInfoPage\CarInfoPage;
use App\Models\Page\Page;
use Illuminate\Http\Request;
use App\Http\Controllers\Controller;
/**
* Class HomePageController
* @package App\Http\Controllers\HomePage
*/
class HomePageController extends Controller
{
/**
* @return \Illuminate\Contracts\View\Factory|\Illuminate\View\View
*/
public function __invoke()
{
        $carInfoPages = CarInfoPage::query()->orderBy('page_alias')->get();
return view('welcome', [
'carInfoPages' => $carInfoPages,
]);
}
}
|
package extracells.network
import cpw.mods.fml.common.FMLCommonHandler
import cpw.mods.fml.common.eventhandler.SubscribeEvent
import cpw.mods.fml.common.network.FMLEventChannel
import cpw.mods.fml.common.network.FMLNetworkEvent.ClientCustomPacketEvent
import cpw.mods.fml.common.network.FMLNetworkEvent.ServerCustomPacketEvent
import cpw.mods.fml.common.network.NetworkRegistry
import cpw.mods.fml.common.network.NetworkRegistry.TargetPoint
import extracells.feature.part.fluidterminal.gui.FluidTerminalContainer
import extracells.feature.part.fluidterminal.gui.FluidTerminalGui
import extracells.feature.part.fluidterminal.netwotk.FluidTerminalClientPacket
import extracells.feature.part.fluidterminal.netwotk.FluidTerminalServerPacket
import net.minecraft.client.Minecraft
import net.minecraft.entity.player.EntityPlayerMP
import net.minecraft.network.NetHandlerPlayServer
internal class ECNetworkHandler private constructor() {
companion object {
const val CHANNEL_NAME = "EC2"
lateinit var instance: ECNetworkHandler
private set
fun register() {
this.instance = ECNetworkHandler()
}
}
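
    // Usage (sketch): call ECNetworkHandler.register() once during mod init;
    // afterwards packets can be sent via ECNetworkHandler.instance,
    // e.g. ECNetworkHandler.instance.sendToServer(packet).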
private val channel: FMLEventChannel
init {
FMLCommonHandler.instance().bus().register(this)
this.channel = NetworkRegistry.INSTANCE.newEventDrivenChannel(CHANNEL_NAME)
this.channel.register(this)
}
// region send funs
fun sendToAll(message: ECPacket) {
this.channel.sendToAll(message.createProxy())
}
fun sendToPlayer(message: ECPacket, player: EntityPlayerMP) {
this.channel.sendTo(message.createProxy(), player)
}
fun sendToAllAround(message: ECPacket, point: TargetPoint) {
this.channel.sendToAllAround(message.createProxy(), point)
}
fun sendToDimension(message: ECPacket, dimensionId: Int) {
this.channel.sendToDimension(message.createProxy(), dimensionId)
}
fun sendToServer(message: ECPacket) {
this.channel.sendToServer(message.createProxy())
}
// endregion send funs
    // region handler logic
@SubscribeEvent
fun handleServerPacket(ev: ServerCustomPacketEvent) {
val server = ev.packet.handler() as NetHandlerPlayServer
val packet = ECPacket.createFrom(ev.packet.payload())
val player = server.playerEntity
when (packet.type) {
ECPacketType.FluidTerminalServer -> {
val fluidTerminal = player.openContainer as? FluidTerminalContainer
fluidTerminal?.handleServerPacket(packet as FluidTerminalServerPacket)
}
ECPacketType.FluidTerminalClient -> {
null // no-op client side packet
}
}?.javaClass // used to make when exhaustive
}
@SubscribeEvent
fun handleClientPacket(ev: ClientCustomPacketEvent) {
val packet = ECPacket.createFrom(ev.packet.payload())
when (packet.type) {
ECPacketType.FluidTerminalClient -> {
                val fluidTerminalGui = Minecraft.getMinecraft().currentScreen as? FluidTerminalGui
                fluidTerminalGui?.handleClientPacket(packet as FluidTerminalClientPacket)
}
ECPacketType.FluidTerminalServer -> {
null // no-op server side packet
}
}?.javaClass // used to make when exhaustive
}
// endregion handler logic
}
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
import pytest
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import _NATIVE_AMP_AVAILABLE, AMPType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
from tests.helpers import BoringModel
from tests.helpers.datamodules import MNISTDataModule
def test_num_training_batches(tmpdir):
"""
Tests that the correct number of batches are allocated
"""
# when we have fewer batches in the dataloader we should use those instead of the limit
model = EvalModelTemplate()
trainer = Trainer(limit_val_batches=100, limit_train_batches=100, max_epochs=1)
trainer.fit(model)
assert len(model.train_dataloader()) == 10
assert len(model.val_dataloader()) == 10
assert isinstance(trainer.num_val_batches, list)
assert trainer.num_val_batches[0] == 10
assert trainer.num_training_batches == 10
# when we have more batches in the dataloader we should limit them
model = EvalModelTemplate()
trainer = Trainer(limit_val_batches=7, limit_train_batches=7, max_epochs=1)
trainer.fit(model)
assert len(model.train_dataloader()) == 10
assert len(model.val_dataloader()) == 10
assert isinstance(trainer.num_val_batches, list)
assert trainer.num_val_batches[0] == 7
assert trainer.num_training_batches == 7
def test_overfit_batch_limits(tmpdir):
# ------------------------------------------------------
# Make sure shuffle is correct across loaders initially
# ------------------------------------------------------
model = EvalModelTemplate()
model.train_dataloader()
# original train loader which should be replaced in all methods
train_loader = model.train_dataloader()
# make sure the val and tests are not shuffled
assert isinstance(train_loader.sampler, RandomSampler)
assert isinstance(model.val_dataloader().sampler, SequentialSampler)
assert isinstance(model.test_dataloader().sampler, SequentialSampler)
# ------------------------------------------------------
# get the training loader and batch
# ------------------------------------------------------
# Create a reference train dataloader without shuffling.
train_loader = DataLoader(model.train_dataloader().dataset, shuffle=False)
(xa, ya) = next(iter(train_loader))
train_loader = DataLoader(model.train_dataloader().dataset, shuffle=True)
full_train_samples = len(train_loader)
num_train_samples = int(0.11 * full_train_samples)
# ------------------------------------------------------
# set VAL and Test loaders
# ------------------------------------------------------
val_loader = DataLoader(model.val_dataloader().dataset, shuffle=False)
test_loader = DataLoader(model.test_dataloader().dataset, shuffle=False)
# set the model loaders
model.train_dataloader = lambda: train_loader
model.val_dataloader = lambda: val_loader
model.test_dataloader = lambda: test_loader
# ------------------------------------------------------
# test train loader applies correct limits
# ------------------------------------------------------
trainer = Trainer(overfit_batches=4)
trainer.reset_train_dataloader(model)
assert trainer.num_training_batches == 4
# make sure the loaders are the same
(xb, yb) = next(iter(trainer.train_dataloader))
assert torch.eq(xa, xb).all()
assert torch.eq(ya, yb).all()
trainer = Trainer(overfit_batches=0.11)
trainer.reset_train_dataloader(model)
# The dataloader should have been overwritten with a Sequential sampler.
assert trainer.train_dataloader is not train_loader
assert trainer.num_training_batches == num_train_samples
# make sure the loaders are the same
(xb, yb) = next(iter(trainer.train_dataloader))
assert torch.eq(xa, xb).all()
assert torch.eq(ya, yb).all()
# ------------------------------------------------------
# run tests for both val and test
# ------------------------------------------------------
for split in ['val', 'test']:
# ------------------------------------------------------
# test overfit_batches as percent
# ------------------------------------------------------
loader_num_batches, dataloaders = Trainer(overfit_batches=0.11)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == num_train_samples
# make sure we turned off shuffle for the user
assert isinstance(dataloaders[0].sampler, SequentialSampler)
# make sure the loaders are the same
(xb, yb) = next(iter(dataloaders[0]))
assert torch.eq(xa, xb).all()
assert torch.eq(ya, yb).all()
# ------------------------------------------------------
# test overfit_batches as int
# ------------------------------------------------------
loader_num_batches, dataloaders = Trainer(overfit_batches=1)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == 1
loader_num_batches, dataloaders = Trainer(overfit_batches=5)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == 5
# ------------------------------------------------------
# test limit_xxx_batches as percent AND int
# ------------------------------------------------------
if split == 'val':
loader_num_batches, dataloaders = Trainer(limit_val_batches=0.1)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == int(0.1 * len(val_loader))
loader_num_batches, dataloaders = Trainer(limit_val_batches=10)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == 10
else:
loader_num_batches, dataloaders = Trainer(limit_test_batches=0.1)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == int(0.1 * len(test_loader))
loader_num_batches, dataloaders = Trainer(limit_test_batches=10)._reset_eval_dataloader(model, split)
assert loader_num_batches[0] == 10
def test_model_reset_correctly(tmpdir):
""" Check that model weights are correctly reset after scaling batch size. """
tutils.reset_seed()
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
)
before_state_dict = deepcopy(model.state_dict())
trainer.tuner.scale_batch_size(model, max_trials=5)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key])), \
'Model was not reset correctly after scaling batch size'
def test_trainer_reset_correctly(tmpdir):
""" Check that all trainer parameters are reset correctly after scaling batch size. """
tutils.reset_seed()
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
)
changed_attributes = [
'max_steps',
'weights_summary',
'logger',
'callbacks',
'checkpoint_callback',
'limit_train_batches',
'current_epoch',
]
attributes_before = {}
for ca in changed_attributes:
attributes_before[ca] = getattr(trainer, ca)
trainer.tuner.scale_batch_size(model, max_trials=5)
attributes_after = {}
for ca in changed_attributes:
attributes_after[ca] = getattr(trainer, ca)
for key in changed_attributes:
assert attributes_before[key] == attributes_after[key], \
f'Attribute {key} was not reset correctly after learning rate finder'
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
@pytest.mark.parametrize('scale_arg', ['power', 'binsearch', True])
def test_auto_scale_batch_size_trainer_arg(tmpdir, scale_arg):
""" Test possible values for 'batch size auto scaling' Trainer argument. """
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
model = EvalModelTemplate(**hparams)
before_batch_size = hparams.get('batch_size')
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
auto_scale_batch_size=scale_arg,
gpus=1,
)
trainer.tune(model)
after_batch_size = model.batch_size
assert before_batch_size != after_batch_size, \
'Batch size was not altered after running auto scaling of batch size'
assert not os.path.exists(tmpdir / 'scale_batch_size_temp_model.ckpt')
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
@pytest.mark.parametrize('use_hparams', [True, False])
def test_auto_scale_batch_size_set_model_attribute(tmpdir, use_hparams):
""" Test that new batch size gets written to the correct hyperparameter attribute. """
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
before_batch_size = hparams.get('batch_size')
class HparamsEvalModelTemplate(EvalModelTemplate):
def dataloader(self, *args, **kwargs):
# artificially set batch_size so we can get a dataloader
# remove it immediately after, because we want only self.hparams.batch_size
setattr(self, "batch_size", before_batch_size)
dataloader = super().dataloader(*args, **kwargs)
del self.batch_size
return dataloader
datamodule_model = MNISTDataModule(data_dir=tmpdir, batch_size=111) # this datamodule should get ignored!
datamodule_fit = MNISTDataModule(data_dir=tmpdir, batch_size=before_batch_size)
model_class = HparamsEvalModelTemplate if use_hparams else EvalModelTemplate
model = model_class(**hparams)
model.datamodule = datamodule_model # unused when another module gets passed to .tune() / .fit()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
auto_scale_batch_size=True,
gpus=1,
)
trainer.tune(model, datamodule_fit)
after_batch_size = model.hparams.batch_size if use_hparams else model.batch_size
assert trainer.datamodule == datamodule_fit
assert before_batch_size != after_batch_size
assert after_batch_size <= len(trainer.train_dataloader.dataset)
assert datamodule_fit.batch_size == after_batch_size
# should be left unchanged, since it was not passed to .tune()
assert datamodule_model.batch_size == 111
def test_auto_scale_batch_size_duplicate_attribute_warning(tmpdir):
""" Test for a warning when model.batch_size and model.hparams.batch_size both present. """
class TestModel(BoringModel):
def __init__(self, batch_size=1):
super().__init__()
# now we have model.batch_size and model.hparams.batch_size
self.batch_size = 1
self.save_hyperparameters()
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1, max_epochs=1000, auto_scale_batch_size=True)
expected_message = "Field `model.batch_size` and `model.hparams.batch_size` are mutually exclusive!"
with pytest.warns(UserWarning, match=expected_message):
trainer.tune(model)
@pytest.mark.parametrize('scale_method', ['power', 'binsearch'])
def test_call_to_trainer_method(tmpdir, scale_method):
""" Test that calling the trainer method itself works. """
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
model = EvalModelTemplate(**hparams)
before_batch_size = hparams.get('batch_size')
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
)
after_batch_size = trainer.tuner.scale_batch_size(model, mode=scale_method, max_trials=5)
model.batch_size = after_batch_size
trainer.fit(model)
assert before_batch_size != after_batch_size, \
'Batch size was not altered after running auto scaling of batch size'
def test_error_on_dataloader_passed_to_fit(tmpdir):
"""Verify that when the auto scale batch size feature raises an error
if a train dataloader is passed to fit """
# only train passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
auto_scale_batch_size='power',
)
fit_options = dict(train_dataloader=model.dataloader(train=True))
with pytest.raises(MisconfigurationException):
trainer.tune(model, **fit_options)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
@pytest.mark.skipif(not _NATIVE_AMP_AVAILABLE, reason="test requires native AMP.")
def test_auto_scale_batch_size_with_amp(tmpdir):
model = EvalModelTemplate()
batch_size_before = model.batch_size
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=1,
auto_scale_batch_size=True,
gpus=1,
precision=16,
)
trainer.tune(model)
batch_size_after = model.batch_size
assert trainer.amp_backend == AMPType.NATIVE
assert trainer.scaler is not None
assert batch_size_after != batch_size_before
|