blob_id stringlengths 40-40 | language stringclasses 1 value | repo_name stringlengths 5-140 | path stringlengths 5-183 | src_encoding stringclasses 6 values | length_bytes int64 12-5.32M | score float64 2.52-4.94 | int_score int64 3-5 | detected_licenses listlengths 0-47 | license_type stringclasses 2 values | text stringlengths 12-5.32M | download_success bool 1 class
---|---|---|---|---|---|---|---|---|---|---|---|
6e2360ce8f15c59519be10586424bb363a340a2a
|
Rust
|
andrew-d/interfaces-rs
|
/src/error.rs
|
UTF-8
| 2,018 | 3.328125 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
use std::convert::From;
use std::error::Error;
use std::fmt;
use nix;
/// InterfacesError is the error type that is returned by all functions in this crate. See the
/// documentation on the individual variants for more information.
#[derive(Debug)]
pub enum InterfacesError {
/// Errno indicates that something went wrong with an underlying syscall. The internal value
/// is the `errno` that was returned.
Errno(nix::errno::Errno),
/// NotSupported indicates that something required for this operation is not currently
/// supported on this platform or computer. The internal string may contain more detail.
NotSupported(&'static str),
}
impl InterfacesError {
/// Create a new instance of `InterfacesError` with the error set to the current value of the
/// libc `errno` variable.
pub fn last_os_error() -> InterfacesError {
return InterfacesError::Errno(nix::errno::Errno::last());
}
}
impl From<nix::errno::Errno> for InterfacesError {
fn from(e: nix::errno::Errno) -> InterfacesError {
InterfacesError::Errno(e)
}
}
impl Error for InterfacesError {
fn description(&self) -> &str {
use InterfacesError::*;
match *self {
Errno(..) => "A syscall error occurred",
NotSupported(..) => "A required feature is not supported",
}
}
}
impl fmt::Display for InterfacesError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use InterfacesError::*;
match *self {
Errno(ref err) => write!(f, "Errno({})", err.desc()),
NotSupported(msg) => write!(f, "NotSupported({})", msg),
}
}
}
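// Illustrative sketch (not part of the original crate): thanks to the `From` impl
// above, code that obtains a `nix::errno::Errno` can convert it into an
// `InterfacesError` directly. `example_from_errno` is a hypothetical helper added
// here only to demonstrate the conversion.
#[allow(dead_code)]
fn example_from_errno() -> Result<(), InterfacesError> {
// Equivalent to what `?` would do on a `Result<_, nix::errno::Errno>`.
Err(InterfacesError::from(nix::errno::Errno::last()))
}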
#[cfg(test)]
mod tests {
use std::error::Error;
use std::fmt;
use super::*;
#[test]
fn test_error_has_traits() {
let e = InterfacesError::last_os_error();
assert_is_error(&e);
assert_is_display(&e);
}
fn assert_is_error<T: Error>(_: &T) {}
fn assert_is_display<T: fmt::Display>(_: &T) {}
}
| true |
af02a046649f2a6628f3b6192cec1fc386dc7609
|
Rust
|
mijnadres/elm_export
|
/src/lib.rs
|
UTF-8
| 2,453 | 3.484375 | 3 |
[
"MIT"
] |
permissive
|
//! Provides easy communication between [Elm](http://elm-lang.org/) and
//! [Rust](https://www.rust-lang.org/en-US/) by leveraging
//! [syn](https://crates.io/crates/syn).
//!
//! ## Usage
//! Notice that at the moment some of this is dreamcode.
//!
//! Let's say we have some models in Rust.
//!
//! ```rust
//! enum Message {
//! FriendRequest(User),
//! Message(User, String),
//! }
//!
//! struct User {
//! name: String
//! }
//! ```
//!
//! We want to generate the corresponding models in Elm. For this we need a
//! dependency on the `elm_export` crate. Include the following line in your
//! `Cargo.toml`.
//!
//! ```toml
//! [dependencies]
//! elm_export = "0.1.0"
//! ```
//!
//! Next we need to make our project aware of the crate and the functionality it
//! exposes. Import it in either `main.rs` or `lib.rs`. Don't forget to annotate the
//! import with the `#[macro_use]` attribute.
//!
//! ```text
//! #[macro_use]
//! extern crate elm_export;
//! ```
//!
//! Now it is time to derive the corresponding models in Elm. Annotate the models
//! with `derive(Elm)`.
//!
//! ```text
//! #[derive(Elm)]
//! enum Message {
//! FriendRequest(User),
//! Message(User, String),
//! }
//!
//! #[derive(Elm)]
//! struct User {
//! name: String
//! }
//! ```
//!
//! Now every time your models change and you compile your code, the corresponding
//! Elm models will be generated. You can find them in the `generated` directory in
//! your project's root. They are named after the corresponding Rust definitions,
//! i.e. `Message.elm` and `User.elm` in this case.
//!
//! ```elm
//! module Message exposing (..)
//!
//! type Message =
//! FriendRequest User
//! | Message User String
//! ```
//!
//! ```elm
//! module User exposing (..)
//!
//! type alias User =
//! {
//! name: String
//! }
//! ```
extern crate proc_macro;
mod derive;
mod elm;
mod representation;
use proc_macro::TokenStream;
use syn::{DeriveInput, parse_macro_input};
/// Marker trait that allows tying into the procedural macro tool chain.
trait Elm {}
/// Writes Elm model, serializers and deserializers to the `generated`
/// directory.
#[proc_macro_derive(Elm)]
pub fn generate_elm(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
derive::generate_elm(input).expect("to be able to generate elm module");
empty_token_stream()
}
fn empty_token_stream() -> TokenStream {
"".parse().unwrap()
}
| true |
10f8544a041e2452829b5146554733a5bf09709f
|
Rust
|
thisisian/rspaint
|
/src/controller/color.rs
|
UTF-8
| 771 | 3.046875 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
extern crate cairo;
#[derive(Clone)]
pub struct RGBColor(u8, u8, u8);
pub const BLACK: RGBColor = RGBColor(0, 0, 0);
pub const WHITE: RGBColor = RGBColor(128, 128, 128);
impl RGBColor {
pub fn get_rgb(&self) -> (u8, u8, u8) {
(self.0, self.1, self.2)
}
pub fn new(r: u8, g: u8, b: u8) -> RGBColor {
RGBColor (r,g,b)
}
pub fn set_rgb(&mut self, r: u8, g: u8, b: u8) {
self.0 = r;
self.1 = g;
self.2 = b;
}
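// Channel values in this file range over 0..=128 (see `WHITE` above), so dividing by
// 128.0 below maps each channel into cairo's 0.0..=1.0 range.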
pub fn as_cairo_pattern(&self) -> cairo::SolidPattern {
cairo::SolidPattern::from_rgb(self.0 as f64 / 128., self.1 as f64 / 128., self.2 as f64 / 128.)
}
pub fn as_usize(&self) -> usize {
(self.0 as usize) << 16 | (self.1 as usize) << 8 | (self.2) as usize
}
}
| true |
8ee1899366dc9bc366cf5a16872af748b615847d
|
Rust
|
Alex6614/rust-qlearning
|
/src/structs.rs
|
UTF-8
| 703 | 3.4375 | 3 |
[] |
no_license
|
#[derive(Hash, Eq, PartialEq, Debug)]
/// Describes the current state the system is in. At the moment it only allows for one i32 variable, but this can be extended easily.
pub struct State {
x_1: i32,
}
impl State {
// Create a new State
pub fn new(x_1: i32) -> State {
State { x_1: x_1}
}
}
/// A tuple of a state and an action (which is a string), which is used as a key in the q_values HashMap to find the value of taking a certain action while in a certain state
#[derive(Hash, Eq, PartialEq, Debug)]
pub struct Key {
state: State,
action: String,
}
impl Key {
// Create a new Key
pub fn new(state: State, action: String) -> Key {
Key { state: state, action: action }
}
}
| true |
be60205752e4b1918fd0c63d9d2802ced95e191b
|
Rust
|
michimani/Project-Euler-Solutions
|
/rust/src/utils/prime.rs
|
UTF-8
| 1,859 | 4.21875 | 4 |
[
"MIT"
] |
permissive
|
/// Returns whether the number is a prime number.
///
/// # Example
/// ```
/// assert_eq!(false, is_prime_number(1));
/// assert_eq!(true, is_prime_number(2));
/// assert_eq!(false, is_prime_number(100));
/// assert_eq!(true, is_prime_number(104729));
/// ```
pub fn is_prime_number(num: usize) -> bool {
if num < 2 {
return false;
}
let to_div = (num as f64).powf(0.5) as usize;
for p in 2..num {
if p > to_div {
break;
}
if num % p == 0 {
return false;
}
}
return true;
}
/// Generates prime numbers up to and including `limit`.
///
/// # Example
/// ```
/// assert_eq!([2, 3, 5].to_vec(), generate_prime_numbers(5));
/// assert_eq!(
/// [2, 3, 5, 7, 11, 13, 17, 19].to_vec(),
/// generate_prime_numbers(20)
/// );
/// ```
pub fn generate_prime_numbers(limit: usize) -> Vec<usize> {
let mut primes: Vec<usize> = Vec::new();
for num in 2..limit + 1 {
let mut is_prime = true;
let to_div = ((num as f64).powf(0.5)) as usize;
for p in &primes {
if *p > to_div {
break;
}
if num % *p == 0 {
is_prime = false;
break;
}
}
if is_prime {
primes.push(num);
}
}
return primes;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_prime_number() {
assert_eq!(false, is_prime_number(1));
assert_eq!(true, is_prime_number(2));
assert_eq!(false, is_prime_number(100));
assert_eq!(true, is_prime_number(104729));
}
#[test]
fn test_generate_prime_numbers() {
assert_eq!([2, 3, 5].to_vec(), generate_prime_numbers(5));
assert_eq!(
[2, 3, 5, 7, 11, 13, 17, 19].to_vec(),
generate_prime_numbers(20)
);
}
}
| true |
f7e18c0607e32bcab9deaca7744f5c09febebb10
|
Rust
|
andywarduk/aoc2020
|
/day23-2/src/main.rs
|
UTF-8
| 1,626 | 3.546875 | 4 |
[] |
no_license
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
play("315679824", 1_000_000, 10_000_000);
Ok(())
}
fn play(start_str: &str, size: usize, moves: usize) {
let mut nexts: Vec<usize> = Vec::new();
for _ in 0..=size + 1 {
nexts.push(0)
}
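// `nexts[c]` holds the label of the cup immediately clockwise of cup `c`, so the whole
// circle is stored as a singly linked list in a flat Vec (indices are cup labels; 0 is unused).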
let start_chars: Vec<char> = start_str.chars().collect();
let mut start = start_chars[0].to_digit(10).unwrap() as usize;
for i in 1..start_chars.len() {
let prev = start_chars[i - 1].to_digit(10).unwrap() as usize;
let next = start_chars[i].to_digit(10).unwrap() as usize;
nexts[prev] = next;
}
let mut prev = start_chars[start_chars.len() - 1].to_digit(10).unwrap() as usize;
for i in start_chars.len() + 1..=size {
nexts[prev] = i;
prev = i;
}
nexts[prev] = start;
for _ in 0..moves {
let int1 = nexts[start];
let int2 = nexts[int1];
let int3 = nexts[int2];
// Get next start number
let next_start = nexts[int3];
// Chop out the 3 numbers
nexts[start] = next_start;
// Work out destination
let mut dest = start - 1;
loop {
if dest == 0 {
dest = size;
}
if dest != int1 && dest != int2 && dest != int3 {
break
}
dest -= 1;
}
// Insert chopped numbers at destination
nexts[int3] = nexts[dest];
nexts[dest] = int1;
// Move to next start
start = next_start;
}
println!("{} * {} = {}", nexts[1], nexts[nexts[1]], nexts[1] * nexts[nexts[1]]);
}
| true |
b21b973ac7f5985bdc2771a6635e3b39f7653ebe
|
Rust
|
gdesmott/gst-log-parser
|
/examples/omx-perf.rs
|
UTF-8
| 3,023 | 2.65625 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
// Copyright (C) 2017-2019 Guillaume Desmottes <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Generate input logs with: GST_DEBUG="OMX_API_TRACE:8"
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::process::exit;
use gst_log_parser::parse;
use gstreamer::DebugLevel;
use structopt::StructOpt;
#[derive(StructOpt)]
#[structopt(
name = "omx-perf",
about = "Generate a data file from OMX performance logs"
)]
struct Opt {
#[structopt(help = "Input file")]
input: String,
#[structopt(help = "Output file")]
output: String,
}
struct Count {
empty_call: u32,
empty_done: u32,
fill_call: u32,
fill_done: u32,
}
impl Count {
fn new() -> Count {
Count {
fill_call: 0,
fill_done: 0,
empty_call: 0,
empty_done: 0,
}
}
}
fn generate() -> Result<bool, std::io::Error> {
let opt = Opt::from_args();
let input = File::open(opt.input)?;
let mut output = (File::create(&opt.output))?;
let parsed = parse(input)
.filter(|entry| entry.category == "OMX_API_TRACE" && entry.level == DebugLevel::Trace);
let mut counts: HashMap<String, Count> = HashMap::new();
for entry in parsed {
println!("{}", entry);
let s = entry
.message_to_struct()
.expect("Failed to parse structure");
let object = entry.object.unwrap();
// Extract the component name by taking the last 4 chars of the gst object name
if let Some((i, _)) = object.char_indices().rev().nth(3) {
let comp_name = &object[i..];
let event = s.name();
let count = counts
.entry(comp_name.to_string())
.or_insert_with(Count::new);
match event.as_str() {
"EmptyThisBuffer" => count.empty_call += 1,
"EmptyBufferDone" => count.empty_done += 1,
"FillThisBuffer" => count.fill_call += 1,
"FillBufferDone" => count.fill_done += 1,
_ => continue,
}
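// Write a 1 at the event timestamp and a 0 one nanosecond later, producing one pulse
// per OMX buffer event in the output data file.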
let ts = entry.ts.nseconds();
writeln!(output, "{}_{} 1 {}", comp_name, event, ts)?;
writeln!(output, "{}_{} 0 {}", comp_name, event, ts + 1)?;
}
}
for (comp, count) in &counts {
println!("{}:", comp);
println!(
"\tInput (EmptyBufferDone/EmptyThisBuffer): {}/{}",
count.empty_done, count.empty_call
);
println!(
"\tOutput (FillBufferDone/FillThisBuffer): {}/{}",
count.fill_done, count.fill_call
);
}
println!("Generated {}", opt.output);
Ok(true)
}
fn main() {
if generate().is_err() {
exit(1);
}
}
| true |
c154a785690df4c160a377bf3a52e9c87d15ef6a
|
Rust
|
Ruin0x11/sabi
|
/src/terrain/mod.rs
|
UTF-8
| 2,588 | 2.6875 | 3 |
[] |
no_license
|
use std::collections::HashMap;
use world::Bounds;
use chunk::*;
use chunk::serial::SerialChunk;
use prefab::Markers;
use world::WorldPosition;
use infinigen::*;
pub mod traits;
pub mod regions;
use self::regions::Regions;
use self::traits::*;
impl BoundedTerrain<WorldPosition, ChunkIndex> for Terrain {
fn in_bounds(&self, pos: &WorldPosition) -> bool {
self.bounds.in_bounds(pos)
}
fn index_in_bounds(&self, index: &ChunkIndex) -> bool {
self.bounds.index_in_bounds(index)
}
}
impl Index for ChunkIndex {
fn x(&self) -> i32 { self.0.x }
fn y(&self) -> i32 { self.0.y }
}
#[derive(Serialize, Deserialize)]
pub struct Terrain {
regions: Regions,
chunks: HashMap<ChunkIndex, Chunk>,
bounds: Bounds,
pub markers: Markers,
pub id: u32,
}
impl Terrain {
pub fn new(bounds: Bounds, id: u32) -> Self {
Terrain {
regions: Regions::new(id),
chunks: HashMap::new(),
bounds: bounds,
markers: Markers::new(),
id: id,
}
}
pub fn set_id(&mut self, id: u32) {
self.id = id;
self.regions.set_id(id);
}
}
impl TerrainQuery for Terrain {
fn chunk(&self, index: ChunkIndex) -> Option<&Chunk> {
self.chunks.get(&index)
}
fn pos_loaded(&self, pos: &WorldPosition) -> bool {
self.cell(pos).is_some() && self.bounds.in_bounds(pos)
}
}
impl TerrainMutate for Terrain {
fn prune_empty_regions(&mut self) {
self.regions.prune_empty();
}
fn chunk_mut(&mut self, index: ChunkIndex) -> Option<&mut Chunk> {
self.chunks.get_mut(&index)
}
fn insert_chunk(&mut self, index: ChunkIndex, chunk: Chunk) {
self.chunks.insert(index, chunk);
// NOTE: cells are not cropped at bounds, but since there is a bounds
// check the squares out of bounds are treated as "None"
}
fn remove_chunk(&mut self, index: &ChunkIndex) -> Option<Chunk> {
self.chunks.remove(index)
}
}
impl<'a> ChunkedTerrain<'a, ChunkIndex, SerialChunk, Regions> for Terrain
where Region<ChunkIndex>: ManagedRegion<'a, ChunkIndex, SerialChunk> {
fn regions_mut(&mut self) -> &mut Regions {
assert_eq!(self.regions.id, self.id);
&mut self.regions
}
fn chunk_count(&self) -> usize {
self.chunks.len()
}
fn chunk_loaded(&self, index: &ChunkIndex) -> bool {
self.chunk(*index).is_some()
}
fn chunk_indices(&self) -> Vec<ChunkIndex> {
self.chunks.iter().map(|(&i, _)| i).collect()
}
}
| true |
9e518050cd6c543935a80ea38b6f50f40faf99d9
|
Rust
|
sycer-dev/kwp
|
/tests/main.rs
|
UTF-8
| 1,717 | 3.125 | 3 |
[
"Apache-2.0"
] |
permissive
|
use kwp::{Parser, Prefixes};
#[test]
fn basic_text() {
let parser = Parser::new(
"+foo,-bar,+baz",
Prefixes {
positive: "+",
negative: "-",
},
);
let keywords = parser.parse();
assert_eq!(keywords.positive, vec!["foo", "baz"]);
assert_eq!(keywords.negative, vec!["bar"]);
}
#[test]
fn do_not_retain_prefix() {
let mut parser = Parser::new(
"+foo,-bar,+baz",
Prefixes {
positive: "+",
negative: "-",
},
);
parser.should_retain_prefix(false);
let keywords = parser.parse();
assert_eq!(keywords.positive, vec!["foo", "baz"]);
assert_eq!(keywords.negative, vec!["bar"]);
}
#[test]
fn weird_prefixes() {
let parser = Parser::new(
"yes!!foo,no!!bar,yes!!baz",
Prefixes {
positive: "yes!!",
negative: "no!!",
},
);
let keywords = parser.parse();
assert_eq!(keywords.positive, vec!["foo", "baz"]);
assert_eq!(keywords.negative, vec!["bar"]);
}
#[test]
fn unparsed() {
let parser = Parser::new(
"+foo,-bar,+baz,bak",
Prefixes {
positive: "+",
negative: "-",
},
);
let keywords = parser.parse();
assert_eq!(keywords.other, vec!["bak"]);
}
#[test]
fn basic_products() {
let products = vec!["MyProduct Adult", "MyProduct Youth"];
let parser = Parser::new(
"+myproduct,-youth",
Prefixes {
positive: "+",
negative: "-",
},
);
let keywords = parser.parse();
let products = parser.match_products(products.clone(), keywords.clone());
assert_eq!(products, vec!["MyProduct Adult"]);
}
| true |
76a7a3b8ce71d11d21049f8eaeb91ac2a176cb90
|
Rust
|
RustyGecko/sensor-tracker
|
/src/cmdparse.rs
|
UTF-8
| 1,481 | 3.25 | 3 |
[] |
no_license
|
use core::prelude::*;
use core::str::FromStr;
use collections::vec::Vec;
use collections::string::String;
use modules::Usart;
const NL: u8 = '\n' as u8;
const CR: u8 = '\r' as u8;
const BS: u8 = 8u8;
#[derive(Debug)]
pub enum Cmd {
Read(u32),
Write(u32),
Unknown
}
pub fn get_command() -> Cmd {
print_prompt();
parse(get_line())
}
fn print_prompt() {
let usart: Usart = Default::default();
usart.write_line("> ");
}
fn get_line() -> String {
let usart: Usart = Default::default();
let mut line = String::with_capacity(8);
loop {
let ch = usart.getc();
if ch == CR {
usart.putc(NL);
usart.putc(CR);
break;
} else if ch == BS {
usart.putc(BS);
usart.putc(BS);
line.pop();
} else {
if is_printable(ch) {
line.push(ch as char);
usart.putc(ch);
}
}
}
line
}
fn is_printable(ch: u8) -> bool{
ch >= ' ' as u8 && ch <= '~' as u8
}
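// Commands have the form "w <num>" (write) or "r <num>" (read); anything else is Unknown.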
fn parse(line: String) -> Cmd {
let tokens: Vec<&str> = line.split(' ').collect();
match tokens.as_ref() {
["w", num] => match FromStr::from_str(num) {
Ok(num) => Cmd::Write(num),
_ => Cmd::Unknown
},
["r", num] => match FromStr::from_str(num) {
Ok(num) => Cmd::Read(num),
_ => Cmd::Unknown
},
_ => Cmd::Unknown
}
}
| true |
3d44e7349ff354a1e149725e786fb82b02326ec2
|
Rust
|
nazeudon/rust_tutrial
|
/10_1_generic/src/main.rs
|
UTF-8
| 1,023 | 4.125 | 4 |
[] |
no_license
|
fn main() {
let number_list = vec![34, 50, 25, 100, 65];
// let mut largest = number_list[0];
// for number in number_list {
// if number > largest {
// largest = number
// }
// }
let result = largest(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['a', 'm', 'b', 'q'];
let result = largest_char(&char_list);
println!("The largest char is {}", result);
let p = Point { x: 5, y: 10 };
println!("p.x = {}", p.x())
}
fn largest<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item
}
}
largest
}
fn largest_char(list: &[char]) -> char {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
| true |
aa4e42e71b670b630f1dae81586f4eeb2456a3f4
|
Rust
|
brayniac/rips
|
/src/macros.rs
|
UTF-8
| 375 | 2.765625 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
#[macro_export]
/// Macro for sending on a Tx until it does not return a `TxError::InvalidTx`.
macro_rules! tx_send {
($create:expr; $($arg:expr),*) => {{
let mut result = Err(TxError::InvalidTx);
while let Err(TxError::InvalidTx) = result {
let mut tx = $create();
result = tx.send($($arg),*);
}
result
}};
}
| true |
1f7b37d2900d32e6fc2903aa6526e3b5c693ab1d
|
Rust
|
lineCode/tuix
|
/widgets/src/dropdown.rs
|
UTF-8
| 9,473 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
use crate::common::*;
use crate::{CheckButton, CheckboxEvent, Label, Popup, PopupEvent, List};
const ICON_DOWN_DIR: &str = "\u{25be}";
#[derive(Debug, Clone, PartialEq)]
pub enum DropdownEvent {
SetText(String),
}
pub struct DropdownItem {
//checkbox: Entity,
text: String,
pressed: bool,
}
impl DropdownItem {
pub fn new(txt: &str) -> Self {
DropdownItem {
text: txt.to_string(),
pressed: false,
}
}
}
impl Widget for DropdownItem {
type Ret = Entity;
type Data = ();
fn on_build(&mut self, state: &mut State, entity: Entity) -> Self::Ret {
entity.set_text(state, &self.text).class(state, "item");
//self.checkbox = Checkbox::new(false).build(state, entity, |builder| builder.set_hoverable(false));
// Element::new().build(state, entity, |builder| {
// builder.set_text(&self.text).set_flex_grow(1.0).set_hoverable(false)
// });
entity
}
fn on_event(&mut self, state: &mut State, entity: Entity, event: &mut Event) {
if let Some(window_event) = event.message.downcast::<WindowEvent>() {
match window_event {
WindowEvent::MouseDown(button) => {
if *button == MouseButton::Left {
if entity == event.target {
self.pressed = true;
}
}
}
WindowEvent::MouseUp(button) => {
if *button == MouseButton::Left {
if self.pressed {
self.pressed = false;
//self.checkbox.set_checked(state, true);
// state.insert_event(
// Event::new(CheckboxEvent::Switch)
// .target(self.checkbox)
// .propagate(Propagation::Direct),
// );
state.insert_event(
Event::new(DropdownEvent::SetText(self.text.clone()))
.target(entity)
.propagate(Propagation::Up),
);
}
}
}
_ => {}
}
}
}
}
pub struct Dropdown<T> {
button: CheckButton,
pub container: Entity,
pub header: Entity,
pub label: Entity,
text: String,
pub value: T,
}
impl<T: 'static + Clone + Default> Dropdown<T> {
pub fn new(text: &str) -> Self {
Dropdown {
button: CheckButton::default(),
container: Entity::null(),
header: Entity::null(),
label: Entity::null(),
text: text.to_string(),
value: T::default(),
}
}
}
impl<T: 'static + Clone> Widget for Dropdown<T> {
type Ret = Entity;
type Data = T;
fn on_build(&mut self, state: &mut State, entity: Entity) -> Self::Ret {
// self.header = Element::new().build(state, entity, |builder| {
// builder
// //.set_background_color(Color::rgb(100,100,50))
// .set_hoverable(false)
// .set_focusable(false)
// .set_layout_type(LayoutType::Row)
// .set_width(Stretch(1.0))
// .set_height(Stretch(1.0))
// .class("header")
// });
self.label = Label::new(&self.text).build(state, entity, |builder| {
builder
//.set_background_color(Color::rgb(100,50,50))
.set_hoverable(false)
.set_focusable(false)
.set_width(Stretch(1.0))
.set_height(Stretch(1.0))
.class("label")
});
// Icon
Element::new().build(state, entity, |builder| {
builder
.set_font("icons")
.set_hoverable(false)
.set_focusable(false)
//.set_background_color(Color::rgb(100,100,100))
.set_text(ICON_DOWN_DIR)
.set_width(Pixels(20.0))
.set_height(Pixels(20.0))
.set_top(Stretch(1.0))
.set_bottom(Stretch(1.0))
.set_child_space(Stretch(1.0))
.class("icon")
});
self.container = Popup::new().build(state, entity, |builder| {
builder
.set_position_type(PositionType::SelfDirected)
.set_top(Percentage(100.0))
//.set_width(Auto)
.set_height(Auto)
.set_z_order(1)
.set_clip_widget(Entity::root())
.class("container")
});
let list = List::new().build(state, self.container, |builder|
builder
.set_height(Auto)
);
entity.set_element(state, "dropdown").set_layout_type(state, LayoutType::Row);
// (entity, self.header, self.container)
let container = self.container;
self.button = CheckButton::new().on_checked(move |_, state, _|
state.insert_event(
Event::new(PopupEvent::Open).target(container)
)
)
.on_unchecked(move |_, state, _|
state.insert_event(
Event::new(PopupEvent::Close).target(container)
)
);
list
}
fn on_update(&mut self, _state: &mut State, _entity: Entity, data: &Self::Data) {
self.value = data.clone();
}
fn on_event(&mut self, state: &mut State, entity: Entity, event: &mut Event) {
self.button.on_event(state, entity, event);
if let Some(dropdown_event) = event.message.downcast() {
//if event.target == entity {
match dropdown_event {
DropdownEvent::SetText(text) => {
self.label.set_text(state, text);
}
}
}
if let Some(popup_event) = event.message.downcast() {
match popup_event {
PopupEvent::Close => {
entity.emit(state, CheckboxEvent::Uncheck);
}
_=> {}
}
}
if let Some(window_event) = event.message.downcast::<WindowEvent>() {
match window_event {
// WindowEvent::MouseDown(button) => match button {
// MouseButton::Left => {
// if event.target == entity || event.target == self.header {
// }
// }
// _ => {}
// },
// WindowEvent::MouseCaptureOutEvent => {
// self.open = false;
// self.header.set_disabled(state, true);
// state
// .style
// .opacity
// .play_animation(self.container, self.fade_out_animation);
// self.container.set_opacity(state, 0.0);
// }
// WindowEvent::MouseCaptureEvent => {
// self.open = true;
// self.header.set_enabled(state, true);
// state
// .style
// .opacity
// .play_animation(self.container, self.fade_in_animation);
// self.container.set_opacity(state, 1.0);
// // Shouldn't need to do this but it's required for some reason. TODO: Investigate
// self.container.set_z_order(state, 1);
// }
// WindowEvent::MouseUp(button) => match button {
// MouseButton::Left => {
// if (event.target == entity || event.target == self.header)
// && event.origin != entity
// {
// if state.mouse.left.pressed == state.hovered {
// if !self.open {
// state.capture(entity);
// } else {
// state.release(entity);
// }
// state.insert_event(
// Event::new(WindowEvent::MouseUp(*button))
// .target(state.hovered)
// .origin(entity)
// .propagate(Propagation::Direct),
// );
// }
// }
// }
// _ => {}
// },
// WindowEvent::KeyDown(code, key) => match code {
// Code::Escape => {
// state.insert_event(
// Event::new(WindowEvent::KeyDown(*code, key.clone()))
// .target(self.container)
// .propagate(Propagation::Direct),
// );
// }
// _ => {}
// },
_ => {}
}
}
}
}
| true |
a2b96b908218155287df5dfbae924f1b3def14ec
|
Rust
|
singee-study/rust-minigrep
|
/src/app.rs
|
UTF-8
| 390 | 2.546875 | 3 |
[] |
no_license
|
pub mod utils;
pub use self::utils::search;
use crate::config::Config;
use std::error::Error;
use std::fs::read_to_string;
pub fn run(config: Config) -> Result<(), Box<dyn Error>> {
let file_content = read_to_string(config.file_path)?;
let search_result = search(&config.target, &file_content);
for line in search_result {
println!("{}", line);
}
Ok(())
}
| true |
c01ae01589656a6bd011a2588d80de27d6a610cf
|
Rust
|
kindlychung/stool
|
/src/widget_controller.rs
|
UTF-8
| 875 | 2.65625 | 3 |
[] |
no_license
|
//! Controller widgets
use druid::widget::Controller;
use druid::{Env, Event, EventCtx, UpdateCtx, Widget};
use crate::AppData;
/// A widget that wraps all root widgets
#[derive(Debug, Default)]
pub struct RootWindowController;
impl<W: Widget<AppData>> Controller<AppData, W> for RootWindowController {
fn event(
&mut self,
child: &mut W,
ctx: &mut EventCtx,
event: &Event,
data: &mut AppData,
env: &Env,
) {
match event {
Event::WindowSize(size) => data.line_size = (size.width / 8.) as usize - 15,
other => child.event(ctx, other, data, env),
}
}
fn update(
&mut self,
child: &mut W,
ctx: &mut UpdateCtx,
old_data: &AppData,
data: &AppData,
env: &Env,
) {
child.update(ctx, old_data, data, env);
}
}
| true |
9dfead512951eaf6f69ea447dcb6286281b8419c
|
Rust
|
spriest487/advent-of-code-2018
|
/src/day_15.rs
|
UTF-8
| 11,855 | 2.96875 | 3 |
[] |
no_license
|
mod point;
mod astar;
use {
crate::{
astar::Pathfinder,
point::{
Point,
Neighbors,
},
},
std::{
fmt,
cmp::Ordering,
collections::{
HashSet,
HashMap,
},
time::Instant,
usize,
},
rayon::prelude::*,
};
pub struct ManhattanDistHeuristic;
impl astar::Heuristic for ManhattanDistHeuristic {
type Item = Point;
type Score = usize;
fn score(from: &Point, to: &Point) -> usize { from.manhattan_dist_to(*to) }
fn zero_score() -> usize { 0 }
fn infinity_score() -> usize { usize::MAX }
}
pub struct CavernWorld;
impl astar::World for CavernWorld {
type Point = Point;
type Score = usize;
type Neighbors = Neighbors;
type Heuristic = ManhattanDistHeuristic;
fn neighbors(origin: &Point) -> Neighbors { origin.neighbors_reading_order() }
fn neighbor_dist() -> usize { 1 }
fn point_order(a: &Point, b: &Point) -> Ordering { Point::cmp_reading_order(*a, *b) }
}
type CavernPathfinder = Pathfinder<CavernWorld>;
#[derive(Copy, Clone, Eq, PartialEq)]
enum Team {
Elf,
Goblin,
}
impl fmt::Display for Team {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", match self {
Team::Goblin => "Goblin",
Team::Elf => "Elf",
})
}
}
#[derive(Clone)]
struct Fighter {
team: Team,
pos: Point,
hp: isize,
}
const BASE_ATTACK_POWER: isize = 3;
impl Fighter {
fn new(team: Team, pos: Point) -> Self {
Self {
team,
pos,
hp: 200,
}
}
}
impl fmt::Debug for Fighter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Fighter ( {} @ {} )", self.team, self.pos)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Tile {
Empty,
Blocked,
}
impl fmt::Display for Tile {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Tile::Empty => write!(f, "."),
Tile::Blocked => write!(f, "#"),
}
}
}
#[derive(Clone)]
struct Cavern {
tiles: Vec<Tile>,
width: usize,
height: usize,
fighters: Vec<Fighter>,
fighter_positions: HashMap<Point, usize>,
elf_attack_power: isize,
}
impl Cavern {
fn parse(s: &str) -> Self {
let mut width = 0;
let mut height = 0;
let mut fighters = Vec::new();
let mut tiles = Vec::new();
for (y, line) in s.lines().enumerate() {
height += 1;
width = line.len(); // assume all lines are the same length
for (x, char) in line.chars().enumerate() {
let point = Point::new(x as isize, y as isize);
match char {
'#' => tiles.push(Tile::Blocked),
'E' => {
tiles.push(Tile::Empty);
fighters.push(Fighter::new(Team::Elf, point));
}
'G' => {
tiles.push(Tile::Empty);
fighters.push(Fighter::new(Team::Goblin, point));
}
_ => tiles.push(Tile::Empty),
}
}
}
let mut cavern = Self {
tiles,
width,
height,
fighters,
fighter_positions: HashMap::new(),
elf_attack_power: BASE_ATTACK_POWER,
};
cavern.refresh_fighter_positions();
cavern
}
fn refresh_fighter_positions(&mut self) {
self.fighter_positions.clear();
for (i, f) in self.fighters.iter().enumerate() {
self.fighter_positions.insert(f.pos, i);
}
}
fn is_free_space(&self, point: Point) -> bool {
match self.tile_at(point) {
Tile::Empty => self.fighter_at(point).is_none(),
Tile::Blocked => false,
}
}
fn fighter_at(&self, point: Point) -> Option<usize> {
self.fighter_positions.get(&point)
.filter(|&&i| self.fighters[i].hp > 0)
.cloned()
}
fn tile_at(&self, point: Point) -> Tile {
let off = self.width as isize * point.y + point.x;
if off >= 0 && off < self.tiles.len() as isize {
self.tiles[off as usize]
} else {
Tile::Blocked
}
}
fn find_targets(&self, i: usize, targets: &mut Vec<usize>) {
targets.clear();
let fighter = &self.fighters[i];
targets.extend(self.fighters.iter().enumerate()
.filter(|(_, other)| other.hp > 0)
.filter_map(|(j, other)| if other.team != fighter.team {
Some(j)
} else {
None
}));
}
fn move_fighter(&mut self, i: usize, targets: &[usize], pathfinder: &mut CavernPathfinder) {
let fighter = &self.fighters[i];
let dests: HashSet<_> = targets.iter()
.flat_map(|j| {
let target_pos = self.fighters[*j].pos;
target_pos.neighbors_reading_order()
})
.filter(|p| self.is_free_space(*p) || *p == fighter.pos)
.collect();
if !dests.contains(&fighter.pos) {
let mut paths = Vec::new();
let origin_points = fighter.pos.neighbors_reading_order()
.filter(|p| self.is_free_space(*p));
let mut path = Vec::new();
for origin in origin_points {
for &dest in &dests {
let free_tile_pred = |p: &Point| self.is_free_space(*p);
if pathfinder.find_path(origin, dest, free_tile_pred, &mut path) {
paths.push(path.clone());
path.clear();
}
}
}
paths.sort_by(|a, b| {
let a_dest = *a.last().unwrap();
let b_dest = *b.last().unwrap();
// sort first by shortest paths...
match a.len().cmp(&b.len()) {
// then by destination pos in reading order
Ordering::Equal => Point::cmp_reading_order(a_dest, b_dest),
dest_order => dest_order,
}
});
if !paths.is_empty() {
// move this fighter to the first step of the chosen path
self.fighters[i].pos = paths[0][0];
self.refresh_fighter_positions();
}
}
}
fn resolve_attacks(&mut self, i: usize) {
let neighbors = self.fighters[i].pos.neighbors_reading_order();
let target_index = neighbors
.filter_map(|neighbor| {
self.fighters.iter().enumerate()
.filter_map(|(j, f)| {
if f.pos == neighbor
&& f.hp > 0
&& f.team != self.fighters[i].team {
Some(j)
} else {
None
}
})
.next()
})
.min_by(|a, b| {
let a = &self.fighters[*a];
let b = &self.fighters[*b];
match a.hp.cmp(&b.hp) {
Ordering::Equal => Point::cmp_reading_order(a.pos, b.pos),
hp_order => hp_order,
}
});
if let Some(j) = target_index {
let attack_power = match self.fighters[i].team {
Team::Elf => self.elf_attack_power,
Team::Goblin => BASE_ATTACK_POWER,
};
self.fighters[j].hp = isize::max(0, self.fighters[j].hp - attack_power);
}
}
fn tick(&mut self, pathfinder: &mut CavernPathfinder) -> Option<Team> {
let mut targets = Vec::new();
self.fighters.sort_by(|a, b| Point::cmp_reading_order(a.pos, b.pos));
self.refresh_fighter_positions();
for i in 0..self.fighters.len() {
if self.fighters[i].hp > 0 {
self.find_targets(i, &mut targets);
if targets.is_empty() {
let winner = self.fighters[i].team;
// all enemies are dead, battle is over
return Some(winner);
}
self.move_fighter(i, &targets, pathfinder);
self.resolve_attacks(i);
}
}
None
}
fn elves(&self) -> impl Iterator<Item=&Fighter> {
self.fighters.iter().filter(|f| f.hp > 0 && f.team == Team::Elf)
}
}
impl fmt::Display for Cavern {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for y in 0..self.height as isize {
for x in 0..self.width as isize {
let pos = Point::new(x, y);
match self.fighter_at(pos) {
Some(fighter_pos) => match self.fighters[fighter_pos].team {
Team::Elf => write!(f, "E")?,
Team::Goblin => write!(f, "G")?,
}
None => write!(f, "{}", self.tile_at(pos))?,
}
}
writeln!(f)?;
}
Ok(())
}
}
struct Outcome {
elf_power: isize,
elves_remaining: Vec<Fighter>,
winner: Team,
hp_sum: isize,
time: isize,
}
impl Outcome {
fn new(cavern: &Cavern, winner: Team, time: isize) -> Self {
let hp_sum = cavern.fighters.iter().map(|f| f.hp).sum::<isize>();
Self {
hp_sum,
elf_power: cavern.elf_attack_power,
elves_remaining: cavern.elves().cloned().collect(),
winner,
time,
}
}
fn value(&self) -> isize {
self.hp_sum * self.time
}
}
impl fmt::Display for Outcome {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}\t\tteam survived after {}\trounds * {}\t\tremaining HP = {},\telf power = {},\tsurviving elves = {}",
self.winner,
self.time,
self.hp_sum,
self.value(),
self.elf_power,
self.elves_remaining.len())
}
}
fn main() {
let input = include_str!("day_15.txt");
let initial_state = Cavern::parse(input);
let total_start_time = Instant::now();
let initial_elves = initial_state.elves().count();
let chunk_size: isize = 8;
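// Try elf attack-power boosts in parallel batches of `chunk_size`, stopping at the first
// outcome in which every elf survives.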
let mut winning_outcomes = (0..).filter_map(|chunk| {
let chunk_outcomes: Vec<Outcome> = (0..chunk_size).into_par_iter()
.map(|i| {
let mut pathfinder = CavernPathfinder::new();
let attack_boost = (chunk_size * chunk + i) as isize;
let mut cavern = initial_state.clone();
cavern.elf_attack_power += attack_boost;
let mut time = 0;
loop {
if let Some(winner) = cavern.tick(&mut pathfinder) {
break Outcome::new(&cavern, winner, time);
} else {
time += 1;
}
}
})
.collect();
chunk_outcomes.into_iter()
.inspect(|outcome| println!("{}", outcome))
.find(|outcome| outcome.elves_remaining.len() == initial_elves)
});
let winning_outcome = winning_outcomes.next().unwrap();
println!("final outcome: {}", winning_outcome);
for elf in &winning_outcome.elves_remaining {
println!(" surviving elf with {} HP", elf.hp);
}
let total_elapsed = Instant::now() - total_start_time;
println!("elapsed time: {}.{}s", total_elapsed.as_secs(), total_elapsed.subsec_millis());
}
| true |
a0f3de490c25093358f07f103c9c0e54caac5d2c
|
Rust
|
DuBistKomisch/jakebarnes
|
/src/home.rs
|
UTF-8
| 434 | 2.75 | 3 |
[] |
no_license
|
use chrono::{Local, TimeZone};
use rocket::get;
use rocket_dyn_templates::Template;
use serde::Serialize;
const SECONDS_PER_YEAR: i64 = 31_557_600; // 365.25 * 24 * 60 * 60;
#[derive(Serialize)]
struct HomeContext {
age: i64
}
#[get("/")]
pub fn get() -> Template {
let age = Local::today().signed_duration_since(Local.ymd(1992, 8, 19)).num_seconds() / SECONDS_PER_YEAR;
Template::render("home", HomeContext { age })
}
| true |
e814d15b6e26bfcefe8a5a3bc2f897ffb60203a2
|
Rust
|
tov/broadword-rs
|
/src/lib.rs
|
UTF-8
| 11,603 | 3.4375 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
#![doc(html_root_url = "https://docs.rs/broadword/0.2.2")]
//! Broadword operations treat a `u64` as a parallel vector of eight `u8`s or `i8`s.
//! This module also provides a population count function [`count_ones`](fn.count_ones.html) and a
//! select function [`select1`](fn.select1.html).
//!
//! The algorithms here are from [Sebastiano Vigna, “Broadword Implementation of
//! Rank/Select Queries,”](http://sux.di.unimi.it/paper.pdf) but with several changes from
//! that work:
//!
//! - Vigna uses a 17-digit (68-bit) constant “0x0F0F0F0F0F0F0F0F0.” I believe
//! the correct constant is these 64 bits: 0x0F0F_0F0F_0F0F_0F0F.
//!
//! - Arithmetic operations are assumed to wrap on overflow. If this
//! were not the case, Algorithm 1 ([count_ones](fn.count_ones.html))
//! would overflow its last line, when multiplying by L₈.
//!
//! - Line 2 of Algorithm 2 should read
//!
//! ```
//! # let mut s: u64 = 0;
//! s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
//! ```
//!
//! In the paper, the shifted `s` appears as `x`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
/// Has the lowest bit of every byte set: `0x0101_0101_0101_0101`.
pub const L8: u64 = 0x0101_0101_0101_0101;
/// Has the highest bit of every byte set: `0x8080_8080_8080_8080`.
pub const H8: u64 = 0x8080_8080_8080_8080;
/// Counts the number of ones in a `u64`.
///
/// Branchless. Uses the broadword algorithm from Vigna.
///
/// # Examples
///
/// ```
/// use broadword::count_ones;
///
/// assert_eq!( count_ones(0x0000_0000_0000_0000), 0 );
/// assert_eq!( count_ones(0x0000_0001_0000_0000), 1 );
/// assert_eq!( count_ones(0x0000_0001_0400_0000), 2 );
/// assert_eq!( count_ones(0x0000_0001_0600_0000), 3 );
/// assert_eq!( count_ones(0x3333_0001_0600_0000), 11 );
/// ```
#[inline]
pub fn count_ones(mut x: u64) -> usize {
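// Classic broadword popcount (Vigna's Algorithm 1): fold adjacent bit pairs, then nibbles,
// then sum the eight per-byte counts into the top byte with a wrapping multiply by L8.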
x = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
x = (x & 0x3333_3333_3333_3333) + ((x >> 2) & 0x3333_3333_3333_3333);
x = (x + (x >> 4)) & 0x0F0F_0F0F_0F0F_0F0F;
(x.wrapping_mul(L8) >> 56) as usize
}
/// Finds the index of the `r`th one bit in `x`.
///
/// Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
///
/// # Examples
///
/// ```
/// use broadword::select1;
///
/// assert_eq!( select1(0, 0x0000_0000_0000_0000), None );
/// assert_eq!( select1(0, 0x0000_0000_0000_0001), Some(0) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0002), Some(1) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0004), Some(2) );
/// assert_eq!( select1(2, 0x0000_0000_0000_0004), None );
/// assert_eq!( select1(2, 0x0000_1010_1010_0114), Some(8) );
/// assert_eq!( select1(3, 0x0000_1010_1010_0114), Some(20) );
/// assert_eq!( select1(4, 0x0000_1010_1010_0114), Some(28) );
/// ```
#[inline]
pub fn select1(r: usize, x: u64) -> Option<usize> {
let result = select1_raw(r, x);
if result == 72 {None} else {Some(result)}
}
/// Finds the index of the `r`th one bit in `x`, returning 72 when not found.
///
/// Branchless. Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
#[inline]
#[allow(clippy::many_single_char_names)]
pub fn select1_raw(r: usize, x: u64) -> usize {
let r = r as u64;
let mut s = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
s = ((s + (s >> 4)) & 0x0F0F_0F0F_0F0F_0F0F).wrapping_mul(L8);
let b = (i_le8(s, r.wrapping_mul(L8)) >> 7).wrapping_mul(L8)>> 53 & !7;
let l = r - ((s << 8).wrapping_shr(b as u32) & 0xFF);
s = (u_nz8((x.wrapping_shr(b as u32) & 0xFF)
.wrapping_mul(L8) & 0x8040_2010_0804_0201) >> 7)
.wrapping_mul(L8);
(b + ((i_le8(s, l.wrapping_mul(L8)) >> 7).wrapping_mul(L8) >> 56)) as usize
}
/// Parallel ≤, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_le8;
///
/// assert_eq!( u_le8(0x03_03_04_17_92_A0_A0_A1,
/// 0x04_03_03_92_17_A0_A0_A0),
/// 0x80_80_00_80_00_80_80_00 );
/// ```
#[inline]
pub fn u_le8(x: u64, y: u64) -> u64 {
((((y | H8) - (x & !H8)) | (x ^ y)) ^ (x & !y)) & H8
}
/// Parallel ≤, treating a `u64` as a vector of 8 `i8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::i_le8;
///
/// assert_eq!( i_le8(0x03_03_04_00_FF_A0_A0_A1,
/// 0x04_03_03_FF_00_A0_A0_A0),
/// 0x80_80_00_00_80_80_80_00 );
/// ```
#[inline]
pub fn i_le8(x: u64, y: u64) -> u64 {
(((y | H8) - (x & !H8)) ^ x ^ y) & H8
}
/// Parallel >0, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_nz8;
///
/// assert_eq!( u_nz8(0x00_01_A9_40_20_17_00_06),
/// 0x00_80_80_80_80_80_00_80 );
/// ```
#[inline]
pub fn u_nz8(x: u64) -> u64 {
(((x | H8) - L8) | x) & H8
}
#[cfg(test)]
#[allow(clippy::many_single_char_names)]
mod test {
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use quickcheck::TestResult;
use super::*;
#[test]
fn count_ones_0() {
assert_eq!(0, count_ones(0));
}
#[test]
fn count_ones_1() {
assert_eq!(1, count_ones(1));
}
#[test]
fn count_ones_0000_0000_0000_0010() {
assert_eq!(1, count_ones(0x0000_0000_0000_0010));
}
#[test]
fn count_ones_1000_0000_0000_0000() {
assert_eq!(1, count_ones(0x1000_0000_0000_0000));
}
#[test]
fn count_ones_ffff_ffff_ffff_ffff() {
assert_eq!(64, count_ones(0xFFFF_FFFF_FFFF_FFFF));
}
fn count_ones_prop_base(word: u64) -> bool {
count_ones(word) == word.count_ones() as usize
}
quickcheck! {
fn count_ones_prop(word: u64) -> bool {
count_ones_prop_base(word)
}
fn count_ones_prop_hash(word: u64) -> bool {
count_ones_prop_base(hash(&word))
}
}
#[test]
fn select1_0_0() {
assert_eq!(None, select1(0, 0));
}
#[test]
fn select1_0_1() {
assert_eq!(Some(0), select1(0, 1));
}
#[test]
fn select1_0_2() {
assert_eq!(Some(1), select1(0, 2));
}
#[test]
fn select1_0_3() {
assert_eq!(Some(0), select1(0, 3));
}
#[test]
fn select1_1_2() {
assert_eq!(None, select1(1, 2));
}
#[test]
fn select1_1_3() {
assert_eq!(Some(1), select1(1, 3));
}
#[test]
fn select1_3_13() {
assert_eq!(None, select1(3, 0b1101));
}
fn select1_slow(r: usize, x: u64) -> Option<usize> {
let mut count = 0;
for index in 0 .. 64 {
if (x >> index) & 1 == 1 {
count += 1;
}
if count == r + 1 {
return Some(index);
}
}
None
}
fn select1_prop_base(r: u8, x: u64) -> TestResult {
if r > 64 { return TestResult::discard(); }
TestResult::from_bool(
select1(r as usize, x) == select1_slow(r as usize, x))
}
quickcheck! {
fn select1_prop(r: u8, x: u64) -> TestResult {
select1_prop_base(r, x)
}
fn select1_prop_hash(r: u8, x: u64) -> TestResult {
select1_prop_base(r, hash(&x))
}
}
fn get_bits(x: u64, i: u8, n: u8) -> u64 {
let mask = if n == 64 {!0} else {(1 << n) - 1};
(x >> i) & mask
}
quickcheck! {
fn u_nz8_prop(argument: (u64, u64, u64, u64)) -> bool {
let n = hash(&argument);
let r = u_nz8(n);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni != 0) != (ri == 0x80) {
return false;
}
}
true
}
}
#[test]
fn u_nz8_works() {
assert_eq!(b(0, 0, 0, 0, 0, 0, 0, 0),
u_nz8(u(0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 1, 1, 0, 1, 0, 1, 1, 1),
u_nz8(u(45, 12, 0, 129, 0, 3, 80, 1)));
assert_eq!(b(1, 1, 1, 1, 1, 1, 1, 1),
u_nz8(u(1, 2, 3, 4, 5, 6, 7, 8)));
assert_eq!(b( 1, 1, 1, 1, 0, 1, 1, 1),
u_nz8(0xFF_FF_FF_FF_00_FF_FF_FF));
}
fn u_le8_prop_base(n: u64, m: u64) -> bool {
let r = u_le8(n, m);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let mi = get_bits(m, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni <= mi) != (ri == 0x80) {
return false;
}
}
true
}
quickcheck! {
fn u_le8_prop(n: u64, m: u64) -> bool {
u_le8_prop_base(n, m)
}
fn u_le8_prop_hashed(n: (u64, u64, u64, u64),
m: (u64, u64, u64, u64)) -> bool {
let n = hash(&n);
let m = hash(&m);
u_le8_prop_base(n, m)
}
}
#[test]
fn le8_works() {
assert_eq!(b( 1, 1, 1, 1, 0, 0, 0, 0),
i_le8(i(0, 0, 0, 0, 0, 0, 0, 0),
i( 3, 2, 1, 0, -1, -2, -3, -4)));
assert_eq!(b( 0, 0, 0, 1, 1, 1, 1, 1),
i_le8(i(3, 2, 1, 0, -1, -2, -3, -4),
i( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
i_le8(i(19, 18, 17, 16, 15, 0, -1, -2),
i(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 1, 1, 0, 0, 0, 0, 0, 0),
i_le8(i(-9, -8, -7, 0, 1, 2, 3, 4),
i(-8, -8, -8, -8, -8, -8, -8, -8)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
i_le8(i(8, 3, 46, 0, 0, 0, -6, -1),
i( 7, 3, 24, 1, 0, -9, 5, -2)));
}
#[test]
fn u_le8_works() {
assert_eq!(b( 1, 1, 1, 1, 1, 1, 1, 1),
u_le8(u( 0, 0, 0, 0, 0, 0, 0, 0),
u( 7, 6, 5, 4, 3, 2, 1, 0)));
assert_eq!(b( 1, 0, 0, 0, 0, 0, 0, 0),
u_le8(u( 0, 1, 2, 3, 4, 5, 6, 7),
u( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
u_le8(u(19, 18, 17, 16, 15, 14, 13, 12),
u(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
u_le8(u( 8, 3, 46, 0, 0, 9, 3, 2),
u( 7, 3, 24, 1, 0, 0, 5, 1)));
}
/// Helpers for creating u64s.
fn b(a: u64, b: u64, c: u64, d: u64,
e: u64, f: u64, g: u64, h: u64) -> u64 {
(a << 63) | (b << 55) | (c << 47) | (d << 39) |
(e << 31) | (f << 23) | (g << 15) | (h << 7)
}
fn u(a: u8, b: u8, c: u8, d: u8,
e: u8, f: u8, g: u8, h: u8) -> u64 {
((a as u64) << 56)
| ((b as u64) << 48)
| ((c as u64) << 40)
| ((d as u64) << 32)
| ((e as u64) << 24)
| ((f as u64) << 16)
| ((g as u64) << 8)
| (h as u64)
}
fn i(a: i8, b: i8, c: i8, d: i8,
e: i8, f: i8, g: i8, h: i8) -> u64 {
u(a as u8, b as u8, c as u8, d as u8,
e as u8, f as u8, g as u8, h as u8)
}
fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
}
| true |
542b946dbab82b0020c75346c0ae0e9cb12ecfa5
|
Rust
|
andrewhickman/fs-err
|
/src/os.rs
|
UTF-8
| 355 | 2.578125 | 3 |
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! OS-specific functionality.
// The std-library has a couple more platforms than just `unix` for which these apis
// are defined, but we're using just `unix` here. We can always expand later.
#[cfg(unix)]
/// Platform-specific extensions for Unix platforms.
pub mod unix;
#[cfg(windows)]
/// Platform-specific extensions for Windows.
pub mod windows;
| true |
5e40fb9972d05f3cbb80ed8d39653a528aadeeef
|
Rust
|
konny0311/Rust_tutorial
|
/rectangles/src/lib.rs
|
UTF-8
| 1,117 | 3.984375 | 4 |
[] |
no_license
|
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, compared_rect: &Rectangle) -> bool {
let base_area : u32 = self.area();
let compared_area : u32 = compared_rect.area();
println!("Area of base rect: {}", base_area);
println!("Area of compared rect: {}", compared_area);
if base_area >= compared_area {
return true;
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn larger_can_hold_smaller() {
let larger = Rectangle {
width: 8,
height: 7,
};
let smaller = Rectangle {
width: 5,
height: 1,
};
assert!(larger.can_hold(&smaller));
}
#[test]
fn smaller_cannot_hold_larger() {
let larger = Rectangle {
width: 8,
height: 7,
};
let smaller = Rectangle {
width: 5,
height: 1,
};
assert!(!smaller.can_hold(&larger));
}
}
| true |
91e7b66ddd83fd3234c0e6ed11213d92c92b34bd
|
Rust
|
azdavis/advent-of-code
|
/years/y2015/src/d16.rs
|
UTF-8
| 1,421 | 3.21875 | 3 |
[
"MIT"
] |
permissive
|
use helpers::HashMap;
const EQ: [(&str, usize); 6] = [
("children", 3),
("samoyeds", 2),
("akitas", 0),
("vizslas", 0),
("cars", 2),
("perfumes", 1),
];
const GT: [(&str, usize); 2] = [("cats", 7), ("trees", 3)];
const LT: [(&str, usize); 2] = [("pomeranians", 3), ("goldfish", 5)];
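// Part 1 compares every reading for exact equality; part 2 keeps equality for EQ but treats
// `cats`/`trees` as strict lower bounds (GT) and `pomeranians`/`goldfish` as strict upper bounds (LT).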
fn run(s: &str, f: fn(HashMap<&str, usize>) -> bool) -> usize {
let idx = s
.lines()
.map(|line| {
let (_, info) = line.split_once(": ").unwrap();
info
.split(", ")
.map(|part| {
let (name, n) = part.split_once(": ").unwrap();
(name, n.parse().unwrap())
})
.collect::<HashMap<&str, usize>>()
})
.position(f)
.unwrap();
idx + 1
}
fn has(map: &HashMap<&str, usize>, key: &str, val: usize, f: fn(&usize, &usize) -> bool) -> bool {
map.get(key).map_or(true, |it| f(it, &val))
}
pub fn p1(s: &str) -> usize {
run(s, |ref map| {
std::iter::empty()
.chain(EQ)
.chain(GT)
.chain(LT)
.all(|(k, v)| has(map, k, v, PartialEq::eq))
})
}
pub fn p2(s: &str) -> usize {
run(s, |ref map| {
EQ.into_iter().all(|(k, v)| has(map, k, v, PartialEq::eq))
&& GT.into_iter().all(|(k, v)| has(map, k, v, PartialOrd::gt))
&& LT.into_iter().all(|(k, v)| has(map, k, v, PartialOrd::lt))
})
}
#[test]
fn t() {
let s = include_str!("input/d16.txt");
assert_eq!(p1(s), 213);
assert_eq!(p2(s), 323);
}
| true |
600b7fdb38d85ea31421cef1a964bea66a681596
|
Rust
|
khernyo/simple-message-channels-rs
|
/examples/basic.rs
|
UTF-8
| 688 | 2.59375 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use bytes::Bytes;
use simple_message_channels::{Channel, Decoder, Encoder, MessageType};
fn main() {
let mut decoder = Decoder::new(None);
let mut encoder = Encoder::new(None);
let mut bytes = encoder
.send(Channel(0), MessageType(1), &Bytes::from(b"a".as_ref()))
.unwrap();
bytes.extend_from_slice(
&encoder
.send(Channel(0), MessageType(1), &Bytes::from(b"b".as_ref()))
.unwrap(),
);
bytes.extend_from_slice(
&encoder
.send(Channel(0), MessageType(1), &Bytes::from(b"c".as_ref()))
.unwrap(),
);
for msg in decoder.messages(bytes) {
println!("{:?}", msg);
}
}
| true |
0420198e5bf463b7cfec74395b87322318abf9b9
|
Rust
|
AlisCode/utopia
|
/utopia_core/src/math/mod.rs
|
UTF-8
| 2,814 | 3.921875 | 4 |
[] |
no_license
|
#[derive(Default, Debug, Clone, Copy)]
pub struct Size {
pub width: f32,
pub height: f32,
}
impl From<(f32, f32)> for Size {
fn from((width, height): (f32, f32)) -> Self {
Size { width, height }
}
}
impl Size {
pub const ZERO: Size = Size {
width: 0.,
height: 0.,
};
pub fn new(width: f32, height: f32) -> Self {
Size { width, height }
}
pub fn contains(&self, position: Vector2) -> bool {
position.x <= self.width
&& position.x >= 0.
&& position.y <= self.height
&& position.y >= 0.
}
#[inline]
pub fn expand(self) -> Size {
Size::new(self.width.expand(), self.height.expand())
}
/// Returns a new size bounded by `min` and `max.`
///
/// # Examples
///
/// ```
/// use kurbo::Size;
///
/// let this = Size::new(0., 100.);
/// let min = Size::new(10., 10.,);
/// let max = Size::new(50., 50.);
/// assert_eq!(this.clamp(min, max), Size::new(10., 50.))
/// ```
pub fn clamp(self, min: Size, max: Size) -> Self {
let width = self.width.max(min.width).min(max.width);
let height = self.height.max(min.height).min(max.height);
Size { width, height }
}
}
#[derive(Default, Debug, Clone, Copy)]
pub struct Vector2 {
pub x: f32,
pub y: f32,
}
impl std::ops::Add<Vector2> for Vector2 {
type Output = Vector2;
fn add(self, rhs: Vector2) -> Self::Output {
Vector2 {
x: self.x + rhs.x,
y: self.y + rhs.y,
}
}
}
impl std::ops::Sub<Vector2> for Vector2 {
type Output = Vector2;
fn sub(self, rhs: Vector2) -> Self::Output {
Vector2 {
x: self.x - rhs.x,
y: self.y - rhs.y,
}
}
}
impl Vector2 {
pub const ZERO: Vector2 = Vector2 { x: 0., y: 0. };
pub fn new(x: f32, y: f32) -> Self {
Vector2 { x, y }
}
}
#[derive(Debug, Clone, Copy)]
pub struct Rectangle {
pub origin: Vector2,
pub size: Size,
}
/// Adds convenience methods to `f32` and `f64`.
pub trait FloatExt<T> {
/// Rounds to the nearest integer away from zero,
/// unless the provided value is already an integer.
///
/// It is to `ceil` what `trunc` is to `floor`.
///
/// # Examples
///
/// ```
/// use kurbo::common::FloatExt;
///
/// let f = 3.7_f64;
/// let g = 3.0_f64;
/// let h = -3.7_f64;
/// let i = -5.1_f32;
///
/// assert_eq!(f.expand(), 4.0);
/// assert_eq!(g.expand(), 3.0);
/// assert_eq!(h.expand(), -4.0);
/// assert_eq!(i.expand(), -6.0);
/// ```
fn expand(&self) -> T;
}
impl FloatExt<f32> for f32 {
#[inline]
fn expand(&self) -> f32 {
self.abs().ceil().copysign(*self)
}
}
| true |
42c6a3ab3671e43f668a0d4d4a91dd5ad929c818
|
Rust
|
tempbottle/probor
|
/rust/src/errors.rs
|
UTF-8
| 3,363 | 3 | 3 |
[] |
no_license
|
use cbor;
use std::error::Error;
use std::fmt::{self, Formatter, Debug, Display};
pub enum DecodeError {
AbsentField(&'static str),
WrongArrayLength(usize),
DuplicateKey,
UnexpectedNull,
WrongType(&'static str, cbor::DecodeError),
WrongValue(&'static str),
BadFieldValue(&'static str, Box<DecodeError>),
BadArrayElement(usize, Box<DecodeError>),
SkippingError(cbor::DecodeError),
}
impl Debug for DecodeError {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
use self::DecodeError::*;
match self {
&AbsentField(field) => write!(fmt, "absent field {:?}", field),
&WrongArrayLength(n) => write!(fmt, "wrong array length {:?}", n),
&DuplicateKey => write!(fmt, "some key is duplicated"),
&UnexpectedNull => write!(fmt, "null is not expected"),
&WrongType(exp, ref err) => write!(fmt, "{}: {}", exp, err),
&WrongValue(exp) => write!(fmt, "{}", exp),
&BadFieldValue(field, ref err)
=> write!(fmt, "Bad value for {:?}: {}", field, err),
&BadArrayElement(num, ref err)
=> write!(fmt, "Bad array element {}: {}", num, err),
&SkippingError(ref err)
=> write!(fmt, "Error when skipping value: {}", err),
}
}
}
impl Display for DecodeError {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
use self::DecodeError::*;
match self {
&AbsentField(field) => write!(fmt, "absent field {:?}", field),
&WrongArrayLength(n) => write!(fmt, "wrong array length {:?}", n),
&DuplicateKey => write!(fmt, "some key is duplicated"),
&UnexpectedNull => write!(fmt, "null is not expected"),
&WrongType(exp, ref err) => write!(fmt, "{}: {}", exp, err),
&WrongValue(exp) => write!(fmt, "{}", exp),
&BadFieldValue(field, ref err)
=> write!(fmt, "Bad value for {:?}: {}", field, err),
&BadArrayElement(num, ref err)
=> write!(fmt, "Bad array element {}: {}", num, err),
&SkippingError(ref err)
=> write!(fmt, "Error when skipping value: {}", err),
}
}
}
impl Error for DecodeError {
fn description(&self) -> &'static str {
use self::DecodeError::*;
match self {
&AbsentField(_) => "absent field",
&WrongArrayLength(_) => "wrong array length",
&DuplicateKey => "some key is duplicated",
&UnexpectedNull => "unexpected null",
&WrongType(exp, _) => exp,
&WrongValue(exp) => exp,
&BadFieldValue(_, _) => "bad field value",
&BadArrayElement(_, _) => "bad array element",
&SkippingError(_) => "error when skipping value",
}
}
fn cause(&self) -> Option<&Error> {
use self::DecodeError::*;
match self {
&AbsentField(_) => None,
&WrongArrayLength(_) => None,
&DuplicateKey => None,
&UnexpectedNull => None,
&WrongType(_, ref err) => Some(err),
&WrongValue(_) => None,
&BadFieldValue(_, ref err) => Some(&**err),
&BadArrayElement(_, ref err) => Some(&**err),
&SkippingError(ref err) => Some(err),
}
}
}
| true |
0f9312665bf86ace9d05b084984d19ab8a4a8be7
|
Rust
|
rust-lang/regex
|
/regex-cli/cmd/find/which/mod.rs
|
UTF-8
| 7,216 | 2.5625 | 3 |
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unicode"
] |
permissive
|
use std::io::{stdout, Write};
use {
anyhow::Context,
lexopt::Parser,
regex_automata::{Input, MatchError, PatternID, PatternSet},
};
use crate::{
args,
util::{self, Table},
};
mod dfa;
mod nfa;
pub fn run(p: &mut Parser) -> anyhow::Result<()> {
const USAGE: &'static str = "\
Executes a 'which' search. This type of search reports *only* which patterns
match a haystack. It doesn't report positions or even how many times each
pattern matches. (Therefore, the -c/--count flag doesn't work with this
command.)
It is generally expected to use '--match-kind all' with this command, as the
intent is to report all overlapping matches.
Note that the search will usually scan the entire haystack. It can sometimes
short circuit if all patterns are anchored or if the search knows no more
patterns will match.
This type of search is somewhat of a legacy feature because of how the
top-level RegexSet API works in the 'regex' crate. Its API is pretty limited
and it is difficult to extend to the more flexible meta regex API in
regex-automata.
The 'backtrack' engine isn't supported here because it doesn't have a 'which'
search routine. In theory it could, but it would likely be slow and no better
than just running each regex over the haystack one at a time.
The 'onepass' engine also does not support this API. (At least, not currently.)
USAGE:
regex-cli find which <engine>
ENGINES:
dense Search with the dense DFA regex engine.
hybrid Search with the lazy DFA regex engine.
meta Search with the meta regex engine.
pikevm Search with the PikeVM regex engine.
regexset Search with the top-level API regex engine.
sparse Search with the sparse DFA regex engine.
";
let cmd = args::next_as_command(USAGE, p)?;
match &*cmd {
"dense" => dfa::run_dense(p),
"hybrid" => dfa::run_hybrid(p),
"meta" => run_meta(p),
"pikevm" => nfa::run_pikevm(p),
"regex" => run_regex(p),
"sparse" => dfa::run_sparse(p),
unk => anyhow::bail!("unrecognized command '{}'", unk),
}
}
fn run_regex(p: &mut lexopt::Parser) -> anyhow::Result<()> {
const USAGE: &'static str = "\
Executes a search for full matches using the top-level API regex engine.
USAGE:
regex-cli find match regex [-p <pattern> ...] <haystack-path>
regex-cli find match regex [-p <pattern> ...] -y <haystack>
TIP:
use -h for short docs and --help for long docs
OPTIONS:
%options%
";
let mut common = args::common::Config::default();
let mut patterns = args::patterns::Config::only_flags();
let mut haystack = args::haystack::Config::default();
let mut syntax = args::syntax::Config::default();
let mut api = args::api::Config::default();
let mut find = super::Config::default();
args::configure(
p,
USAGE,
&mut [
&mut common,
&mut patterns,
&mut haystack,
&mut syntax,
&mut api,
&mut find,
],
)?;
anyhow::ensure!(
!find.count,
"'which' command does not support reporting counts",
);
let pats = patterns.get()?;
let syn = syntax.syntax()?;
let mut table = Table::empty();
let (re, time) = util::timeitr(|| api.from_patterns_set(&syn, &pats))?;
table.add("build regex time", time);
// The top-level API doesn't support regex-automata's more granular Input
// abstraction.
let input = args::input::Config::default();
let search = |input: &Input<'_>, patset: &mut PatternSet| {
let matches = re.matches(input.haystack());
for pid in matches.iter() {
let pid = PatternID::new(pid).unwrap();
patset.try_insert(pid).unwrap();
}
Ok(())
};
run_search(
&mut table,
&common,
&find,
&input,
&haystack,
re.len(),
search,
)?;
Ok(())
}
fn run_meta(p: &mut lexopt::Parser) -> anyhow::Result<()> {
const USAGE: &'static str = "\
Executes a 'which' search using the meta regex engine.
USAGE:
    regex-cli find which meta [-p <pattern> ...] <haystack-path>
    regex-cli find which meta [-p <pattern> ...] -y <haystack>
TIP:
use -h for short docs and --help for long docs
OPTIONS:
%options%
";
let mut common = args::common::Config::default();
let mut input = args::input::Config::default();
let mut patterns = args::patterns::Config::only_flags();
let mut haystack = args::haystack::Config::default();
let mut syntax = args::syntax::Config::default();
let mut meta = args::meta::Config::default();
let mut find = super::Config::default();
args::configure(
p,
USAGE,
&mut [
&mut common,
&mut input,
&mut patterns,
&mut haystack,
&mut syntax,
&mut meta,
&mut find,
],
)?;
anyhow::ensure!(
!find.count,
"'which' command does not support reporting counts",
);
let pats = patterns.get()?;
let mut table = Table::empty();
let re = if meta.build_from_patterns() {
let (re, time) = util::timeitr(|| meta.from_patterns(&syntax, &pats))?;
table.add("build meta time", time);
re
} else {
let (asts, time) = util::timeitr(|| syntax.asts(&pats))?;
table.add("parse time", time);
let (hirs, time) = util::timeitr(|| syntax.hirs(&pats, &asts))?;
table.add("translate time", time);
let (re, time) = util::timeitr(|| meta.from_hirs(&hirs))?;
table.add("build meta time", time);
re
};
let search = |input: &Input<'_>, patset: &mut PatternSet| {
Ok(re.which_overlapping_matches(input, patset))
};
run_search(
&mut table,
&common,
&find,
&input,
&haystack,
re.pattern_len(),
search,
)?;
Ok(())
}
/// Runs the 'which' search and prints, for each pattern, whether it matched the haystack.
fn run_search(
table: &mut Table,
common: &args::common::Config,
find: &super::Config,
input: &args::input::Config,
haystack: &args::haystack::Config,
pattern_len: usize,
mut search: impl FnMut(&Input<'_>, &mut PatternSet) -> Result<(), MatchError>,
) -> anyhow::Result<()> {
let mut out = stdout();
input.with(haystack, |input| {
let (patset, time) = util::timeitr(|| {
let mut patset = PatternSet::new(pattern_len);
for _ in 0..find.repeat() {
search(&input, &mut patset)?;
}
Ok::<_, anyhow::Error>(patset)
})?;
table.add("search time", time);
table.add("patterns that matched", patset.len());
if common.table() {
table.print(&mut out)?;
}
if !common.quiet {
for i in 0..pattern_len {
let pid = PatternID::new(i).context("invalid pattern ID")?;
writeln!(
out,
"{}:{:?}",
pid.as_usize(),
patset.contains(pid)
)?;
}
}
Ok(())
})
}
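// Illustrative sketch, not part of the upstream file: `run_meta` above ultimately
// delegates to `which_overlapping_matches`, which fills a `PatternSet` with the ID of
// every pattern that matches somewhere in the haystack. A minimal standalone use of
// that API (hypothetical patterns and haystack) looks like this:
#[cfg(test)]
mod which_overlapping_example {
    use regex_automata::{meta::Regex, Input, PatternID, PatternSet};

    #[test]
    fn reports_which_patterns_match() {
        let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"xyz"]).unwrap();
        let mut patset = PatternSet::new(re.pattern_len());
        re.which_overlapping_matches(&Input::new("abc 123"), &mut patset);
        // Patterns 0 and 1 match somewhere in "abc 123"; pattern 2 does not.
        assert!(patset.contains(PatternID::must(0)));
        assert!(patset.contains(PatternID::must(1)));
        assert!(!patset.contains(PatternID::must(2)));
    }
}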
| true |
f6f184591efb4fcb371cada34ccc6c8c20fe5194
|
Rust
|
GuillaumeGomez/docs.rs
|
/src/utils/consistency/diff.rs
|
UTF-8
| 6,803 | 3.203125 | 3 |
[
"MIT"
] |
permissive
|
use std::fmt::Display;
use super::data::Crate;
use itertools::{
EitherOrBoth::{Both, Left, Right},
Itertools,
};
#[derive(Debug, PartialEq)]
pub(super) enum Difference {
CrateNotInIndex(String),
CrateNotInDb(String, Vec<String>),
ReleaseNotInIndex(String, String),
ReleaseNotInDb(String, String),
ReleaseYank(String, String, bool),
}
impl Display for Difference {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Difference::CrateNotInIndex(name) => {
write!(f, "Crate in db not in index: {name}")?;
}
Difference::CrateNotInDb(name, _versions) => {
write!(f, "Crate in index not in db: {name}")?;
}
Difference::ReleaseNotInIndex(name, version) => {
write!(f, "Release in db not in index: {name} {version}")?;
}
Difference::ReleaseNotInDb(name, version) => {
write!(f, "Release in index not in db: {name} {version}")?;
}
Difference::ReleaseYank(name, version, yanked) => {
write!(
f,
"release yanked difference, index yanked:{yanked}, release: {name} {version}",
)?;
}
}
Ok(())
}
}
pub(super) fn calculate_diff<'a, I>(db_data: I, index_data: I) -> Vec<Difference>
where
I: Iterator<Item = &'a Crate>,
{
let mut result = Vec::new();
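    // `merge_join_by` relies on both inputs being sorted by crate name (and each crate's
    // releases by version): `Both` means the crate exists in db and index, `Left` only in
    // the db, `Right` only in the index.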
for crates_diff in db_data.merge_join_by(index_data, |db, index| db.name.cmp(&index.name)) {
match crates_diff {
Both(db_crate, index_crate) => {
for release_diff in db_crate
.releases
.iter()
.merge_join_by(index_crate.releases.iter(), |db_release, index_release| {
db_release.version.cmp(&index_release.version)
})
{
match release_diff {
Both(db_release, index_release) => {
let index_yanked =
index_release.yanked.expect("index always has yanked-state");
// if `db_release.yanked` is `None`, the record
// is coming from the build queue, not the `releases`
// table.
// In this case, we skip this check.
if let Some(db_yanked) = db_release.yanked {
if db_yanked != index_yanked {
result.push(Difference::ReleaseYank(
db_crate.name.clone(),
db_release.version.clone(),
index_yanked,
));
}
}
}
Left(db_release) => result.push(Difference::ReleaseNotInIndex(
db_crate.name.clone(),
db_release.version.clone(),
)),
Right(index_release) => result.push(Difference::ReleaseNotInDb(
index_crate.name.clone(),
index_release.version.clone(),
)),
}
}
}
Left(db_crate) => result.push(Difference::CrateNotInIndex(db_crate.name.clone())),
Right(index_crate) => result.push(Difference::CrateNotInDb(
index_crate.name.clone(),
index_crate
.releases
.iter()
.map(|r| r.version.clone())
.collect(),
)),
};
}
result
}
#[cfg(test)]
mod tests {
use super::super::data::Release;
use super::*;
use std::iter;
#[test]
fn test_empty() {
assert!(calculate_diff(iter::empty(), iter::empty()).is_empty());
}
#[test]
fn test_crate_not_in_index() {
let db_releases = vec![Crate {
name: "krate".into(),
releases: vec![],
}];
assert_eq!(
calculate_diff(db_releases.iter(), vec![].iter()),
vec![Difference::CrateNotInIndex("krate".into())]
);
}
#[test]
fn test_crate_not_in_db() {
let index_releases = vec![Crate {
name: "krate".into(),
releases: vec![
Release {
version: "0.0.2".into(),
yanked: Some(false),
},
Release {
version: "0.0.3".into(),
yanked: Some(true),
},
],
}];
assert_eq!(
calculate_diff(vec![].iter(), index_releases.iter()),
vec![Difference::CrateNotInDb(
"krate".into(),
vec!["0.0.2".into(), "0.0.3".into()]
)]
);
}
#[test]
fn test_yank_diff() {
let db_releases = vec![Crate {
name: "krate".into(),
releases: vec![
Release {
version: "0.0.2".into(),
yanked: Some(true),
},
Release {
version: "0.0.3".into(),
yanked: Some(true),
},
],
}];
let index_releases = vec![Crate {
name: "krate".into(),
releases: vec![
Release {
version: "0.0.2".into(),
yanked: Some(false),
},
Release {
version: "0.0.3".into(),
yanked: Some(true),
},
],
}];
assert_eq!(
calculate_diff(db_releases.iter(), index_releases.iter()),
vec![Difference::ReleaseYank(
"krate".into(),
"0.0.2".into(),
false,
)]
);
}
#[test]
fn test_yank_diff_without_db_data() {
let db_releases = vec![Crate {
name: "krate".into(),
releases: vec![Release {
version: "0.0.2".into(),
yanked: None,
}],
}];
let index_releases = vec![Crate {
name: "krate".into(),
releases: vec![Release {
version: "0.0.2".into(),
yanked: Some(false),
}],
}];
assert!(calculate_diff(db_releases.iter(), index_releases.iter()).is_empty());
}
}
| true |
0bbac58213d888db2c6fc5ccab6f9b6d65b5fa4c
|
Rust
|
digitsu/bitcrust
|
/encode-derive/src/lib.rs
|
UTF-8
| 1,524 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
#[proc_macro_derive(Encode, attributes(count))]
pub fn derive_encode(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_encode(&ast);
// Return the generated impl
gen.parse().unwrap()
}
fn impl_encode(ast: &syn::DeriveInput) -> quote::Tokens {
let name = &ast.ident;
let fields = match ast.body {
syn::Body::Struct(ref data) => data.fields(),
syn::Body::Enum(_) => panic!("#[derive(Encode)] can only be used with structs"),
};
let fields = generate_fields(&fields);
quote! {
impl Encode for #name {
fn encode(&self, mut buff: &mut Vec<u8>) -> Result<(), ::std::io::Error> {
#(#fields)*
Ok(())
}
}
}
}
fn generate_fields(fields: &[syn::Field]) -> Vec<quote::Tokens> {
let mut result = Vec::new();
for field in fields {
let ident = &field.ident;
if field.attrs.iter().any(|f| f.value.name() == "count") {
result.push(quote!{
VarInt::new(self.#ident.len() as u64).encode(&mut buff)?;
});
}
result.push(quote!{
self.#ident.encode(&mut buff)?;
});
}
result
}
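// Illustrative sketch, not part of the upstream file: for a hypothetical struct the
// derive above would expand to roughly the following (the `Encode` trait and `VarInt`
// come from the surrounding crate and are assumed here).
//
// #[derive(Encode)]
// struct Payload {
//     version: u32,
//     #[count]
//     items: Vec<Tx>,
// }
//
// // generated:
// impl Encode for Payload {
//     fn encode(&self, mut buff: &mut Vec<u8>) -> Result<(), ::std::io::Error> {
//         self.version.encode(&mut buff)?;
//         VarInt::new(self.items.len() as u64).encode(&mut buff)?;
//         self.items.encode(&mut buff)?;
//         Ok(())
//     }
// }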
| true |
a17f255fca0e66f19b2a2daa3fa08a40e659d8fa
|
Rust
|
SViksha/jormungandr
|
/tests/common/file_utils.rs
|
UTF-8
| 1,415 | 3.53125 | 4 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
#![allow(dead_code)]
extern crate mktemp;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
/// Gets path in temp directory (does not create it)
///
/// # Arguments
///
/// * `file_path` - A string slice that holds the path
/// that will be glued to temp directory path
///
/// # Example
///
/// use file_utils::get_path_in_temp;
/// let path_in_temp = "test.txt";
/// get_path_in_temp(&path_in_temp);
///
pub fn get_path_in_temp(file_path: &str) -> PathBuf {
let mut path = get_temp_folder();
path.push(&file_path);
path
}
pub fn create_empty_file_in_temp(file_name: &str) -> PathBuf {
let path = create_file_in_temp(&file_name, "");
path
}
pub fn get_temp_folder() -> PathBuf {
let temp_dir = mktemp::Temp::new_dir().unwrap();
let path = temp_dir.to_path_buf();
temp_dir.release();
path
}
/// Creates file in temporary folder
pub fn create_file_in_temp(file_name: &str, content: &str) -> PathBuf {
let path = get_path_in_temp(&file_name);
let mut file = File::create(&path).unwrap();
file.write_all(content.as_bytes())
.expect(&format!("cannot write to file {:?}", path));
path
}
/// Creates file with content
pub fn create_file_with_content(path: &PathBuf, content: &str) {
let mut file = File::create(&path).unwrap();
file.write_all(content.as_bytes())
.expect(&format!("cannot write to file {:?}", path));
}
| true |
f6c7b0e35bc5edd86297693eed3f9ac60f6379f4
|
Rust
|
RReverser/serde-xml-rs
|
/src/de/map.rs
|
UTF-8
| 5,070 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
use std::io::Read;
use serde::de::{self, IntoDeserializer, Unexpected};
use serde::forward_to_deserialize_any;
use xml::attribute::OwnedAttribute;
use xml::reader::XmlEvent;
use crate::error::{Error, Result};
use crate::Deserializer;
use super::buffer::BufferedXmlReader;
pub struct MapAccess<'a, R: Read, B: BufferedXmlReader<R>> {
attrs: ::std::vec::IntoIter<OwnedAttribute>,
/// Cache of attribute value, populated when visitor calls `next_key_seed`; should be read & emptied straight after
/// by visitor call to `next_value_seed`
next_attr_value: Option<String>,
de: &'a mut Deserializer<R, B>,
/// Whether this `MapAccess` is to deserialize all inner contents of an outer element.
inner_value: bool,
}
impl<'a, R: 'a + Read, B: BufferedXmlReader<R>> MapAccess<'a, R, B> {
pub fn new(
de: &'a mut Deserializer<R, B>,
attrs: Vec<OwnedAttribute>,
inner_value: bool,
) -> Self {
MapAccess {
attrs: attrs.into_iter(),
next_attr_value: None,
de: de,
inner_value: inner_value,
}
}
}
impl<'de, 'a, R: 'a + Read, B: BufferedXmlReader<R>> de::MapAccess<'de> for MapAccess<'a, R, B> {
type Error = Error;
fn next_key_seed<K: de::DeserializeSeed<'de>>(&mut self, seed: K) -> Result<Option<K::Value>> {
debug_assert_eq!(self.next_attr_value, None);
match self.attrs.next() {
// Read all attributes first
Some(OwnedAttribute { name, value }) => {
self.next_attr_value = Some(value);
seed.deserialize(name.local_name.into_deserializer())
.map(Some)
}
None => match *self.de.peek()? {
XmlEvent::StartElement { ref name, .. } => seed
.deserialize(
if !self.inner_value {
name.local_name.as_str()
} else {
"$value"
}
.into_deserializer(),
)
.map(Some),
XmlEvent::Characters(_) => seed.deserialize("$value".into_deserializer()).map(Some),
// Any other event: assume end of map values (actual check for `EndElement` done by the originating
// `Deserializer`)
_ => Ok(None),
},
}
}
fn next_value_seed<V: de::DeserializeSeed<'de>>(&mut self, seed: V) -> Result<V::Value> {
match self.next_attr_value.take() {
Some(value) => seed.deserialize(AttrValueDeserializer(value)),
None => {
if !self.inner_value {
if let XmlEvent::StartElement { .. } = *self.de.peek()? {
self.de.set_map_value();
}
}
let result = seed.deserialize(&mut *self.de)?;
Ok(result)
}
}
}
fn size_hint(&self) -> Option<usize> {
self.attrs.size_hint().1
}
}
struct AttrValueDeserializer(String);
macro_rules! deserialize_type_attr {
($deserialize:ident => $visit:ident) => {
fn $deserialize<V: de::Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
visitor.$visit(self.0.parse()?)
}
};
}
impl<'de> de::Deserializer<'de> for AttrValueDeserializer {
type Error = Error;
fn deserialize_any<V: de::Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
visitor.visit_string(self.0)
}
deserialize_type_attr!(deserialize_i8 => visit_i8);
deserialize_type_attr!(deserialize_i16 => visit_i16);
deserialize_type_attr!(deserialize_i32 => visit_i32);
deserialize_type_attr!(deserialize_i64 => visit_i64);
deserialize_type_attr!(deserialize_u8 => visit_u8);
deserialize_type_attr!(deserialize_u16 => visit_u16);
deserialize_type_attr!(deserialize_u32 => visit_u32);
deserialize_type_attr!(deserialize_u64 => visit_u64);
deserialize_type_attr!(deserialize_f32 => visit_f32);
deserialize_type_attr!(deserialize_f64 => visit_f64);
fn deserialize_enum<V: de::Visitor<'de>>(
self,
_name: &str,
_variants: &'static [&'static str],
visitor: V,
) -> Result<V::Value> {
visitor.visit_enum(self.0.into_deserializer())
}
fn deserialize_option<V: de::Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
visitor.visit_some(self)
}
fn deserialize_bool<V: de::Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
match self.0.as_str() {
"true" | "1" => visitor.visit_bool(true),
"false" | "0" => visitor.visit_bool(false),
_ => Err(de::Error::invalid_value(
Unexpected::Str(&self.0),
&"a boolean",
)),
}
}
forward_to_deserialize_any! {
char str string unit seq bytes map unit_struct newtype_struct tuple_struct
struct identifier tuple ignored_any byte_buf
}
}
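// Illustrative sketch, not part of the upstream file: from the user's side, the
// MapAccess above is what lets XML attributes deserialize as ordinary struct fields
// and element text deserialize under the special "$value" key. Hypothetical example:
//
// #[derive(serde::Deserialize)]
// struct Item {
//     id: u32,                      // from the id="..." attribute
//     #[serde(rename = "$value")]
//     text: String,                 // from the element's character data
// }
//
// let item: Item = serde_xml_rs::from_str(r#"<item id="7">hello</item>"#).unwrap();
// assert_eq!((item.id, item.text.as_str()), (7, "hello"));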
| true |
7f581bfa56b198a773b886de56296c50f279fe87
|
Rust
|
AlekseyAstakhov/diskomap
|
/src/file_worker.rs
|
UTF-8
| 3,170 | 3.5625 | 4 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use std::sync::mpsc::{channel, Sender};
use std::thread::{spawn, JoinHandle};
/// For write to the file in background thread.
pub(crate) struct FileWorker {
task_sender: Sender<FileWorkerTask>,
join_handle: Option<JoinHandle<()>>,
}
impl FileWorker {
/// Constructs 'FileWorker' for write to the file in background thread.
/// Writes in the order of queue.
/// Parameter 'file' is opened and exclusive locked file.
/// Parameter 'error_callback' callback for receive errors or writing to the file.
pub fn new<Writer>(
mut file: Writer,
mut error_callback: Option<Box<dyn FnMut(std::io::Error) + Send>>
) -> Self
where
Writer: std::io::Write + Send + 'static
{
let (tasks_sender, task_receiver) = channel();
let join_handle = Some(spawn(move || 'thread_loop: loop {
let task = task_receiver.recv()
                .unwrap_or_else(|err| unreachable!("{}", err)); // unreachable because owner thread will join this thread handle after send FileWorkerTask::Stop and only after will disconnect channel
match task {
FileWorkerTask::WriteString(data) => {
if let Err(err) = file.write_all(data.as_bytes()) {
if let Some(callback) = &mut error_callback { callback(err); }
}
},
FileWorkerTask::WriteBytes(data) => {
if let Err(err) = file.write_all(&data) {
if let Some(callback) = &mut error_callback { callback(err); }
}
},
FileWorkerTask::Stop => {
break 'thread_loop;
},
}
}));
FileWorker { task_sender: tasks_sender, join_handle }
}
/// Write data to the file in the background thread.
pub fn write_string(&self, data: String) {
let task = FileWorkerTask::WriteString(data);
self.task_sender.send(task)
            .unwrap_or_else(|err| unreachable!("{}", err)); // unreachable because channel receiver will drop only after out of thread and thread can't stop while FileWorkerTask::Stop is not received
}
/// Write data to the file in the background thread.
pub fn write_bytes(&self, data: Vec<u8>) {
let task = FileWorkerTask::WriteBytes(data);
self.task_sender.send(task)
            .unwrap_or_else(|err| unreachable!("{}", err)); // unreachable because channel receiver will drop only after out of thread and thread can't stop while FileWorkerTask::Stop is not received
}
}
impl Drop for FileWorker {
fn drop(&mut self) {
self.task_sender.send(FileWorkerTask::Stop)
            .unwrap_or_else(|err| unreachable!("{}", err)); // unreachable because thread can't stop while FileWorkerTask::Stop is not received
self.join_handle.take().map(JoinHandle::join);
}
}
/// Task for sending to worker thread.
enum FileWorkerTask {
/// Write line to the file in the background thread.
WriteString(String),
/// Write data block to the file in the background thread.
WriteBytes(Vec<u8>),
/// Stop worker.
Stop,
}
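// Illustrative sketch, not part of the upstream file: any `Write + Send + 'static`
// sink works, so a plain file (or an in-memory Vec<u8> in tests) can exercise the
// worker. Writes are queued in order and the thread is joined when the worker drops.
//
// let file = std::fs::File::create("example.log").unwrap();
// let worker = FileWorker::new(file, Some(Box::new(|err| eprintln!("write failed: {}", err))));
// worker.write_string("first line\n".to_string());
// worker.write_bytes(b"raw bytes\n".to_vec());
// // dropping `worker` sends FileWorkerTask::Stop and joins the background thread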
| true |
8bbcfa597d7736d7a2016b827624eac2a3f6c704
|
Rust
|
AntonGepting/tmux-interface-rs
|
/src/commands/windows_and_panes/previous_window_tests.rs
|
UTF-8
| 1,022 | 2.921875 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#[test]
fn previous_window() {
use crate::PreviousWindow;
use std::borrow::Cow;
// tmux ^0.9:
// ```text
// previous-window [-a] [-t target-session]
// (alias: prev)
// ```
//
// tmux ^0.8:
// ```text
// previous-window [-t target-session]
// (alias: prev)
// ```
let previous_window = PreviousWindow::new();
#[cfg(feature = "tmux_0_9")]
let previous_window = previous_window.parent_sighup();
#[cfg(feature = "tmux_0_8")]
let previous_window = previous_window.target_session("1");
#[cfg(not(feature = "cmd_alias"))]
let cmd = "previous-window";
#[cfg(feature = "cmd_alias")]
let cmd = "prev";
let mut s = Vec::new();
s.push(cmd);
#[cfg(feature = "tmux_0_9")]
s.push("-a");
#[cfg(feature = "tmux_0_8")]
s.extend_from_slice(&["-t", "1"]);
let s: Vec<Cow<str>> = s.into_iter().map(|a| a.into()).collect();
let previous_window = previous_window.build().to_vec();
assert_eq!(previous_window, s);
}
| true |
4da80c59b75f084be494e6faa63f8d0d45e39e1b
|
Rust
|
pac85/vulkano-events
|
/vulkano/src/buffer/sys.rs
|
UTF-8
| 19,119 | 2.59375 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Low level implementation of buffers.
//!
//! Wraps directly around Vulkan buffers, with the exceptions of a few safety checks.
//!
//! The `UnsafeBuffer` type is the lowest-level buffer object provided by this library. It is used
//! internally by the higher-level buffer types. You are strongly encouraged to have excellent
//! knowledge of the Vulkan specs if you want to use an `UnsafeBuffer`.
//!
//! Here is what you must take care of when you use an `UnsafeBuffer`:
//!
//! - Synchronization, i.e. avoid reading and writing simultaneously to the same buffer.
//! - Memory aliasing considerations. If you use the same memory to back multiple resources, you
//! must ensure that they are not used together and must enable some additional flags.
//! - Binding memory correctly and only once. If you use sparse binding, respect the rules of
//! sparse binding.
//! - Type safety.
use crate::buffer::BufferUsage;
use crate::check_errors;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::memory::DeviceMemory;
use crate::memory::DeviceMemoryAllocError;
use crate::memory::MemoryRequirements;
use crate::sync::Sharing;
use crate::vk;
use crate::Error;
use crate::OomError;
use crate::VulkanObject;
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::mem;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Arc;
/// Data storage in a GPU-accessible location.
pub struct UnsafeBuffer {
buffer: vk::Buffer,
device: Arc<Device>,
size: usize,
usage: BufferUsage,
}
impl UnsafeBuffer {
/// Creates a new buffer of the given size.
///
/// See the module's documentation for information about safety.
///
/// # Panic
///
    /// - Panics if `usage` is empty.
///
pub unsafe fn new<'a, I>(
device: Arc<Device>,
size: usize,
mut usage: BufferUsage,
sharing: Sharing<I>,
sparse: Option<SparseLevel>,
) -> Result<(UnsafeBuffer, MemoryRequirements), BufferCreationError>
where
I: Iterator<Item = u32>,
{
let vk = device.pointers();
// Ensure we're not trying to create an empty buffer.
let size = if size == 0 {
// To avoid panicking when allocating 0 bytes, use a 1-byte buffer.
1
} else {
size
};
// Checking sparse features.
let flags = if let Some(sparse_level) = sparse {
if !device.enabled_features().sparse_binding {
return Err(BufferCreationError::SparseBindingFeatureNotEnabled);
}
if sparse_level.sparse_residency && !device.enabled_features().sparse_residency_buffer {
return Err(BufferCreationError::SparseResidencyBufferFeatureNotEnabled);
}
if sparse_level.sparse_aliased && !device.enabled_features().sparse_residency_aliased {
return Err(BufferCreationError::SparseResidencyAliasedFeatureNotEnabled);
}
sparse_level.into()
} else {
0
};
if usage.device_address
&& !(device.enabled_features().buffer_device_address
|| device.enabled_features().ext_buffer_device_address)
{
usage.device_address = false;
if vk::BufferUsageFlags::from(usage) == 0 {
// return an error iff device_address was the only requested usage and the
// feature isn't enabled. Otherwise we'll hit that assert below.
// TODO: This is weird, why not just return an error always if the feature is not enabled?
// You can't use BufferUsage::all() anymore, but is that a good idea anyway?
return Err(BufferCreationError::DeviceAddressFeatureNotEnabled);
}
}
let usage_bits = usage.into();
// Checking for empty BufferUsage.
assert!(
usage_bits != 0,
"Can't create buffer with empty BufferUsage"
);
let buffer = {
let (sh_mode, sh_indices) = match sharing {
Sharing::Exclusive => (vk::SHARING_MODE_EXCLUSIVE, SmallVec::<[u32; 8]>::new()),
Sharing::Concurrent(ids) => (vk::SHARING_MODE_CONCURRENT, ids.collect()),
};
let infos = vk::BufferCreateInfo {
sType: vk::STRUCTURE_TYPE_BUFFER_CREATE_INFO,
pNext: ptr::null(),
flags,
size: size as u64,
usage: usage_bits,
sharingMode: sh_mode,
queueFamilyIndexCount: sh_indices.len() as u32,
pQueueFamilyIndices: sh_indices.as_ptr(),
};
let mut output = MaybeUninit::uninit();
check_errors(vk.CreateBuffer(
device.internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
let mem_reqs = {
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}
let mut output = if device.loaded_extensions().khr_get_memory_requirements2 {
let infos = vk::BufferMemoryRequirementsInfo2KHR {
sType: vk::STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR,
pNext: ptr::null_mut(),
buffer: buffer,
};
let mut output2 = if device.loaded_extensions().khr_dedicated_allocation {
Some(vk::MemoryDedicatedRequirementsKHR {
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
pNext: ptr::null_mut(),
prefersDedicatedAllocation: mem::zeroed(),
requiresDedicatedAllocation: mem::zeroed(),
})
} else {
None
};
let mut output = vk::MemoryRequirements2KHR {
sType: vk::STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
pNext: output2
.as_mut()
.map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
.unwrap_or(ptr::null_mut()) as *mut _,
memoryRequirements: mem::zeroed(),
};
vk.GetBufferMemoryRequirements2KHR(device.internal_object(), &infos, &mut output);
debug_assert!(output.memoryRequirements.size >= size as u64);
debug_assert!(output.memoryRequirements.memoryTypeBits != 0);
let mut out = MemoryRequirements::from(output.memoryRequirements);
if let Some(output2) = output2 {
debug_assert_eq!(output2.requiresDedicatedAllocation, 0);
out.prefer_dedicated = output2.prefersDedicatedAllocation != 0;
}
out
} else {
let mut output: MaybeUninit<vk::MemoryRequirements> = MaybeUninit::uninit();
vk.GetBufferMemoryRequirements(
device.internal_object(),
buffer,
output.as_mut_ptr(),
);
let output = output.assume_init();
debug_assert!(output.size >= size as u64);
debug_assert!(output.memoryTypeBits != 0);
MemoryRequirements::from(output)
};
// We have to manually enforce some additional requirements for some buffer types.
let limits = device.physical_device().limits();
if usage.uniform_texel_buffer || usage.storage_texel_buffer {
output.alignment = align(
output.alignment,
limits.min_texel_buffer_offset_alignment() as usize,
);
}
if usage.storage_buffer {
output.alignment = align(
output.alignment,
limits.min_storage_buffer_offset_alignment() as usize,
);
}
if usage.uniform_buffer {
output.alignment = align(
output.alignment,
limits.min_uniform_buffer_offset_alignment() as usize,
);
}
output
};
let obj = UnsafeBuffer {
buffer: buffer,
device: device.clone(),
size: size as usize,
usage,
};
Ok((obj, mem_reqs))
}
/// Binds device memory to this buffer.
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize) -> Result<(), OomError> {
let vk = self.device.pointers();
// We check for correctness in debug mode.
debug_assert!({
let mut mem_reqs = MaybeUninit::uninit();
vk.GetBufferMemoryRequirements(
self.device.internal_object(),
self.buffer,
mem_reqs.as_mut_ptr(),
);
let mem_reqs = mem_reqs.assume_init();
mem_reqs.size <= (memory.size() - offset) as u64
&& (offset as u64 % mem_reqs.alignment) == 0
&& mem_reqs.memoryTypeBits & (1 << memory.memory_type().id()) != 0
});
// Check for alignment correctness.
{
let limits = self.device().physical_device().limits();
if self.usage().uniform_texel_buffer || self.usage().storage_texel_buffer {
debug_assert!(offset % limits.min_texel_buffer_offset_alignment() as usize == 0);
}
if self.usage().storage_buffer {
debug_assert!(offset % limits.min_storage_buffer_offset_alignment() as usize == 0);
}
if self.usage().uniform_buffer {
debug_assert!(offset % limits.min_uniform_buffer_offset_alignment() as usize == 0);
}
}
check_errors(vk.BindBufferMemory(
self.device.internal_object(),
self.buffer,
memory.internal_object(),
offset as vk::DeviceSize,
))?;
Ok(())
}
/// Returns the size of the buffer in bytes.
#[inline]
pub fn size(&self) -> usize {
self.size
}
    /// Returns the usage the buffer was created with.
#[inline]
pub fn usage(&self) -> BufferUsage {
self.usage
}
/// Returns a key unique to each `UnsafeBuffer`. Can be used for the `conflicts_key` method.
#[inline]
pub fn key(&self) -> u64 {
self.buffer
}
}
unsafe impl VulkanObject for UnsafeBuffer {
type Object = vk::Buffer;
const TYPE: vk::ObjectType = vk::OBJECT_TYPE_BUFFER;
#[inline]
fn internal_object(&self) -> vk::Buffer {
self.buffer
}
}
unsafe impl DeviceOwned for UnsafeBuffer {
#[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
}
impl fmt::Debug for UnsafeBuffer {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan buffer {:?}>", self.buffer)
}
}
impl Drop for UnsafeBuffer {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyBuffer(self.device.internal_object(), self.buffer, ptr::null());
}
}
}
impl PartialEq for UnsafeBuffer {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.buffer == other.buffer && self.device == other.device
}
}
impl Eq for UnsafeBuffer {}
impl Hash for UnsafeBuffer {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.buffer.hash(state);
self.device.hash(state);
}
}
/// The level of sparse binding that a buffer should be created with.
#[derive(Debug, Copy, Clone)]
pub struct SparseLevel {
pub sparse_residency: bool,
pub sparse_aliased: bool,
}
impl SparseLevel {
#[inline]
pub fn none() -> SparseLevel {
SparseLevel {
sparse_residency: false,
sparse_aliased: false,
}
}
}
impl From<SparseLevel> for vk::BufferCreateFlags {
#[inline]
fn from(val: SparseLevel) -> Self {
let mut result = vk::BUFFER_CREATE_SPARSE_BINDING_BIT;
if val.sparse_residency {
result |= vk::BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
}
if val.sparse_aliased {
result |= vk::BUFFER_CREATE_SPARSE_ALIASED_BIT;
}
result
}
}
/// The device address usage flag was not set.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DeviceAddressUsageNotEnabledError;
impl error::Error for DeviceAddressUsageNotEnabledError {}
impl fmt::Display for DeviceAddressUsageNotEnabledError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("the device address usage flag was not set on this buffer")
}
}
/// Error that can happen when creating a buffer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BufferCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocError),
/// Sparse binding was requested but the corresponding feature wasn't enabled.
SparseBindingFeatureNotEnabled,
/// Sparse residency was requested but the corresponding feature wasn't enabled.
SparseResidencyBufferFeatureNotEnabled,
/// Sparse aliasing was requested but the corresponding feature wasn't enabled.
SparseResidencyAliasedFeatureNotEnabled,
/// Device address was requested but the corresponding feature wasn't enabled.
DeviceAddressFeatureNotEnabled,
}
impl error::Error for BufferCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
BufferCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for BufferCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
BufferCreationError::AllocError(_) => "allocating memory failed",
BufferCreationError::SparseBindingFeatureNotEnabled => {
"sparse binding was requested but the corresponding feature wasn't enabled"
}
BufferCreationError::SparseResidencyBufferFeatureNotEnabled => {
"sparse residency was requested but the corresponding feature wasn't enabled"
}
BufferCreationError::SparseResidencyAliasedFeatureNotEnabled => {
"sparse aliasing was requested but the corresponding feature wasn't enabled"
}
BufferCreationError::DeviceAddressFeatureNotEnabled => {
"device address was requested but the corresponding feature wasn't enabled"
}
}
)
}
}
impl From<OomError> for BufferCreationError {
#[inline]
fn from(err: OomError) -> BufferCreationError {
BufferCreationError::AllocError(err.into())
}
}
impl From<Error> for BufferCreationError {
#[inline]
fn from(err: Error) -> BufferCreationError {
match err {
err @ Error::OutOfHostMemory => {
BufferCreationError::AllocError(DeviceMemoryAllocError::from(err))
}
err @ Error::OutOfDeviceMemory => {
BufferCreationError::AllocError(DeviceMemoryAllocError::from(err))
}
_ => panic!("unexpected error: {:?}", err),
}
}
}
#[cfg(test)]
mod tests {
use std::iter::Empty;
use super::BufferCreationError;
use super::BufferUsage;
use super::SparseLevel;
use super::UnsafeBuffer;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::sync::Sharing;
#[test]
fn create() {
let (device, _) = gfx_dev_and_queue!();
let (buf, reqs) = unsafe {
UnsafeBuffer::new(
device.clone(),
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
None,
)
}
.unwrap();
assert!(reqs.size >= 128);
assert_eq!(buf.size(), 128);
assert_eq!(&**buf.device() as *const Device, &*device as *const Device);
}
#[test]
fn missing_feature_sparse_binding() {
let (device, _) = gfx_dev_and_queue!();
let sparse = Some(SparseLevel::none());
unsafe {
match UnsafeBuffer::new(
device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse,
) {
Err(BufferCreationError::SparseBindingFeatureNotEnabled) => (),
_ => panic!(),
}
};
}
#[test]
fn missing_feature_sparse_residency() {
let (device, _) = gfx_dev_and_queue!(sparse_binding);
let sparse = Some(SparseLevel {
sparse_residency: true,
sparse_aliased: false,
});
unsafe {
match UnsafeBuffer::new(
device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse,
) {
Err(BufferCreationError::SparseResidencyBufferFeatureNotEnabled) => (),
_ => panic!(),
}
};
}
#[test]
fn missing_feature_sparse_aliased() {
let (device, _) = gfx_dev_and_queue!(sparse_binding);
let sparse = Some(SparseLevel {
sparse_residency: false,
sparse_aliased: true,
});
unsafe {
match UnsafeBuffer::new(
device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse,
) {
Err(BufferCreationError::SparseResidencyAliasedFeatureNotEnabled) => (),
_ => panic!(),
}
};
}
#[test]
fn create_empty_buffer() {
let (device, _) = gfx_dev_and_queue!();
unsafe {
let _ = UnsafeBuffer::new(
device,
0,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
None,
);
};
}
}
| true |
97db85d620417a8ef8305972e9fb32fa111388a5
|
Rust
|
reline/AdventOfCode2020
|
/src/reduce.rs
|
UTF-8
| 433 | 3.109375 | 3 |
[] |
no_license
|
pub trait ReduceExt: Iterator {
fn reduce<F>(self, f: F) -> Option<Self::Item>
where
Self: Sized,
F: FnMut(Self::Item, Self::Item) -> Self::Item;
}
impl<I: Iterator> ReduceExt for I {
fn reduce<F>(mut self, f: F) -> Option<Self::Item>
where
Self: Sized,
F: FnMut(Self::Item, Self::Item) -> Self::Item,
{
let first = self.next()?;
Some(self.fold(first, f))
}
}
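// Illustrative sketch, not part of the upstream file: folding without a separate seed
// value. std's later `Iterator::reduce` has the same shape, so the call below goes
// through the trait path explicitly to make it unambiguous.
#[cfg(test)]
mod reduce_example {
    use super::ReduceExt;

    #[test]
    fn folds_without_a_seed() {
        let sum = ReduceExt::reduce(vec![1, 2, 3, 4].into_iter(), |a, b| a + b);
        assert_eq!(sum, Some(10));

        let empty: Option<i32> = ReduceExt::reduce(std::iter::empty(), |a, b| a + b);
        assert_eq!(empty, None);
    }
}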
| true |
2fb8ffb1d61e8c0f59c7d82803d4b24aad7200bc
|
Rust
|
luke-titley/imath-traits
|
/src/lib.rs
|
UTF-8
| 1,933 | 2.9375 | 3 |
[
"Apache-2.0"
] |
permissive
|
//! imath-traits provides a set of traits which constrain the types used in Rust translations of
//! C++ APIs that rely on `Imath`, or `Imath-alike` types.
//!
//! This is solely about memory layout and being able to convert the implementing types back and
//! forward into slices and pointers to be able to be used in the FFI call, thus the traits contain
//! no methods other than for converting back and forth between slices and raw pointers.
//!
//! To use, simply add the feature for the math crate you need to the dependency
//! of any crate that uses imath-traits (these features are named `imath_<crate>`), and its
//! types will just work with any function from that crate that expects a Vec2<T>, Vec3<T>,
//! Vec4<T>, Bound2<T> or Bound3<T>:
//!
//! ```toml
//! openexr = { version = "0.10-3.0.1", features=["imath_cgmath"] }
//! ```
//!
//! Currently, we support cgmath, glam, nalgebra and nalgebra-glm. If you need another math
//! crate, implement support for it and submit a PR, or request it. Note that the
//! crate must support 2-, 3- and 4-dimensional vectors of i32, f32 and f64.
//!
pub use half::f16;
pub mod vec;
pub use vec::*;
pub mod bound;
pub use bound::*;
pub mod matrix;
pub use matrix::*;
pub mod zero;
pub use zero::Zero;
#[cfg(feature = "cgmath")]
pub mod impl_cgmath;
#[cfg(feature = "cgmath")]
pub use impl_cgmath::{Box2, Box2d, Box2f, Box2i, Box3, Box3d, Box3f, Box3i};
#[cfg(feature = "glam")]
pub mod impl_glam;
#[cfg(feature = "glam")]
pub use impl_glam::{Box2d, Box2f, Box2i, Box3d, Box3f, Box3i};
#[cfg(feature = "nalgebra")]
pub mod impl_nalgebra;
#[cfg(feature = "nalgebra")]
pub use impl_nalgebra::{Box2d, Box2f, Box2i, Box3d, Box3f, Box3i};
#[cfg(feature = "nalgebra-glm")]
pub mod impl_nalgebra_glm;
#[cfg(feature = "nalgebra_glm")]
pub use impl_nalgebra_glm::{Box2d, Box2f, Box2i, Box3d, Box3f, Box3i};
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}
| true |
59aa738a83693c0269a9ff382784749f99a253a0
|
Rust
|
kdeconinck/Monkey-1
|
/src/compiler/symbol_table.rs
|
UTF-8
| 2,565 | 3.359375 | 3 |
[
"MIT"
] |
permissive
|
use std::collections::HashMap;
use std::mem;
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum SymbolScope {
GLOBAL,
LOCAL,
BUILTIN,
FREE
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Symbol {
pub name: String,
pub scope: SymbolScope,
pub index: usize,
}
impl Default for Symbol {
fn default() -> Self {
Symbol {
name: String::from(""),
scope: SymbolScope::GLOBAL,
index: 0
}
}
}
impl Symbol {
pub fn new(name: &str, scope: SymbolScope, index: usize) -> Self {
Symbol {
name: String::from(name),
scope,
index,
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SymbolTable {
pub outer: Option<Box<SymbolTable>>,
pub free_symbols: Vec<Symbol>,
store: HashMap<String, Symbol>,
pub num_definitions: usize,
}
impl SymbolTable {
pub fn new() -> Self {
SymbolTable {
outer: None,
free_symbols: vec![],
store: HashMap::new(),
num_definitions: 0,
}
}
pub fn new_enclosed(outer: SymbolTable) -> Self {
SymbolTable {
outer: Some(Box::new(outer)),
free_symbols: vec![],
store: HashMap::new(),
num_definitions: 0
}
}
pub fn define(&mut self, name: &str) -> Symbol {
let scope = match &self.outer {
            Some(_) => SymbolScope::LOCAL,
None => SymbolScope::GLOBAL,
};
let symbol = Symbol {
name: name.to_string(),
scope,
index: self.num_definitions,
};
self.store.insert(name.to_string(), symbol.clone()); //FIXME: clone?
self.num_definitions += 1;
symbol
}
pub fn define_builtin(&mut self, index: usize, name: String) -> Symbol {
let symbol = Symbol{
name: name.clone(),
scope: SymbolScope::BUILTIN,
index
};
self.store.insert(name, symbol.clone()); //TODO: rc
symbol
}
pub fn define_free(&mut self, name: &String, original: &Symbol) -> Symbol {
self.free_symbols.push(original.clone());
let symbol = Symbol{
name: name.clone(),
scope: SymbolScope::FREE,
index: self.free_symbols.len() - 1
};
self.store.insert(name.to_owned(), symbol.clone()); //TODO: rc
symbol
}
pub fn resolve(&mut self, name: &str) -> Option<Symbol> {
match self.store.get(name) {
Some(v) => Some(v.clone()),
None => {
match &mut self.outer {
Some(o) => {
match o.resolve(name) {
Some(obj) => {
match obj.scope {
SymbolScope::GLOBAL | SymbolScope::BUILTIN => {
Some(obj)
}
_ => {
let sym = self.define_free(&obj.name, &obj);
Some(sym)
}
}
},
None => None
}
}
None => None
}
}
}
}
}
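// Illustrative sketch, not part of the upstream file: how scopes fall out of
// define/resolve for a global table and an enclosed (local) one.
#[cfg(test)]
mod symbol_table_example {
    use super::*;

    #[test]
    fn resolves_through_enclosing_scopes() {
        let mut global = SymbolTable::new();
        assert_eq!(global.define("a").scope, SymbolScope::GLOBAL);

        let mut local = SymbolTable::new_enclosed(global);
        assert_eq!(local.define("b").scope, SymbolScope::LOCAL);

        // "a" was defined in the outer table, so resolving it from the inner table
        // still yields a GLOBAL symbol rather than a FREE one.
        assert_eq!(local.resolve("a").unwrap().scope, SymbolScope::GLOBAL);
    }
}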
| true |
ffcde8b2db25184b4e150811eb58a951369a8dc6
|
Rust
|
timvermeulen/advent-of-code
|
/src/solutions/year2018/day23.rs
|
UTF-8
| 6,659 | 3.71875 | 4 |
[] |
no_license
|
use super::*;
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
struct Point {
x: i32,
y: i32,
z: i32,
}
impl Point {
fn distance_to(&self, position: Point) -> i32 {
(self.x - position.x).abs() + (self.y - position.y).abs() + (self.z - position.z).abs()
}
fn distance_to_origin(&self) -> i32 {
self.x.abs() + self.y.abs() + self.z.abs()
}
}
impl Ord for Point {
fn cmp(&self, other: &Self) -> Ordering {
self.distance_to_origin().cmp(&(other.distance_to_origin()))
}
}
impl PartialOrd for Point {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
/// A cube with a side length of a power of 2.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
struct Cube {
origin: Point,
side_length: i32,
}
impl Cube {
fn new(origin: Point, exponent: u32) -> Cube {
let side_length = 1 << exponent;
Cube {
origin,
side_length,
}
}
/// Splits the cube into smaller cubes each with half the side
/// length of the original cube.
fn split(&self) -> [Self; 8] {
let side_length = self.side_length / 2;
let cube = |x: bool, y: bool, z: bool| -> Self {
let extra = |flag: bool| if flag { side_length } else { 0 };
Self {
origin: Point {
x: self.origin.x + extra(x),
y: self.origin.y + extra(y),
z: self.origin.z + extra(z),
},
side_length,
}
};
[
cube(false, false, false),
cube(false, false, true),
cube(false, true, false),
cube(false, true, true),
cube(true, false, false),
cube(true, false, true),
cube(true, true, false),
cube(true, true, true),
]
}
/// Returns the origin if that's the only point inside the cube,
/// and `None` otherwise.
fn only_point(&self) -> Option<Point> {
if self.side_length == 1 {
Some(self.origin)
} else {
None
}
}
}
#[derive(Debug)]
struct NanoBot {
position: Point,
radius: i32,
}
impl NanoBot {
/// Returns `true` if the given point is in range of the bot.
fn reaches_point(&self, point: Point) -> bool {
self.position.distance_to(point) <= self.radius
}
/// Returns `true` if any point of the cube is in range of the bot.
fn reaches_cube(&self, cube: Cube) -> bool {
let distance = |bot: i32, cube_origin: i32| {
if bot < cube_origin {
cube_origin - bot
} else if bot >= cube_origin + cube.side_length {
bot - cube_origin - cube.side_length + 1
} else {
0
}
};
distance(self.position.x, cube.origin.x)
+ distance(self.position.y, cube.origin.y)
+ distance(self.position.z, cube.origin.z)
<= self.radius
}
}
/// A cube that can be compared based on how many bots are in range of it.
#[derive(Debug)]
struct OrdCube {
cube: Cube,
bots_in_range: usize,
}
impl PartialEq for OrdCube {
fn eq(&self, other: &Self) -> bool {
self.cube == other.cube
}
}
impl Eq for OrdCube {}
impl Ord for OrdCube {
fn cmp(&self, other: &OrdCube) -> Ordering {
self.bots_in_range
.cmp(&other.bots_in_range)
// if both cubes have the same number of bots in range,
// the one closer to the origin is considered "larger"
.then(other.cube.origin.cmp(&self.cube.origin))
}
}
impl PartialOrd for OrdCube {
fn partial_cmp(&self, other: &OrdCube) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl OrdCube {
fn new(cube: Cube, bots: &[NanoBot]) -> OrdCube {
let bots_in_range = bots.iter().filter(|b| b.reaches_cube(cube)).count();
OrdCube {
cube,
bots_in_range,
}
}
/// Splits the cube into smaller cubes each with half the side
/// length of the original cube.
fn split(&self, bots: &[NanoBot]) -> Vec<Self> {
self.cube
.split()
.iter()
.map(|&c| OrdCube::new(c, bots))
.collect()
}
/// Returns the origin if that's the only point inside the cube,
/// and `None` otherwise.
fn only_point(&self) -> Option<Point> {
self.cube.only_point()
}
}
fn parser<'a>() -> impl Parser<&'a str, Output = Vec<NanoBot>> {
let position = parser::i32()
.sep_by(token(','), |iter| {
Some(Point {
x: iter.next()?,
y: iter.next()?,
z: iter.next()?,
})
})
.between(token('<'), token('>'));
let bot = chain((string("pos="), position, string(", r="), parser::i32()))
.map(|(_, position, _, radius)| NanoBot { position, radius });
bot.collect_sep_by(token('\n'))
}
fn part1(bots: &[NanoBot]) -> u32 {
let bot = bots.iter().max_by_key(|bot| bot.radius).unwrap();
bots.iter()
.filter(|b| bot.reaches_point(b.position))
.count() as u32
}
fn part2(bots: &[NanoBot]) -> i32 {
let xs = || bots.iter().map(|b| b.position.x);
let ys = || bots.iter().map(|b| b.position.y);
let zs = || bots.iter().map(|b| b.position.z);
let min_x = xs().min().unwrap();
let max_x = xs().max().unwrap();
let min_y = ys().min().unwrap();
let max_y = ys().max().unwrap();
let min_z = zs().min().unwrap();
let max_z = zs().max().unwrap();
let size = cmp::max(cmp::max(max_x - min_x, max_y - min_y), max_z - min_z);
let exponent = 8 * mem::size_of::<i32>() as u32 - size.leading_zeros();
let origin = Point {
x: min_x,
y: min_y,
z: min_z,
};
let cube = OrdCube::new(Cube::new(origin, exponent), &bots);
let mut heap = BinaryHeap::new();
heap.push(cube);
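    // Best-first search over an implicit octree: always expand the cube in range of the most
    // bots (ties broken toward the origin), so the first single-point cube popped is optimal.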
while let Some(cube) = heap.pop() {
match cube.only_point() {
None => cube.split(bots).into_iter().for_each(|c| heap.push(c)),
Some(point) => return point.distance_to_origin(),
}
}
unreachable!()
}
pub fn solve(input: &str) -> (u32, i32) {
let bots = parser().parse_to_end(&input).unwrap();
(part1(&bots), part2(&bots))
}
#[async_std::test]
async fn test() -> Result<(), InputError> {
let input = get_input(2018, 23).await?;
let bots = parser().parse_to_end(&input).unwrap();
assert_eq!(part1(&bots), 761);
assert_eq!(part2(&bots), 89_915_526);
Ok(())
}
| true |
9f3fe51ed2d3c495222e1eb1ad8ce20300978bf2
|
Rust
|
bmc-msft/command-group
|
/src/stdlib/child/unix.rs
|
UTF-8
| 3,920 | 2.609375 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use std::{
convert::TryInto,
io::{Error, ErrorKind, Read, Result},
os::unix::{
io::{AsRawFd, RawFd},
process::ExitStatusExt,
},
process::{Child, ChildStderr, ChildStdin, ChildStdout, ExitStatus},
};
use nix::{
errno::Errno,
libc,
poll::{poll, PollFd, PollFlags},
sys::{
signal::{killpg, Signal},
wait::WaitPidFlag,
},
unistd::Pid,
};
pub(super) struct ChildImp {
pgid: Pid,
inner: Child,
}
impl ChildImp {
pub(super) fn new(inner: Child) -> Self {
Self {
pgid: Pid::from_raw(inner.id().try_into().expect("Command PID > i32::MAX")),
inner,
}
}
pub(super) fn take_stdin(&mut self) -> Option<ChildStdin> {
self.inner.stdin.take()
}
pub(super) fn take_stdout(&mut self) -> Option<ChildStdout> {
self.inner.stdout.take()
}
pub(super) fn take_stderr(&mut self) -> Option<ChildStderr> {
self.inner.stderr.take()
}
pub fn inner(&mut self) -> &mut Child {
&mut self.inner
}
pub fn into_inner(self) -> Child {
self.inner
}
pub(super) fn signal_imp(&mut self, sig: Signal) -> Result<()> {
killpg(self.pgid, sig).map_err(Error::from)
}
pub fn kill(&mut self) -> Result<()> {
self.signal_imp(Signal::SIGKILL)
}
pub fn id(&self) -> u32 {
self.inner.id()
}
fn wait_imp(&mut self, flag: WaitPidFlag) -> Result<Option<ExitStatus>> {
let negpid = Pid::from_raw(-self.pgid.as_raw());
// we can't use the safe wrapper directly because it doesn't return the raw status, and we
// need it to convert to the std's ExitStatus.
let mut status: i32 = 0;
match unsafe { libc::waitpid(negpid.into(), &mut status as *mut libc::c_int, flag.bits()) }
{
0 => Ok(None),
res => Errno::result(res)
.map_err(Error::from)
.map(|_| Some(ExitStatus::from_raw(status))),
}
}
pub fn wait(&mut self) -> Result<ExitStatus> {
self.wait_imp(WaitPidFlag::empty())
.transpose()
.unwrap_or_else(|| {
Err(Error::new(
ErrorKind::Other,
"blocking waitpid returned pid=0",
))
})
}
pub fn try_wait(&mut self) -> Result<Option<ExitStatus>> {
self.wait_imp(WaitPidFlag::WNOHANG)
}
pub(super) fn read_both(
mut out_r: ChildStdout,
out_v: &mut Vec<u8>,
mut err_r: ChildStderr,
err_v: &mut Vec<u8>,
) -> Result<()> {
let out_fd = out_r.as_raw_fd();
let err_fd = err_r.as_raw_fd();
set_nonblocking(out_fd, true)?;
set_nonblocking(err_fd, true)?;
let mut fds = [
PollFd::new(out_fd, PollFlags::POLLIN),
PollFd::new(err_fd, PollFlags::POLLIN),
];
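        // Poll both pipes and drain whichever is ready. `read` returns Ok(true) once a stream
        // hits EOF; at that point the other stream is switched back to blocking and read to
        // completion before returning.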
loop {
poll(&mut fds, -1)?;
if fds[0].revents().is_some() && read(&mut out_r, out_v)? {
set_nonblocking(err_fd, false)?;
return err_r.read_to_end(err_v).map(drop);
}
if fds[1].revents().is_some() && read(&mut err_r, err_v)? {
set_nonblocking(out_fd, false)?;
return out_r.read_to_end(out_v).map(drop);
}
}
fn read(r: &mut impl Read, dst: &mut Vec<u8>) -> Result<bool> {
match r.read_to_end(dst) {
Ok(_) => Ok(true),
Err(e) => {
if e.raw_os_error() == Some(libc::EWOULDBLOCK)
|| e.raw_os_error() == Some(libc::EAGAIN)
{
Ok(false)
} else {
Err(e)
}
}
}
}
#[cfg(target_os = "linux")]
fn set_nonblocking(fd: RawFd, nonblocking: bool) -> Result<()> {
let v = nonblocking as libc::c_int;
let res = unsafe { libc::ioctl(fd, libc::FIONBIO, &v) };
Errno::result(res).map_err(Error::from).map(drop)
}
#[cfg(not(target_os = "linux"))]
fn set_nonblocking(fd: RawFd, nonblocking: bool) -> Result<()> {
use nix::fcntl::{fcntl, FcntlArg, OFlag};
let mut flags = OFlag::from_bits_truncate(fcntl(fd, FcntlArg::F_GETFL)?);
flags.set(OFlag::O_NONBLOCK, nonblocking);
fcntl(fd, FcntlArg::F_SETFL(flags))
.map_err(Error::from)
.map(drop)
}
}
}
pub trait UnixChildExt {
fn signal(&mut self, sig: Signal) -> Result<()>;
}
impl UnixChildExt for ChildImp {
fn signal(&mut self, sig: Signal) -> Result<()> {
self.signal_imp(sig)
}
}
| true |
4fb51dd792c567b093a017b005efa41186ec2e2d
|
Rust
|
mmitteregger/cuke-runner
|
/core/glue/src/scenario.rs
|
UTF-8
| 2,451 | 3.203125 | 3 |
[
"MIT"
] |
permissive
|
use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::fmt;
use failure::Fail;
#[derive(Debug, Default)]
pub struct Scenario {
data: HashMap<TypeId, Box<dyn Any>>,
}
impl Scenario {
#[doc(hidden)]
pub fn new() -> Scenario {
Scenario {
data: HashMap::new(),
}
}
pub fn set<T: 'static>(&mut self, data: T) {
self.data.insert(TypeId::of::<T>(), Box::new(data));
}
pub fn get<T: 'static>(&self) -> Option<&T> {
self.data.get(&TypeId::of::<T>())
.map(|value| value.downcast_ref::<T>().unwrap())
}
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.data.get_mut(&TypeId::of::<T>())
.map(|value| value.downcast_mut::<T>().unwrap())
}
}
pub type FromScenarioResult<T> = ::std::result::Result<T, FromScenarioError>;
pub trait FromScenario<'a>: Sized {
fn from_scenario(scenario: &'a Scenario) -> FromScenarioResult<Self>;
}
pub trait FromScenarioMut<'a>: Sized {
fn from_scenario_mut(scenario: &'a mut Scenario) -> FromScenarioResult<Self>;
}
/// The error holding information for a failed `FromScenario` conversion.
#[derive(Fail, Debug)]
pub struct FromScenarioError {
pub message: String,
}
impl FromScenarioError {
pub fn new<S: Into<String>>(message: S) -> FromScenarioError {
FromScenarioError {
message: message.into()
}
}
}
impl fmt::Display for FromScenarioError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.message.fmt(f)
}
}
impl From<String> for FromScenarioError {
fn from(message: String) -> FromScenarioError {
FromScenarioError {
message
}
}
}
impl<'a> FromScenario<'a> for &'a Scenario {
fn from_scenario(scenario: &'a Scenario) -> FromScenarioResult<&'a Scenario> {
Ok(scenario)
}
}
impl<'a> FromScenarioMut<'a> for &'a mut Scenario {
fn from_scenario_mut(scenario: &'a mut Scenario) -> FromScenarioResult<&'a mut Scenario> {
Ok(scenario)
}
}
impl<'a, T: 'static> FromScenario<'a> for Option<&'a T> {
fn from_scenario(scenario: &'a Scenario) -> FromScenarioResult<Option<&'a T>> {
Ok(scenario.get::<T>())
}
}
impl<'a, T: 'static> FromScenarioMut<'a> for Option<&'a mut T> {
fn from_scenario_mut(scenario: &'a mut Scenario) -> FromScenarioResult<Option<&'a mut T>> {
Ok(scenario.get_mut::<T>())
}
}
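// Illustrative sketch, not part of the upstream file: stashing typed state on a
// Scenario and reading it back, which is what the FromScenario impls above build on.
#[cfg(test)]
mod scenario_example {
    use super::*;

    #[derive(Debug, PartialEq)]
    struct WorldState {
        count: u32,
    }

    #[test]
    fn stores_and_retrieves_typed_data() {
        let mut scenario = Scenario::new();
        scenario.set(WorldState { count: 1 });
        scenario.get_mut::<WorldState>().unwrap().count += 1;
        assert_eq!(scenario.get::<WorldState>(), Some(&WorldState { count: 2 }));
    }
}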
| true |
6b9c993c27451b9ea58b0d4792fd8f44f5dc2941
|
Rust
|
trussed-dev/trussed
|
/src/virt/store.rs
|
UTF-8
| 6,591 | 2.578125 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] |
permissive
|
use std::{
fs::{File, OpenOptions},
io::{Read as _, Seek as _, SeekFrom, Write as _},
marker::PhantomData,
path::PathBuf,
};
use generic_array::typenum::{U512, U8};
use littlefs2::{const_ram_storage, driver::Storage, fs::Allocation};
use crate::{
store,
store::Store,
types::{LfsResult, LfsStorage},
};
pub trait StoreProvider {
type Store: Store;
unsafe fn ifs() -> &'static mut <Self::Store as Store>::I;
unsafe fn store() -> Self::Store;
unsafe fn reset(&self);
}
const STORAGE_SIZE: usize = 512 * 128;
static mut INTERNAL_RAM_STORAGE: Option<InternalStorage> = None;
static mut INTERNAL_RAM_FS_ALLOC: Option<Allocation<InternalStorage>> = None;
static mut INTERNAL_FILESYSTEM_STORAGE: Option<FilesystemStorage> = None;
static mut INTERNAL_FILESYSTEM_FS_ALLOC: Option<Allocation<FilesystemStorage>> = None;
static mut EXTERNAL_STORAGE: Option<ExternalStorage> = None;
static mut EXTERNAL_FS_ALLOC: Option<Allocation<ExternalStorage>> = None;
static mut VOLATILE_STORAGE: Option<VolatileStorage> = None;
static mut VOLATILE_FS_ALLOC: Option<Allocation<VolatileStorage>> = None;
const_ram_storage!(InternalStorage, STORAGE_SIZE);
const_ram_storage!(ExternalStorage, STORAGE_SIZE);
const_ram_storage!(VolatileStorage, STORAGE_SIZE);
pub struct FilesystemStorage(PathBuf);
impl Storage for FilesystemStorage {
const READ_SIZE: usize = 16;
const WRITE_SIZE: usize = 16;
const BLOCK_SIZE: usize = 512;
const BLOCK_COUNT: usize = 128;
const BLOCK_CYCLES: isize = -1;
type CACHE_SIZE = U512;
type LOOKAHEAD_SIZE = U8;
fn read(&mut self, offset: usize, buffer: &mut [u8]) -> LfsResult<usize> {
debug!("read: offset: {}, len: {}", offset, buffer.len());
let mut file = File::open(&self.0).unwrap();
file.seek(SeekFrom::Start(offset as _)).unwrap();
let bytes_read = file.read(buffer).unwrap();
assert!(bytes_read <= buffer.len());
Ok(bytes_read as _)
}
fn write(&mut self, offset: usize, data: &[u8]) -> LfsResult<usize> {
debug!("write: offset: {}, len: {}", offset, data.len());
if offset + data.len() > STORAGE_SIZE {
return Err(littlefs2::io::Error::NoSpace);
}
let mut file = OpenOptions::new().write(true).open(&self.0).unwrap();
file.seek(SeekFrom::Start(offset as _)).unwrap();
let bytes_written = file.write(data).unwrap();
assert_eq!(bytes_written, data.len());
file.flush().unwrap();
Ok(bytes_written)
}
fn erase(&mut self, offset: usize, len: usize) -> LfsResult<usize> {
debug!("erase: offset: {}, len: {}", offset, len);
if offset + len > STORAGE_SIZE {
return Err(littlefs2::io::Error::NoSpace);
}
let mut file = OpenOptions::new().write(true).open(&self.0).unwrap();
file.seek(SeekFrom::Start(offset as _)).unwrap();
let zero_block = [0xFFu8; Self::BLOCK_SIZE];
for _ in 0..(len / Self::BLOCK_SIZE) {
let bytes_written = file.write(&zero_block).unwrap();
assert_eq!(bytes_written, Self::BLOCK_SIZE);
}
file.flush().unwrap();
Ok(len)
}
}
store!(
FilesystemStore,
Internal: FilesystemStorage,
External: ExternalStorage,
Volatile: VolatileStorage
);
impl Default for FilesystemStore {
fn default() -> Self {
Self { __: PhantomData }
}
}
#[derive(Clone, Debug)]
pub struct Filesystem {
internal: PathBuf,
format: bool,
}
impl Filesystem {
pub fn new(internal: impl Into<PathBuf>) -> Self {
let internal = internal.into();
let len = u64::try_from(STORAGE_SIZE).unwrap();
let format = if let Ok(file) = File::open(&internal) {
assert_eq!(file.metadata().unwrap().len(), len);
false
} else {
let file = File::create(&internal).expect("failed to create storage file");
file.set_len(len).expect("failed to set storage file len");
true
};
Self { internal, format }
}
}
impl StoreProvider for Filesystem {
type Store = FilesystemStore;
unsafe fn ifs() -> &'static mut FilesystemStorage {
INTERNAL_FILESYSTEM_STORAGE
.as_mut()
.expect("ifs not initialized")
}
unsafe fn store() -> Self::Store {
Self::Store { __: PhantomData }
}
unsafe fn reset(&self) {
INTERNAL_FILESYSTEM_STORAGE.replace(FilesystemStorage(self.internal.clone()));
INTERNAL_FILESYSTEM_FS_ALLOC.replace(littlefs2::fs::Filesystem::allocate());
reset_external();
reset_volatile();
Self::store()
.mount(
INTERNAL_FILESYSTEM_FS_ALLOC.as_mut().unwrap(),
INTERNAL_FILESYSTEM_STORAGE.as_mut().unwrap(),
EXTERNAL_FS_ALLOC.as_mut().unwrap(),
EXTERNAL_STORAGE.as_mut().unwrap(),
VOLATILE_FS_ALLOC.as_mut().unwrap(),
VOLATILE_STORAGE.as_mut().unwrap(),
self.format,
)
.expect("failed to mount filesystem");
}
}
store!(
RamStore,
Internal: InternalStorage,
External: ExternalStorage,
Volatile: VolatileStorage
);
#[derive(Copy, Clone, Debug, Default)]
pub struct Ram {}
impl StoreProvider for Ram {
type Store = RamStore;
unsafe fn ifs() -> &'static mut InternalStorage {
INTERNAL_RAM_STORAGE.as_mut().expect("ifs not initialized")
}
unsafe fn store() -> Self::Store {
Self::Store { __: PhantomData }
}
unsafe fn reset(&self) {
INTERNAL_RAM_STORAGE.replace(InternalStorage::new());
INTERNAL_RAM_FS_ALLOC.replace(littlefs2::fs::Filesystem::allocate());
reset_external();
reset_volatile();
Self::store()
.mount(
INTERNAL_RAM_FS_ALLOC.as_mut().unwrap(),
INTERNAL_RAM_STORAGE.as_mut().unwrap(),
EXTERNAL_FS_ALLOC.as_mut().unwrap(),
EXTERNAL_STORAGE.as_mut().unwrap(),
VOLATILE_FS_ALLOC.as_mut().unwrap(),
VOLATILE_STORAGE.as_mut().unwrap(),
true,
)
.expect("failed to mount filesystem");
}
}
unsafe fn reset_external() {
EXTERNAL_STORAGE.replace(ExternalStorage::new());
EXTERNAL_FS_ALLOC.replace(littlefs2::fs::Filesystem::allocate());
}
unsafe fn reset_volatile() {
VOLATILE_STORAGE.replace(VolatileStorage::new());
VOLATILE_FS_ALLOC.replace(littlefs2::fs::Filesystem::allocate());
}
| true |
8fdd027fb346935d43419bf99cd225bf970aee92
|
Rust
|
seppo0010/rusttojs
|
/test/call3.rs
|
UTF-8
| 157 | 3.09375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
struct MyStruct { n: i8 }
impl MyStruct {
fn get(&self) -> i8 { self.n }
}
fn main() {
let myvar = MyStruct { n: 3 };
println!("{}", myvar.get());
}
| true |
d0cd428bd8b7c21706bd82d657957a8d7caa2c38
|
Rust
|
barabadzhi/improvement-mkp
|
/src/knapsack/neighborhood.rs
|
UTF-8
| 3,011 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
use std::sync::RwLock;
use knapsack::item::Item;
use knapsack::rayon::prelude::*;
use knapsack::statistics::Statistics;
#[derive(Debug)]
pub struct Neighborhood<'a> {
pub result: &'a Statistics,
pub base_items: Vec<&'a Item>,
pub neighbors: Vec<(usize, &'a Item)>,
}
impl<'a> Neighborhood<'a> {
pub fn new(items: &'a [Item], result: &'a Statistics) -> Neighborhood<'a> {
let (base_items, not_picked_items): (Vec<&Item>, Vec<&Item>) = items
.into_iter()
.partition(|&item| result.picked_items.contains(&item.id));
debug_assert_eq!(
not_picked_items.len(),
items.len() - result.picked_items.len()
);
let mut neighbors = Vec::with_capacity(result.picked_items.len() * not_picked_items.len());
for item in not_picked_items {
for index in 0..result.picked_items.len() {
let neighbor = (index, item);
neighbors.push(neighbor);
}
}
Neighborhood {
result,
base_items,
neighbors,
}
}
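// Each neighbour generated in `new` means "replace the picked item at position
// `index` with one currently-unpicked item"; `best_neighbor` below evaluates all
// of these candidate swaps in parallel and keeps the most profitable feasible one.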
pub fn best_neighbor(&self, capacity: &[u32]) -> Statistics {
let result = RwLock::new(Statistics::new());
self.neighbors.par_iter().for_each(|neighbor| {
let mut capacity_left = capacity.to_vec();
let mut internal_result = Statistics::new();
let mut items = self.base_items.clone();
items[neighbor.0] = neighbor.1;
for item in items {
let mut item_can_be_used = false;
for (constraint_index, constraint) in capacity_left.iter().enumerate() {
if item.weights[constraint_index] > *constraint {
item_can_be_used = false;
break;
} else {
item_can_be_used = true;
}
}
if item_can_be_used {
for (constraint_index, constraint) in capacity_left.iter_mut().enumerate() {
*constraint -= item.weights[constraint_index];
}
internal_result.picked_items.push(item.id);
internal_result.total_profit += item.profit;
}
}
if internal_result.total_profit > result.read().unwrap().total_profit {
for (left, total) in capacity_left.iter().zip(capacity.iter()) {
internal_result.utilization.push(format!(
"{:.2}%",
((f64::from(*total - *left) / f64::from(*total)) * 100_f64)
))
}
*result.write().unwrap() = internal_result.clone();
}
});
let result = result.into_inner().unwrap();
if result.total_profit > self.result.total_profit {
result
} else {
self.result.clone()
}
}
}
| true |
58d17c1f9e6001b2cb047bae8185afcc7be5e310
|
Rust
|
nraynaud/mpu9250
|
/src/ak8963.rs
|
UTF-8
| 1,028 | 2.75 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
//! AK8963, I2C magnetometer
// I2C slave address
pub const I2C_ADDRESS: u8 = 0x0c;
pub const R: u8 = 1 << 7;
pub const W: u8 = 0 << 7;
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
pub enum Register {
WHO_AM_I = 0x00, // should return 0x48
INFO = 0x01,
ST1 = 0x02, // data ready status bit 0
XOUT_L = 0x03, // data
XOUT_H = 0x04,
YOUT_L = 0x05,
YOUT_H = 0x06,
ZOUT_L = 0x07,
ZOUT_H = 0x08,
ST2 = 0x09, // Data overflow bit 3 and data read error status bit 2
CNTL = 0x0A, /* Power down (0000), single-measurement (0001), self-test
* (1000) and Fuse ROM (1111) modes on bits 3:0 */
ASTC = 0x0C, // Self test control
I2CDIS = 0x0F, // I2C disable
ASAX = 0x10, // Fuse ROM x-axis sensitivity adjustment value
ASAY = 0x11, // Fuse ROM y-axis sensitivity adjustment value
ASAZ = 0x12, // Fuse ROM z-axis sensitivity adjustment value
}
impl Register {
pub fn addr(&self) -> u8 {
*self as u8
}
}
| true |
7da1f80d1d7ea7e9f9ee3558b511ac0b27215683
|
Rust
|
waynedupreez1/eeprom34c04-rs
|
/src/eeprom34c04.rs
|
UTF-8
| 11,533 | 3.015625 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use hal::blocking::i2c::{Write, WriteRead, Read};
use Eeprom34c04;
use SlaveAddr;
use super::error;
//Constants
const RW_FUNC_BITS: u8 = 0b1010000;
// Page Address function bits
const PA_FUNC_BITS: u8 = 0b0110110;
impl<I2C, E> Eeprom34c04<I2C>
where I2C: Write<Error = E> + WriteRead<Error = E>,
{
/// Create a new instance of a 34c00 device
pub fn new_34c04(i2c: I2C, address: SlaveAddr) -> Self {
// Converts the address bits and ORs them into the read/write function bits
let rw_func_bits = match address {
SlaveAddr::A2A1A0(a2, a1, a0) => {
RW_FUNC_BITS | ((a2 as u8) << 2) | ((a1 as u8) << 1) | a0 as u8
}
};
Eeprom34c04 {
i2c: i2c,
rw_func_bits: rw_func_bits,
last_addr_w: 0,
last_addr_r: 0,
}
}
}
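// Worked example (illustrative): with `SlaveAddr::A2A1A0(false, false, true)` the
// 7-bit device address becomes RW_FUNC_BITS | 0b001 = 0b1010000 | 0b001 = 0x51.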
/// Common methods
impl<I2C> Eeprom34c04<I2C> {
/// Destroy driver instance, return I²C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
}
/// Common methods
impl<I2C, E> Eeprom34c04<I2C>
where I2C: Write<Error = E> + WriteRead<Error = E> + Read<Error = E> {
/// Write a single byte in an address.
///
/// After writing a byte, the EEPROM enters an internally-timed write cycle
/// to the nonvolatile memory.
/// During this time all inputs are disabled and the EEPROM will not
/// respond until the write is complete.
pub fn write_byte(&mut self, address: u32, data: u8) -> Result<(), error::Error<E>> {
addr_in_bounds(address)?;
let (page_addr, mem_addr) = addr_convert(address)?;
self.last_addr_w = address;
let spa_dont_care = [0; 2];
self.i2c.write(page_addr, &spa_dont_care).map_err(error::Error::I2C)?;
let array = [mem_addr, data];
self.i2c.write(self.rw_func_bits, &array).map_err(error::Error::I2C)?;
Ok(())
}
/// Read a single byte from an address.
pub fn read_byte(&mut self, address: u32) -> Result<u8, error::Error<E>> {
addr_in_bounds(address)?;
let (page_addr, mem_addr) = addr_convert(address)?;
self.last_addr_r = address;
let spa_dont_care = [0; 2];
self.i2c.write(page_addr, &spa_dont_care).map_err(error::Error::I2C)?;
let memaddr = [mem_addr];
let mut data = [0; 1];
self.i2c.write_read(self.rw_func_bits, &memaddr, &mut data).map_err(error::Error::I2C).and(Ok(data[0]))
}
/// Read multiple bytes starting at an address.
pub fn read_byte_array(&mut self, address: u32, data: &mut [u8]) -> Result<(), error::Error<E>> {
addr_in_bounds(address)?;
addr_in_bounds_page_wr(address, data.len() as u32)?;
let (page_addr, mem_addr) = addr_convert(address)?;
self.last_addr_r = address;
let spa_dont_care = [0; 2];
self.i2c.write(page_addr, &spa_dont_care).map_err(error::Error::I2C)?;
let memaddr = [mem_addr];
// Dummy read first; otherwise the sequential read only returns
// the first value correctly.
let mut dummy_data = [0; 1];
self.i2c.write_read(self.rw_func_bits, &memaddr, &mut dummy_data).map_err(error::Error::I2C)?;
self.i2c.write_read(self.rw_func_bits, &memaddr, data).map_err(error::Error::I2C)
}
/// Write multiple bytes starting at an address.
///
/// The maximum amount of data that can be written to the EEPROM in one go is 16 bytes.
///
/// The function accepts the following byte array sizes:
/// 1. 2 bytes
/// 2. 4 bytes
/// 3. 8 bytes
/// 4. 16 bytes
/// Passing any other size returns `InvalidDataArrayMultiple`.
///
pub fn write_byte_array(&mut self, address: u32, data_array: &[u8]) -> Result<(), error::Error<E>> {
//Only allowed up to 16 bytes to be written
if data_array.len() > 16 { return Err(error::Error::TooMuchData) };
addr_in_bounds(address)?;
addr_in_bounds_page_wr(address, data_array.len() as u32)?;
let (page_addr, mem_addr) = addr_convert(address)?;
self.last_addr_w = address;
let spa_dont_care = [0; 2];
self.i2c.write(page_addr, &spa_dont_care).map_err(error::Error::I2C)?;
match data_array.len() {
2 => {
let array = [mem_addr, data_array[0], data_array[1] ];
self.i2c.write(self.rw_func_bits, &array).map_err(error::Error::I2C)?;
}
4 => {
let array = [mem_addr, data_array[0], data_array[1], data_array[2], data_array[3] ];
self.i2c.write(self.rw_func_bits, &array).map_err(error::Error::I2C)?;
}
8 => {
let array = [mem_addr, data_array[0], data_array[1], data_array[2], data_array[3],
data_array[4], data_array[5], data_array[6], data_array[7] ];
self.i2c.write(self.rw_func_bits, &array).map_err(error::Error::I2C)?;
}
16 => {
let array = [mem_addr, data_array[0], data_array[1], data_array[2], data_array[3],
data_array[4], data_array[5], data_array[6], data_array[7],
data_array[8], data_array[9], data_array[10],data_array[11],
data_array[12],data_array[13],data_array[14],data_array[15] ];
self.i2c.write(self.rw_func_bits, &array).map_err(error::Error::I2C)?;
}
_ => { return Err(error::Error::InvalidDataArrayMultiple) }
}
Ok(())
}
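// Illustrative call (hypothetical; `i2c` comes from your HAL, and `SlaveAddr::A2A1A0`
// is assumed to take three bools, as suggested by `new_34c04` above):
//
// let mut eeprom = Eeprom34c04::new_34c04(i2c, SlaveAddr::A2A1A0(false, false, false));
// eeprom.write_byte_array(0x10, &[0xAA; 8])?; // 8 bytes is one of the allowed sizes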
/// Previously read address
pub fn previous_read_addr(&self) -> u32 {
self.last_addr_r
}
/// Previously written address
pub fn previous_write_addr(&self) -> u32 {
self.last_addr_w
}
}
//Private
/// When doing multi-byte reads and writes we have to ensure the access does not
/// run past the end of the memory quadrant we are operating in.
///
fn addr_in_bounds_page_wr<E>(address: u32, data_size: u32) -> Result<(), error::Error<E>> {
let (page_addr, mem_addr) = addr_convert(address)?;
// If we are in memory quad 0 or 2 then the address can be a max value of 0x7F
if (mem_addr >> 7) == 0 {
if (mem_addr as u32 + data_size) <= 0x7F { return Ok(()) }
else { return Err(error::Error::TooMuchData) };
};
// If we are in memory quad 1 or 3 then the address can be a max value of 0xFF
if (mem_addr as u32 + data_size) <= 0xFF { return Ok(()) }
else { return Err(error::Error::TooMuchData) }
}
/// Check if the requested address is in bounds.
/// The maximum address is 0x1FF = 511 = 0b0_0001_1111_1111
/// for this 512-byte EEPROM.
///
fn addr_in_bounds<E>(address: u32) -> Result<(), error::Error<E>> {
let val = address >> 9;
if val == 0 {Ok(())}
else {Err(error::Error::InvalidAddr)}
}
/// Converts the address (a 16-bit value as described in `addr_in_bounds`)
/// to the appropriate memory quadrant 0/1 (page_address 0) or 2/3 (page_address 1).
/// tuple.0 = page address
/// tuple.1 = memory address ranging from 0 - 255
///
/// Lower memory
/// Quadrant 0 can save bytes 0 - 127
/// Quadrant 1 can save bytes 128 - 255
///
/// Upper memory
/// Quadrant 2 can save bytes 0 - 127
/// Quadrant 3 can save bytes 128 - 255
///
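/// Worked example (illustrative): address 0x1A5 lies in quadrant 3, so this
/// returns (PA_FUNC_BITS | 1, 0xA5).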
fn addr_convert<E>(address: u32) -> Result<(u8, u8), error::Error<E>> {
//In quad 0
if (address >> 7) == 0 {
return Ok((PA_FUNC_BITS, address as u8))
};
//In quad 1
if (address >> 8) == 0 {
return Ok((PA_FUNC_BITS, address as u8))
};
//In quad 2
//Mask the top bit and rotate
let new_addr = address & 0b011111111;
if (new_addr >> 7) == 0 {
return Ok((PA_FUNC_BITS | 1, new_addr as u8))
};
//In quad 3
let new_addr = address & 0b011111111;
if (new_addr >> 8) == 0 {
return Ok((PA_FUNC_BITS | 1, new_addr as u8))
};
Err(error::Error::InvalidAddrConvert)
}
#[cfg(test)]
mod tests {
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
#[test]
fn call_address_in_bounds_with_condition_address_equal_0x100_result_should_pass() {
let addr = 0x100;
let result = addr_in_bounds::<error::Error<u8>>(addr).is_ok();
assert_eq!(result, true);
}
#[test]
fn call_address_in_bounds_with_condition_address_equal_0x200_result_should_fail() {
let addr = 0x200;
let result = addr_in_bounds::<error::Error<u8>>(addr).is_err();
assert_eq!(result, true);
}
#[test]
fn call_address_convert_with_condition_address_equal_0x7F_result_tuple_0_is_PA_FUNC_BITS_tuple_1_is_0x7F() {
let addr = 0x7F;
let quad = PA_FUNC_BITS;
let result = addr_convert::<error::Error<u8>>(addr).unwrap();
println!("{:?}", result);
assert_eq!(result.0, quad);
assert_eq!(result.1, addr as u8);
}
#[test]
fn call_address_convert_with_condition_address_equal_0xFF_result_tuple_0_is_PA_FUNC_BITS_tuple_1_is_0xFF() {
let addr = 0xFF;
let quad = PA_FUNC_BITS;
let result = addr_convert::<error::Error<u8>>(addr).unwrap();
println!("{:?}", result);
assert_eq!(result.0, quad);
assert_eq!(result.1, addr as u8);
}
#[test]
fn call_address_convert_with_condition_address_equal_0x17F_result_tuple_0_is_PA_FUNC_BITS_ored_1_tuple_1_is_0x7F() {
let addr = 0x17F;
let quad = PA_FUNC_BITS | 1;
let result = addr_convert::<error::Error<u8>>(addr).unwrap();
println!("{:?}", result);
assert_eq!(result.0, quad);
assert_eq!(result.1, addr as u8);
}
#[test]
fn call_address_convert_with_condition_address_equal_0x1FF_result_tuple_0_is_PA_FUNC_BITS_ored_1_tuple_1_is_0xFF() {
let addr = 0x1FF;
let quad = PA_FUNC_BITS | 1;
let result = addr_convert::<error::Error<u8>>(addr).unwrap();
println!("{:?}", result);
assert_eq!(result.0, quad);
assert_eq!(result.1, addr as u8);
}
#[test]
fn call_addr_in_bounds_page_wr_with_condition_address_0x7F_add_8_result_error() {
let quad0_addr_max = 0x7F;
let addr = quad0_addr_max;
let data_len = 8u32;
let result = addr_in_bounds_page_wr::<u8>(addr, data_len).is_err();
println!("{:?}", result);
assert_eq!(result, true);
}
#[test]
fn call_addr_in_bounds_page_wr_with_condition_address_0xFF_add_8_result_error() {
let quad0_addr_max = 0xFF;
let addr = quad0_addr_max;
let data_len = 8u32;
let result = addr_in_bounds_page_wr::<u8>(addr, data_len).is_err();
println!("{:?}", result);
assert_eq!(result, true);
}
#[test]
fn call_addr_in_bounds_page_wr_with_condition_address_0x77_add_8_result_no_error() {
let quad0_addr_max = 0x77;
let addr = quad0_addr_max;
let data_len = 8u32;
let result = addr_in_bounds_page_wr::<u8>(addr, data_len).is_ok();
println!("{:?}", result);
assert_eq!(result, true);
}
}
| true |
93b5d97d55e40d383cb73f94f07f69097b06c6ee
|
Rust
|
wonjin/rust_snake
|
/src/main.rs
|
UTF-8
| 1,111 | 2.78125 | 3 |
[] |
no_license
|
extern crate piston_window;
extern crate rand;
mod game;
mod snake;
mod draw;
use piston_window::types::Color;
use piston_window::*;
use self::game::Game;
use self::draw::to_coord;
const BACK_COLOR: Color = [0.5, 0.5, 0.5, 1.0];
fn main() {
let (w, h) = (10,10);
let (gui_w_u32, gui_h_u32) = (to_coord(w) as u32, to_coord(h) as u32);
let mut window: PistonWindow = WindowSettings::new("Snake", [gui_w_u32, gui_h_u32])
.exit_on_esc(true)
.build()
.unwrap();
let mut game = Game::new(w, h);
// TODO: read up on `Some` and `Option` pattern matching in more depth!
// What is clear is that `event` and `key` are bound as variables here.
while let Some(event) = window.next() {
if let Some(Button::Keyboard(key)) = event.press_args() {
game.key_pressed(key);
}
// TODO: learn more about closures.
// They appear to work much like lambdas in other languages.
window.draw_2d(&event, |c, g| {
clear(BACK_COLOR, g);
game.draw(&c, g);
});
event.update(|args| {
game.update(args.dt);
});
}
}
| true |
736b0dcc860035e8b7c8113089511d988e68b983
|
Rust
|
pine/rust-hands-on
|
/ownership/src/main.rs
|
UTF-8
| 173 | 2.6875 | 3 |
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
fn main() {
let v = vec![1, 2, 3];
println!("{:?}", v);
// let v2 = v;
let v2 = &v;
println!("{:?}", v2);
println!("{:?}", v);
// v2[0] = 1;
}
| true |
6afdc2e547b5045038cc0fa8145cf336d680ff36
|
Rust
|
JustusFT/ultimate_tic_tac_toe
|
/terminal_game/src/main.rs
|
UTF-8
| 10,007 | 2.625 | 3 |
[] |
no_license
|
use base_game::game::Game;
use base_game::monte_carlo::MctsTree;
use rustyline::Editor;
use std::convert::TryFrom;
use std::io::{stdout, Write};
use std::time::Instant;
use termion::clear;
use termion::cursor;
use termion::raw::IntoRawMode;
const BOARD_DISPLAY: &'static str = " \
│ │ ┃ │ │ ┃ │ │ \r
───┼───┼─── ┃ ───┼───┼─── ┃ ───┼───┼───\r
│ │ ┃ │ │ ┃ │ │ \r
───┼───┼─── ┃ ───┼───┼─── ┃ ───┼───┼───\r
│ │ ┃ │ │ ┃ │ │ \r
━━━━━━━━━━━━╋━━━━━━━━━━━━━╋━━━━━━━━━━━━\r
│ │ ┃ │ │ ┃ │ │ \r
───┼───┼─── ┃ ───┼───┼─── ┃ ───┼───┼───\r
│ │ ┃ │ │ ┃ │ │ \r
───┼───┼─── ┃ ───┼───┼─── ┃ ───┼───┼───\r
│ │ ┃ │ │ ┃ │ │ \r
━━━━━━━━━━━━╋━━━━━━━━━━━━━╋━━━━━━━━━━━━\r
│ │ ┃ │ │ ┃ │ │ \r
───┼───┼─── ┃ ───┼───┼─── ┃ ───┼───┼───\r
│ │ ┃ │ │ ┃ │ │ \r
───┼───┼─── ┃ ───┼───┼─── ┃ ───┼───┼───\r
│ │ ┃ │ │ ┃ │ │ \r
";
type BigPieceStringArray = [&'static str; 5];
const BIG_X: BigPieceStringArray = [
" █▄│ │▄█ ",
"──▀█▄─▄█▀──",
" │███│ ",
"──▄█▀─▀█▄──",
" █▀│ │▀█ ",
];
const BIG_O: BigPieceStringArray = [
" ▄█▀▀▀█▄ ",
"─█▀┼───┼▀█─",
" █ │ │ █ ",
"─█▄┼───┼▄█─",
" ▀█▄▄▄█▀ ",
];
// these mark the coordinates where the top-left cell of a local board is located from the BOARD_DISPLAY
const X_CORNERS: [u16; 3] = [2, 16, 30];
const Y_CORNERS: [u16; 3] = [1, 7, 13];
// these mark the distance to the other cells of the local board, starting from the top left cell of the local board
const X_OFFSETS: [u16; 3] = [0, 4, 8];
const Y_OFFSETS: [u16; 3] = [0, 2, 4];
// converts board number into 2D coords (x, y)
// 0 is (0, 0), 8 is (2, 2)
fn board_coordinates(cell: usize) -> (usize, usize) {
assert!(cell < 9);
(cell % 3, cell / 3)
}
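// Worked example: local board 4, cell 8 maps to board_coordinates (1, 1) and (2, 2),
// i.e. terminal column 16 + 8 = 24 and row 7 + 4 = 11 (see draw_piece below).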
fn piece_to_char(piece: base_game::Piece) -> char {
match piece {
base_game::Piece::X => 'X',
base_game::Piece::O => 'O',
base_game::Piece::BLANK => ' ',
}
}
struct TerminalGame {
stdout: termion::raw::RawTerminal<std::io::Stdout>,
game: Game,
search_tree: MctsTree,
}
impl TerminalGame {
fn new() -> TerminalGame {
TerminalGame {
stdout: stdout().into_raw_mode().unwrap(),
game: Game::new(),
search_tree: MctsTree::new(),
}
}
// clear the screen and redraw the board
fn draw_board(&mut self) {
write!(
self.stdout,
"{clear}{move}{board}",
clear = clear::All,
move = cursor::Goto(1, 1),
board = BOARD_DISPLAY
)
.unwrap();
for i in 0..=8 {
match self.game.local_boards[i].claimer {
// for claimed boards, draw a big piece over the board
// for non-claimed boards, draw what pieces are currently on it
Some(base_game::Piece::X) => self.draw_big_piece(i, BIG_X),
Some(base_game::Piece::O) => self.draw_big_piece(i, BIG_O),
_ => {
for j in 0..=8 {
self.draw_piece(i, j)
}
}
}
}
// move the cursor to the bottom
write!(self.stdout, "\r\n").unwrap();
self.stdout.flush().unwrap();
}
fn redraw_with_error(&mut self, error: String) {
self.draw_board();
println!("\r{}", error);
}
// change a piece of the board in the terminal display
// pass in which local_board (from 1 to 9) has the cell that needs to be changed
// then do the same for the cell number
fn draw_piece(&mut self, local_board: usize, cell: usize) {
// the boards and cells indices only go up to 8
assert!(local_board < 9);
assert!(cell < 9);
// to target the coordinates of the target cell we do it in 2 steps:
// 1. go to the top-left of the target local board
// 2. offset the cursor to go on the right cell
let (corner_x, corner_y) = board_coordinates(local_board);
let (offset_x, offset_y) = board_coordinates(cell);
// then write the piece char at the target
write!(
self.stdout,
"{move}{piece}",
move = cursor::Goto(
u16::try_from(X_CORNERS[corner_x] + X_OFFSETS[offset_x]).ok().unwrap(),
u16::try_from(Y_CORNERS[corner_y] + Y_OFFSETS[offset_y]).ok().unwrap()
),
piece = piece_to_char(self.game.local_boards[local_board].board[cell])
)
.unwrap();
}
// for claimed boards, draw a big piece on top the board display
fn draw_big_piece(&mut self, local_board: usize, overlay: BigPieceStringArray) {
let (corner_x, corner_y) = board_coordinates(local_board);
for i in 0..5 {
write!(
self.stdout,
"{move}{line_text}",
move = cursor::Goto(
X_CORNERS[corner_x] - 1,
Y_CORNERS[corner_y] + i
),
line_text = overlay[usize::from(i)]
)
.unwrap();
}
}
// request input for next move
fn request_user_move(&mut self) {
let mut rl = Editor::<()>::new();
let current_board_index: u8;
match self.game.current_board {
Some(x) => {
current_board_index = x;
}
None => loop {
print!("\rInput board #");
let readline = rl.readline("> ");
match readline {
Ok(line) => {
// attempt to convert the string to a number
match line.parse::<u8>() {
Ok(board_number) => {
if board_number > 8 {
self.redraw_with_error(format!(
"Please insert a number from 0-8"
));
continue;
}
if self.game.local_boards[usize::from(board_number)].claimer == None
{
self.draw_board();
current_board_index = board_number;
break;
} else {
self.redraw_with_error(format!(
"Board #{} is already claimed!",
board_number
));
}
}
Err(_) => {
self.redraw_with_error(format!("Please insert a number from 0-8"))
}
}
}
_ => {}
}
},
};
loop {
println!("\rCurrent board: {}", current_board_index);
print!("\rInput cell #");
let readline = rl.readline("> ");
match readline {
Ok(line) => match line.parse::<u8>() {
Ok(cell_number) => {
if cell_number > 8 {
self.redraw_with_error(format!("Please insert a number from 0-8"));
continue;
}
if self.game.local_boards[usize::from(current_board_index)].board
[usize::from(cell_number)]
== base_game::Piece::BLANK
{
self.game.make_move(current_board_index, cell_number);
break;
} else {
self.redraw_with_error(format!(
"Cell #{} is already taken!",
cell_number
));
}
}
Err(_) => self.redraw_with_error(format!("Please insert a number from 0-8")),
},
_ => {}
}
}
}
}
fn main() {
let mut game = TerminalGame::new();
loop {
game.draw_board();
println!("\rYour turn.");
game.request_user_move();
game.draw_board();
println!("\rCPU is thinking...");
let begin = Instant::now();
let cpu_move = game
.search_tree
.evaluate_while(&mut game.game, |games_ran| {
return begin.elapsed().as_secs() < 10 && games_ran < 10000;
});
match cpu_move {
Some((a, b)) => {
game.game.make_move(a, b);
}
None => {
println!("CPU can't make a move!");
panic!();
}
}
}
}
| true |
5e2108384b4c28c3f5179b74a6a564e762217839
|
Rust
|
gwy15/leetcode
|
/src/1140.石子游戏-ii.rs
|
UTF-8
| 1,596 | 3.03125 | 3 |
[] |
no_license
|
/*
* @lc app=leetcode.cn id=1140 lang=rust
*
* [1140] 石子游戏 II
*/
struct Solution;
// @lc code=start
#[allow(unused)]
impl Solution {
pub fn stone_game_ii(piles: Vec<i32>) -> i32 {
let n = piles.len();
let mut post_sum = vec![0; n + 1];
for i in (0..n).rev() {
post_sum[i] = post_sum[i + 1] + piles[i];
}
let mut dp = vec![vec![0; n + 1]; n];
for i in (0..n).rev() {
for m in (1..=n).rev() {
// We can take the next x piles, i.e. piles [i, i + x), where x ∈ [1, 2m]
if i + 2 * m >= n {
dp[i][m] = post_sum[i];
continue;
}
let opponent_worst = (1..=2 * m).map(|x| dp[i + x][x.max(m)]).min().unwrap();
dp[i][m] = post_sum[i] - opponent_worst;
}
}
dp[0][1]
}
}
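// Recurrence used above, restated: dp[i][m] = post_sum[i] - min_{1 <= x <= 2m} dp[i + x][max(x, m)],
// i.e. take everything that remains minus the best the opponent can secure afterwards.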
// @lc code=end
#[test]
fn test_solution() {
macro_rules! test {
($piles:tt, $ans:expr) => {
assert_eq!(
Solution::stone_game_ii(vec!$piles),
$ans
);
}
};
test!([2, 7, 9, 4, 4], 10);
test!([10], 10);
test!([0, 0, 0, 100, 0], 100);
test!([0, 0, 100], 0);
test!([9, 2, 2, 8, 3, 7, 9, 9], 29);
test!(
[
7468, 6245, 9261, 3958, 1986, 1074, 5677, 9386, 1408, 1384, 8811, 3885, 9678, 8470,
8893, 7514, 4941, 2148, 5217, 5425, 5307, 747, 1253, 3518, 5238, 5834, 9133, 8391,
6100, 3362, 7807, 2581, 6121, 7684, 8744, 9584, 4068, 7204, 4285, 8635
],
115357
);
}
| true |
6afdf185aed1b5f2c5dad1b2dda6dea6c48b0e45
|
Rust
|
gefjon/zeldesque
|
/src/color.rs
|
UTF-8
| 469 | 2.734375 | 3 |
[] |
no_license
|
pub type Color = [f32; 4];
pub type RawColor = [f32; 3];
pub fn with_opacity(color: RawColor, opacity: f32) -> Color {
[color[0], color[1], color[2], opacity]
}
pub const OPAQUE: f32 = 1.0;
pub const RED: RawColor = [1.0, 0.0, 0.0];
pub const GREEN: RawColor = [0.0, 1.0, 0.0];
pub const BLUE: RawColor = [0.0, 0.0, 1.0];
pub const BLACK: RawColor = [0.0, 0.0, 0.0];
pub const WHITE: RawColor = [1.0, 1.0, 1.0];
pub const LIGHT_GREY: RawColor = [0.9, 0.9, 0.9];
| true |
4b03beed9f98cf250506519df5991e5820902f73
|
Rust
|
jakyle/rust-algos
|
/src/smaller_than_the_current_number.rs
|
UTF-8
| 708 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
pub fn smaller_numbers_than_current(nums: Vec<i32>) -> Vec<i32> {
let mut result = vec![0; nums.len()];
for (i, x) in nums.iter().enumerate() {
for (j, y) in nums.iter().enumerate() {
if i == j {
continue;
}
if y < x {
result[i] += 1;
}
}
}
result
}
#[cfg(test)]
mod smaller_numbers_than_current_tests {
use super::*;
#[test]
fn smaller_numbers_than_current_test_one() {
// arrange
let test = vec![8, 1, 2, 2, 3];
// act
let result = smaller_numbers_than_current(test);
// assert
assert_eq!(result, vec![4, 0, 1, 1, 3]);
}
}
| true |
53c36cad96e7c8e6de05ff1ada3b836faa7a528f
|
Rust
|
rust-lang/thanks
|
/src/main.rs
|
UTF-8
| 19,851 | 2.515625 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
use git2::{Commit, Oid, Repository};
use mailmap::{Author, Mailmap};
use regex::{Regex, RegexBuilder};
use semver::Version;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Mutex;
use std::{cmp, fmt, str};
use config::Config;
use reviewers::Reviewers;
mod config;
mod error;
mod reviewers;
mod site;
use error::ErrorContext;
trait ToAuthor {
fn from_sig(sig: git2::Signature<'_>) -> Author;
}
impl ToAuthor for Author {
fn from_sig(sig: git2::Signature<'_>) -> Author {
let name = sig.name().unwrap_or_else(|| panic!("no name for {}", sig));
let email = sig
.email()
.unwrap_or_else(|| panic!("no email for {}", sig));
Author {
name: name.to_string(),
email: email.to_string(),
}
}
}
#[derive(Clone)]
pub struct AuthorMap {
// author -> [commits]
map: HashMap<Author, HashSet<Oid>>,
}
impl AuthorMap {
fn new() -> Self {
AuthorMap {
map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else {
new.map.insert(author.clone(), set.clone());
}
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note this is not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
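// Illustrative input (a hypothetical subject line a bors merge commit might carry):
// "Auto merge of #12345 - user:branch, r=alice,bob"
// parse_bors_reviewer extracts "alice,bob" after "r=" and resolves each name
// through the `Reviewers` map.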
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is one more minor version ahead of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
let name = entry.name().unwrap();
if name.ends_with(".path") {
let url = name.replace(".path", ".url");
let url = submodule_cfg.get_string(&url).unwrap();
path_to_url.insert(entry.value().unwrap().to_owned(), url);
}
}
let mut submodules = Vec::new();
let tree = at.tree()?;
for (path, url) in &path_to_url {
let path = Path::new(&path);
let entry = tree.get_path(&path);
// the submodule may not actually exist
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
assert_eq!(entry.kind().unwrap(), git2::ObjectType::Commit);
submodules.push(Submodule {
path: path.to_owned(),
commit: entry.id(),
repository: url.to_owned(),
});
}
submodules.retain(|s| {
let is_rust =
s.repository.contains("rust-lang") || s.repository.contains("rust-lang-nursery");
let exclude = vec![
"https://github.com/rust-lang/llvm.git",
"https://github.com/rust-lang/llvm-project.git",
"https://github.com/rust-lang/lld.git",
"https://github.com/rust-lang-nursery/clang.git",
"https://github.com/rust-lang-nursery/lldb.git",
"https://github.com/rust-lang/libuv.git",
"https://github.com/rust-lang/gyp.git",
"https://github.com/rust-lang/jemalloc.git",
"https://github.com/rust-lang/compiler-rt.git",
"https://github.com/rust-lang/hoedown.git",
];
is_rust
&& !exclude.contains(&s.repository.as_str())
&& !exclude.contains(&&*format!("{}.git", s.repository))
});
Ok(submodules)
}
fn modules_file(repo: &Repository, at: &Commit) -> Result<String, Box<dyn std::error::Error>> {
if let Some(modules) = at.tree()?.get_name(".gitmodules") {
Ok(String::from_utf8(
modules.to_object(&repo)?.peel_to_blob()?.content().into(),
)?)
} else {
return Ok(String::new());
}
}
| true |
fed3678abf892207e590bc9d5a45be5e7c5ce057
|
Rust
|
TheBlueMatt/rust-lightning
|
/lightning-block-sync/src/http.rs
|
UTF-8
| 26,310 | 2.6875 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
//! Simple HTTP implementation which supports both async and traditional execution environments
//! with minimal dependencies. This is used as the basis for REST and RPC clients.
use chunked_transfer;
use serde_json;
use std::convert::TryFrom;
use std::fmt;
#[cfg(not(feature = "tokio"))]
use std::io::Write;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
#[cfg(feature = "tokio")]
use tokio::net::TcpStream;
#[cfg(not(feature = "tokio"))]
use std::io::BufRead;
use std::io::Read;
#[cfg(not(feature = "tokio"))]
use std::net::TcpStream;
/// Timeout for operations on TCP streams.
const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
/// Timeout for reading the first byte of a response. This is separate from the general read
/// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes for
/// upwards of 10 minutes on slow devices (e.g. RPis with SSDs over USB). Note that we always retry
/// once when we time out, so the maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
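/// (That is, twice 4,000,000 bytes for a hex-encoded raw block plus 32,000 bytes of
/// slack for JSON/HTTP framing and chunked-transfer overhead.)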
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on failure. This can happen if the connection was closed
// after the keep-alive limits were reached, or generally if the request timed
// out due to Bitcoin Core being stuck on a long-running operation or its RPC
// queue being full.
// Sleep for 100ms before retrying the request, as in many cases the source of
// the error may persist for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
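///
/// For example, `"Content-Length: 42"` parses into name `Content-Length` and value `42`.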
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chuncked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chuncked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_error() {
let server = HttpServer::responding_with_server_error("foo");
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<JsonResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
let http_error = e.into_inner().unwrap().downcast::<HttpError>().unwrap();
assert_eq!(http_error.status_code, "500");
assert_eq!(http_error.contents, "foo".as_bytes());
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn read_chunked_message_body() {
let body = "foo bar baz qux".repeat(32);
let chunked_content = MessageBody::ChunkedContent(body.clone());
let server = HttpServer::responding_with_ok::<String>(chunked_content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn reconnect_closed_connection() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
assert!(client.get::<BinaryResponse>("/foo", "foo.com").await.is_ok());
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[test]
fn from_bytes_into_binary_response() {
let bytes = b"foo";
match BinaryResponse::try_from(bytes.to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(&response.0, bytes),
}
}
#[test]
fn from_invalid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes()[..5].to_vec()) {
Err(_) => {},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn from_valid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes().to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(response.0, json),
}
}
}
| true |
29059a3f1cbdd37361fde505e2c1f1d0a6396adb
|
Rust
|
ChrisRG/CodingChallenges
|
/old_leetcode/easy/322_coin_change.rs
|
UTF-8
| 637 | 3.53125 | 4 |
[] |
no_license
|
// You are given an integer array coins representing coins of different denominations and an integer amount representing a total amount of money.
// Return the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1.
// You may assume that you have an infinite number of each kind of coin.
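// Worked example (added for illustration): with coins = [1, 2, 5] and amount = 11 the answer
// is 3 (11 = 5 + 5 + 1), while with coins = [2] and amount = 3 no combination works, so -1.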
struct Solution;
impl Solution {
    pub fn coin_change(coins: Vec<i32>, amount: i32) -> i32 {
        // Bottom-up DP: dp[a] = fewest coins that sum to a, or i32::MAX if unreachable.
        let amount = amount as usize;
        let mut dp = vec![i32::MAX; amount + 1];
        dp[0] = 0;
        for a in 1..=amount {
            for &coin in &coins {
                let coin = coin as usize;
                if coin <= a && dp[a - coin] != i32::MAX {
                    dp[a] = dp[a].min(dp[a - coin] + 1);
                }
            }
        }
        if dp[amount] == i32::MAX { -1 } else { dp[amount] }
    }
}
fn main() {
let input1 = vec![1,2,5];
let sol = Solution::coin_change(input1, 11);
println!("Result: {}, Expected: 3", sol);
}
| true |
c1da12e94e7baf177ce47c6b2fcb453979f86957
|
Rust
|
benbrunton/pusoy-dos
|
/src/game/player.rs
|
UTF-8
| 2,400 | 3.53125 | 4 |
[] |
no_license
|
use cards::card::PlayerCard;
/// A player
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Player{
hand: Vec<PlayerCard>,
id: u64
}
impl Player{
/// creates a new `Player`
pub fn new(id: u64) -> Player{
Player{
hand: vec!(),
id: id
}
}
/// get the player id
pub fn get_id(&self) -> u64 {
self.id
}
/// give a player their hand
pub fn set_hand(&self, hand:Vec<PlayerCard>) -> Player {
Player{
hand: hand.clone(),
id: self.id
}
}
/// number of cards player has left
pub fn remaining_cards(&self) -> usize {
self.hand.len()
}
/// get the cards for a player
pub fn get_hand(&self) -> Vec<PlayerCard> {
self.hand.clone()
}
pub fn reverse_hand(&self) -> Player {
let reversed_hand = self.hand.iter().map(|&c|{c.reverse()}).collect::<Vec<PlayerCard>>();
Player{
hand: reversed_hand,
id: self.id
}
}
/// take some cards from a player
pub fn remove(&self, cards:&Vec<PlayerCard>) -> Player {
let mut hand = self.remove_jokers(cards);
for &card in cards {
match card {
PlayerCard::Joker(_)|
PlayerCard::Wildcard(_) => (),
_ => {
let reversed = card.reverse();
let pos = hand.iter().position(|&c| { card == c || reversed == c }).unwrap();
hand.remove(pos);
}
}
}
Player {
id: self.id,
hand: hand
}
}
pub fn remove_jokers(&self, cards:&Vec<PlayerCard>)-> Vec<PlayerCard> {
let mut new_hand = vec!();
let mut jokers = 0;
for card in cards.iter() {
match *card {
PlayerCard::Wildcard(_) => jokers += 1,
_ => ()
}
}
for card in self.hand.iter() {
match *card {
PlayerCard::Joker(n) => {
if jokers < 1 {
new_hand.push(PlayerCard::Joker(n));
}else {
jokers -= 1;
}
},
c => new_hand.push(c.to_owned())
}
}
new_hand
}
}
| true |
a957c17ba8d1910f4044ab1340ec06628a03da42
|
Rust
|
msoermc/rmc-core-2018-2019
|
/src/motor_controllers/print_motor.rs
|
UTF-8
| 1,729 | 3.46875 | 3 |
[] |
no_license
|
use std::sync::Arc;
use super::*;
const FLOAT_ERROR: f32 = 0.05;
pub struct PrintMotor {
name: String,
state: Arc<GlobalMotorState>,
is_stopped: bool,
}
impl MotorController for PrintMotor {
fn set_speed(&mut self, new_speed: f32) {
if (self.get_motor_state().get_speed() - new_speed < FLOAT_ERROR)
|| (new_speed - self.get_motor_state().get_speed() < FLOAT_ERROR) {
info!("{}: -> {}", self.name, new_speed);
self.get_motor_state().set_speed(new_speed);
}
self.is_stopped = false;
}
fn stop(&mut self) {
if !self.is_stopped {
info!("{}: STOP", self.name);
self.is_stopped = true;
self.get_motor_state().set_speed(0.0);
}
}
fn get_motor_state(&self) -> &GlobalMotorState {
&self.state
}
}
impl PrintMotor {
pub fn new(name: &str, state: Arc<GlobalMotorState>) -> PrintMotor {
PrintMotor {
name: name.to_string(),
state,
is_stopped: false,
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
#[test]
fn test_print_motor() {
let state = Arc::new(GlobalMotorState::new());
let mut motor = PrintMotor::new("t", state.clone());
assert_eq!(0.0, motor.get_motor_state().get_speed());
motor.set_speed(1.0);
assert_eq!(1.0, motor.get_motor_state().get_speed());
motor.set_speed(-1.0);
assert_eq!(-1.0, motor.get_motor_state().get_speed());
motor.stop();
assert_eq!(0.0, motor.get_motor_state().get_speed());
motor.set_speed(1.0);
assert_eq!(1.0, motor.get_motor_state().get_speed());
}
}
| true |
18974a9f5e2ef00b711b3ee6ecb0ec9aa4fd2734
|
Rust
|
AlexseiT/sibsutis
|
/4course/graphical_information/labs/src/lib.rs
|
UTF-8
| 5,800 | 3.109375 | 3 |
[
"WTFPL"
] |
permissive
|
use std::path::Path;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fs::{File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
pub struct BMP {
file: File,
filesize: u32,
offset: u32,
width: u16,
height: u16,
image_data: Vec<u8>,
}
impl BMP {
pub fn open(path: &Path) -> std::io::Result<BMP> {
let mut file = OpenOptions::new().read(true).write(true).open(path)?;
file.seek(SeekFrom::Start(0x02))?;
let filesize = file.read_u32::<LittleEndian>()?;
file.seek(SeekFrom::Start(0x0a))?;
let offset = file.read_u32::<LittleEndian>()?;
assert_eq!(
file.read_u32::<LittleEndian>()?,
12,
"Поддерживается только BMP v2"
);
let width = file.read_u16::<LittleEndian>()?;
let height = file.read_u16::<LittleEndian>()?;
file.seek(SeekFrom::Current(2))?;
assert_eq!(
file.read_u16::<LittleEndian>()?,
24,
"Поддерживается только True Color"
);
let mut image_data = vec![0u8; width as usize * height as usize * 3];
file.seek(SeekFrom::Start(offset as u64))?;
file.read_exact(&mut image_data)?;
Ok(BMP {
file,
filesize,
offset,
width,
height,
image_data,
})
}
fn set_filesize(&mut self, new_filesize: u32) -> std::io::Result<()> {
self.file.seek(SeekFrom::Start(0x02))?;
self.file.write_u32::<LittleEndian>(new_filesize)?;
self.filesize = new_filesize;
self.file.set_len(new_filesize as u64)?;
Ok(())
}
fn set_new_dimensions(&mut self, new_width: u16, new_heigth: u16) -> std::io::Result<()> {
self.file.seek(SeekFrom::Start(0x12))?;
self.file.write_u16::<LittleEndian>(new_width)?;
self.file.write_u16::<LittleEndian>(new_heigth)?;
self.width = new_width;
self.height = new_heigth;
Ok(())
}
fn calc_line_length(&self) -> u16 {
((self.width * 3) as f64 / 4.).ceil() as u16 * 4
}
pub fn resize(&mut self, coeff: f64) -> std::io::Result<()> {
let new_width = (self.width as f64 * coeff).round() as u16;
let new_height = (self.height as f64 * coeff).round() as u16;
let mut new_image_data = Vec::with_capacity(new_width as usize * 3 * new_height as usize);
for line in 0..new_height {
for col in 0..new_width {
let start_idx = (line as f64 / coeff) as usize * self.calc_line_length() as usize
+ (col as f64 / coeff * 3.) as usize;
new_image_data.extend_from_slice(&self.image_data[start_idx..start_idx + 3]);
}
}
self.write_new_image_data(&new_image_data)?;
self.set_filesize(
self.filesize - self.image_data.len() as u32 + new_image_data.len() as u32,
)?;
self.set_new_dimensions(new_width, new_height)?;
Ok(())
}
fn write_new_image_data(&mut self, image_data: &Vec<u8>) -> std::io::Result<()> {
self.file.seek(SeekFrom::Start(self.offset as u64)).unwrap();
self.file.write_all(&image_data)?;
Ok(())
}
pub fn add(&mut self, other: &Self) -> std::io::Result<()> {
assert!(
other.height <= self.height && other.width <= self.width,
"Невозможно вписать большее изображение в меньшее"
);
let bg_b = other.image_data[0];
let bg_g = other.image_data[1];
let bg_r = other.image_data[2];
for line in 0..other.height as usize {
for col in 0..other.width as usize {
let other_idx = line * other.calc_line_length() as usize + col * 3;
let self_idx = line * self.calc_line_length() as usize + col * 3;
if other.image_data[other_idx] != bg_b
|| other.image_data[other_idx + 1] != bg_g
|| other.image_data[other_idx + 2] != bg_r
{
self.image_data[self_idx] =
other.image_data[other_idx as usize] / 2 + self.image_data[self_idx] / 2;
self.image_data[self_idx + 1] =
other.image_data[other_idx + 1] / 2 + self.image_data[self_idx + 1] / 2;
self.image_data[self_idx + 2] =
other.image_data[other_idx + 2] / 2 + self.image_data[self_idx + 2] / 2;
}
}
}
self.write_new_image_data(&self.image_data.clone())?;
Ok(())
}
pub fn add_text(&mut self, txt: &String) {
let mut image_data_it = self.image_data.iter_mut();
let mut len = txt.len();
for _ in 0..32 {
let byte = image_data_it.next().unwrap();
*byte = *byte & 0b11111100 | 0b11 & len as u8;
len >>= 2;
}
for mut byte in txt.bytes() {
for _ in 0..4 {
let img_byte = image_data_it.next().unwrap();
*img_byte = *img_byte & 0b11111100 | 0b11 & byte as u8;
byte >>= 2;
}
}
self.write_new_image_data(&self.image_data.clone()).unwrap();
}
pub fn read_text(&self) -> String {
let mut len = 0usize;
for i in (0..32).rev() {
len <<= 2;
let byte = self.image_data[i];
len |= 0b11 & byte as usize;
}
        let mut str = vec![0u8; len];
        // The hidden text starts right after the 32 bytes that encode its length.
        for i in (0..len * 4).rev() {
            str[i / 4] <<= 2;
            str[i / 4] |= 0b11 & self.image_data[32 + i];
        }
String::from_utf8(str).unwrap()
}
}
| true |
d7fc45881875c6c2f5b460a9dd482c8938e143e4
|
Rust
|
rustwasm/wasm-bindgen
|
/crates/web-sys/tests/wasm/olist_element.rs
|
UTF-8
| 1,373 | 2.859375 | 3 |
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use wasm_bindgen::prelude::*;
use wasm_bindgen_test::*;
use web_sys::HtmlOListElement;
#[wasm_bindgen(module = "/tests/wasm/element.js")]
extern "C" {
fn new_olist() -> HtmlOListElement;
}
#[wasm_bindgen_test]
fn test_olist_element() {
let olist = new_olist();
olist.set_reversed(true);
assert_eq!(
olist.reversed(),
true,
"Olist should be reversed after we set it to be reversed."
);
olist.set_reversed(false);
assert_eq!(
olist.reversed(),
false,
"Olist should not be reversed after we set it to be not reversed."
);
olist.set_start(23);
assert_eq!(
olist.start(),
23,
"Olist should have the start value we gave it."
);
olist.set_type("A");
assert_eq!(
olist.type_(),
"A",
"Olist should be type 'A' after we set it to be type 'A'."
);
olist.set_type("I");
assert_eq!(
olist.type_(),
"I",
"Olist should be type 'I' after we set it to be type 'I'."
);
olist.set_compact(true);
assert_eq!(
olist.compact(),
true,
"Olist should be compact after we set it to be compact."
);
olist.set_compact(false);
assert_eq!(
olist.compact(),
false,
"Olist should not be compact after we set it to be not compact."
);
}
| true |
ab9bfd0d331aa8111f6199db0861e9c21d949cf7
|
Rust
|
JelteF/derive_more
|
/tests/deref.rs
|
UTF-8
| 1,172 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(dead_code, unused_imports)]
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(not(feature = "std"))]
use ::alloc::{boxed::Box, vec::Vec};
use derive_more::Deref;
#[derive(Deref)]
#[deref(forward)]
struct MyBoxedInt(Box<i32>);
#[derive(Deref)]
#[deref(forward)]
struct NumRef<'a> {
num: &'a i32,
}
#[derive(Deref)]
struct NumRef2<'a> {
#[deref(forward)]
num: &'a i32,
useless: bool,
}
#[derive(Deref)]
#[deref(forward)]
struct NumRef3<'a> {
num: &'a i32,
#[deref(ignore)]
useless: bool,
}
#[derive(Deref)]
struct MyInt(i32);
#[derive(Deref)]
struct Point1D {
x: i32,
}
#[derive(Deref)]
struct Point1D2 {
x: i32,
#[deref(ignore)]
useless: bool,
}
#[derive(Deref)]
struct CoolVec {
cool: bool,
#[deref]
vec: Vec<i32>,
}
#[derive(Deref)]
struct GenericVec<T>(Vec<T>);
#[test]
fn deref_generic() {
let gv = GenericVec(Vec::<i32>::new());
assert!(gv.is_empty())
}
#[derive(Deref)]
struct GenericBox<T>(#[deref(forward)] Box<T>);
#[test]
fn deref_generic_forward() {
let boxed = GenericBox(Box::new(1i32));
assert_eq!(*boxed, 1i32);
}
| true |
7722b90e34b420ac83bb88aeedd12ec6aa782f79
|
Rust
|
whytheplatypus/diem
|
/src/lib.rs
|
UTF-8
| 5,652 | 3.203125 | 3 |
[] |
no_license
|
extern crate chrono;
use chrono::NaiveDate;
use std::fmt;
use std::str::FromStr;
pub type List = Vec<Event>;
type Day = NaiveDate;
#[derive(Debug)]
pub enum Action {
Add,
Remove,
Complete,
}
#[derive(Debug)]
pub struct ActionParseError;
impl fmt::Display for ActionParseError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("invalid todo action")
}
}
impl FromStr for Action {
type Err = ActionParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"add" => Ok(Action::Add),
"remove" => Ok(Action::Remove),
"complete" => Ok(Action::Complete),
_ => Err(ActionParseError),
}
}
}
impl fmt::Display for Action {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self {
Action::Add => fmt.write_str("add"),
Action::Remove => fmt.write_str("remove"),
Action::Complete => fmt.write_str("complete"),
}
}
}
#[derive(Debug)]
pub struct Event(pub Day, pub Action, pub String);
impl fmt::Display for Event {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_fmt(format_args!("{} {} {}", self.0, self.1, self.2))
}
}
#[derive(Debug)]
pub struct EventParseError;
impl fmt::Display for EventParseError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("invalid todo action")
}
}
impl From<chrono::ParseError> for EventParseError {
fn from(_error: chrono::ParseError) -> Self {
EventParseError
}
}
impl From<ActionParseError> for EventParseError {
fn from(_error: ActionParseError) -> Self {
EventParseError
}
}
impl FromStr for Event {
type Err = EventParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let v: Vec<&str> = s.splitn(3, ' ').collect();
let e = Event(
v[0].parse::<Day>()?,
v[1].parse::<Action>()?,
String::from(v[2]),
);
Ok(e)
}
}
pub fn apply(e: Event, todays_list: List) -> List {
let Event(_, a, _) = &e;
// map to implementation
match a {
Action::Add => add(e, todays_list),
Action::Remove => delete(e, todays_list),
Action::Complete => delete(e, todays_list),
}
}
fn add(task: Event, mut todays_list: List) -> List {
todays_list.push(task);
todays_list
}
fn delete(task: Event, mut todays_list: List) -> List {
todays_list.retain(|x| x.2 != task.2);
todays_list
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn add_task() -> Result<(), EventParseError> {
let mut task_list = List::new();
task_list = add(
Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Add,
String::from("test task"),
),
task_list,
);
assert_eq!(
(task_list.first().expect("failed to load test")).2,
String::from("test task")
);
Ok(())
}
#[test]
fn delete_task() -> Result<(), EventParseError> {
let mut task_list = vec![Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Add,
String::from("test task"),
)];
task_list = delete(
Event(
"2020-04-12".parse::<NaiveDate>()?,
Action::Complete,
String::from("test task"),
),
task_list,
);
assert!(task_list.is_empty());
Ok(())
}
#[test]
fn delete_task_not_found() -> Result<(), EventParseError> {
let first_list = vec![Event(
"2020-04-12".parse::<NaiveDate>()?,
Action::Add,
String::from("test task"),
)];
let task_list = delete(
Event(
"2020-04-12".parse::<NaiveDate>()?,
Action::Add,
String::from("no task"),
),
first_list,
);
assert_eq!(task_list.len(), 1);
Ok(())
}
#[test]
fn apply_events() -> Result<(), chrono::ParseError> {
let events = vec![
Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Add,
String::from("test task"),
),
Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Remove,
String::from("test task"),
),
Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Add,
String::from("test task"),
),
Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Complete,
String::from("test task"),
),
Event(
"2020-04-11".parse::<NaiveDate>()?,
Action::Add,
String::from("last task"),
),
];
let mut task_list = List::new();
for e in events {
task_list = apply(e, task_list);
}
assert_eq!(
(task_list.first().expect("failed to parse tasks")).2,
String::from("last task"),
);
Ok(())
}
#[test]
fn parse_event() -> Result<(), EventParseError> {
let e = "2020-04-11 add this is a test".parse::<Event>()?;
let task_list = apply(e, List::new());
assert_eq!(
(task_list.first().expect("failed to parse tasks")).2,
String::from("this is a test"),
);
Ok(())
}
}
| true |
cf9adc9251bdaf20babeaa8978c07370d11aba24
|
Rust
|
melkibalbino/rust-conc-e-perf-seguro
|
/05-Vetores-strings-e-tipos-genericos/vetor-05-map/src/main.rs
|
UTF-8
| 657 | 3.5 | 4 |
[] |
no_license
|
#[derive(Debug)]
struct Contact {
name: &'static str,
phone_number: &'static str
}
fn main() {
let contato_01 = Contact {
name: "Sivirina Chique Chique",
phone_number: "+55 (82) 93325-6554"
};
let contato_02 = Contact {
name: "Maria Jose",
phone_number: "+55 (81) 99877-9878"
};
let agenda = vec![contato_01, contato_02];
let names = agenda.iter()
.map(|contact| { contact.name })
.collect::<Vec<_>>();
println!("Names: {:?}", names);
let phones = agenda.iter()
.map(|contact| { contact.phone_number })
.collect::<Vec<_>>();
println!("Phones: {:?}", phones)
}
| true |
f6fba57f7af0bb9144cc4bf22feaf0b0d683b23c
|
Rust
|
helloooooo/prac-algo
|
/drken-training/lakecounting.rs
|
UTF-8
| 5,473 | 3 | 3 |
[] |
no_license
|
macro_rules! input {
(source = $s:expr, $($r:tt)*) => {
let mut iter = $s.split_whitespace();
let mut next = || { iter.next().unwrap() };
input_inner!{next, $($r)*}
};
($($r:tt)*) => {
let stdin = std::io::stdin();
let mut bytes = std::io::Read::bytes(std::io::BufReader::new(stdin.lock()));
let mut next = move || -> String{
bytes
.by_ref()
.map(|r|r.unwrap() as char)
.skip_while(|c|c.is_whitespace())
.take_while(|c|!c.is_whitespace())
.collect()
};
input_inner!{next, $($r)*}
};
}
macro_rules! input_inner {
($next:expr) => {};
($next:expr, ) => {};
($next:expr, $var:ident : $t:tt $($r:tt)*) => {
let $var = read_value!($next, $t);
input_inner!{$next $($r)*}
};
($next:expr, mut $var:ident : $t:tt $($r:tt)*) => {
let mut $var = read_value!($next, $t);
input_inner!{$next $($r)*}
};
}
macro_rules! read_value {
($next:expr, ( $($t:tt),* )) => {
( $(read_value!($next, $t)),* )
};
($next:expr, [ $t:tt ; $len:expr ]) => {
(0..$len).map(|_| read_value!($next, $t)).collect::<Vec<_>>()
};
($next:expr, chars) => {
read_value!($next, String).chars().collect::<Vec<char>>()
};
($next:expr, usize1) => {
read_value!($next, usize) - 1
};
($next:expr, $t:ty) => {
$next().parse::<$t>().expect("Parse error")
};
}
use std::collections::{HashMap,HashSet};
use std::cmp::{max,min};
fn is_prime(x:i64) -> bool {
if x == 2 {return true;}
if x < 2 || x % 2 == 0 {return false;}
let mut j = 3;
while j <= (x as f64).sqrt() as i64 {
if x % j == 0 {
return false;
}
j += 2;
}
true
}
pub mod modular {
const M: i64 = 1000000007;
#[derive(Debug, Clone, Copy, Default, PartialOrd, Ord, PartialEq, Eq)]
pub struct Mod(i64);
impl ::std::fmt::Display for Mod {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl Mod {
pub fn new(v: i64) -> Mod {
Mod(v % M)
}
pub fn pow(self, mut r: i64) -> Mod {
let mut k = self;
let mut ret = 1.into();
while r > 0 {
if r % 2 != 0 {
ret = ret * k;
}
r /= 2;
k = k * k;
}
ret
}
        // This requires M to be prime: by Fermat's little theorem, a^(M-2) ≡ a^(-1) (mod M)
pub fn recip(self) -> Mod {
self.pow(M - 2)
}
}
use std::ops::*;
impl<T: Into<Mod>> Add<T> for Mod {
type Output = Mod;
fn add(self, rhs: T) -> Self::Output {
Mod::new(self.0 + rhs.into().0)
}
}
impl<T: Into<Mod>> AddAssign<T> for Mod {
fn add_assign(&mut self, rhs: T) {
*self = *self + rhs;
}
}
impl<T: Into<Mod>> Sub<T> for Mod {
type Output = Mod;
fn sub(self, rhs: T) -> Self::Output {
Mod::new(self.0 - rhs.into().0 + M)
}
}
impl<T: Into<Mod>> SubAssign<T> for Mod {
fn sub_assign(&mut self, rhs: T) {
*self = *self - rhs;
}
}
impl<T: Into<Mod>> Mul<T> for Mod {
type Output = Mod;
fn mul(self, rhs: T) -> Self::Output {
Mod::new(self.0 * rhs.into().0)
}
}
impl<T: Into<Mod>> MulAssign<T> for Mod {
fn mul_assign(&mut self, rhs: T) {
*self = *self * rhs;
}
}
impl<T: Into<Mod>> Div<T> for Mod {
type Output = Mod;
fn div(self, rhs: T) -> Self::Output {
self * rhs.into().recip()
}
}
impl<T: Into<Mod>> DivAssign<T> for Mod {
fn div_assign(&mut self, rhs: T) {
*self = *self / rhs;
}
}
impl Neg for Mod {
type Output = Mod;
fn neg(self) -> Self::Output {
Mod(0) - self
}
}
impl<T: ::std::convert::Into<i64>> ::std::convert::From<T> for Mod {
fn from(v: T) -> Self {
Mod::new(v.into())
}
}
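    // Added illustrative check (not in the original file): `recip` really is the
    // multiplicative inverse modulo M, which relies on M = 1_000_000_007 being prime.
    #[cfg(test)]
    mod tests {
        use super::Mod;
        #[test]
        fn recip_is_the_multiplicative_inverse() {
            let two = Mod::new(2);
            assert_eq!(two * two.recip(), Mod::new(1));
        }
    }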
}
type Pi = Vec<u32>;
fn calc_k(pi:&Pi, i:u32, n:u32) -> u32 {
// pi(i) + 1 .. n
let g0: HashSet<u32> = (pi[i as usize - 1] + 1.. n + 2).collect();
// pi(1) .. pi(i-1)
let g1: HashSet<u32> = pi[0 .. (i - 1) as usize].to_vec().into_iter().collect();
let diff = &g0 - &g1;
match diff.into_iter().min() {
Some(k) => k,
_ => 0
}
}
fn enumlation(n: u32) -> Vec<Pi> {
let mut pi : Pi = (1..n+1).collect();
let mut i = n - 1;
let mut result : Vec<Vec<u32>> = Vec::new();
let mut k = calc_k(&pi, i , n);
result.push(pi.clone());
// k == n + 1, i == 1
while k != n + 1 || i != 1 {
if k <= n {
pi[i as usize - 1] = k;
if i == n {
result.push(pi.clone());
}
if i < n {
pi[i as usize] = 0;
i = i + 1;
}
}
if k == n + 1 {
i = i - 1;
}
k = calc_k(&pi, i , n);
}
result
}
const MOVES: [(i64,i64); 4] = [(1, 0), (0, 1), (-1, 0), (0, -1)];
fn main(){
    input!{
        n: usize,
        m: usize,
        mut field: [chars; n]
    }
    // Count the connected components of 'w' cells, flood-filling with an explicit
    // stack and the 4-directional MOVES defined above.
    let mut lakes = 0;
    for sy in 0..n {
        for sx in 0..m {
            if field[sy][sx] != 'w' {
                continue;
            }
            lakes += 1;
            field[sy][sx] = '.';
            let mut stack = vec![(sx as i64, sy as i64)];
            while let Some((x, y)) = stack.pop() {
                for &(dx, dy) in &MOVES {
                    let (nx, ny) = (x + dx, y + dy);
                    if nx < 0 || ny < 0 || nx >= m as i64 || ny >= n as i64 {
                        continue;
                    }
                    if field[ny as usize][nx as usize] == 'w' {
                        field[ny as usize][nx as usize] = '.';
                        stack.push((nx, ny));
                    }
                }
            }
        }
    }
    println!("{}", lakes);
}
| true |
a8fae04e8c6bd54d0f6f91f979db4bcd9875bdee
|
Rust
|
Grinshpon/Feldunor
|
/src/states/rl.rs
|
UTF-8
| 2,525 | 2.546875 | 3 |
[] |
no_license
|
use bracket_lib::prelude::*;
use shipyard::{AllStoragesViewMut, EntityId, EntitiesViewMut, UniqueView, ViewMut};
use std::any::Any;
use crate::state::{AppData, SEvent, State};
use crate::components::*;
use crate::map::*;
#[derive(Clone,Copy,PartialEq,Eq)]
pub enum Turn {
Player,
World,
}
pub struct RL {
pub entities: Vec<EntityId>,
pub turn: Turn,
}
impl RL {
pub fn new() -> Self {
RL {
entities: Vec::new(),
turn: Turn::Player,
}
}
}
impl State for RL {
any!();
type Event = BEvent;
fn load(&mut self, data: &mut AppData) {
data.world.add_unique(Map::new(80,50));
data.world.run_with_data(initial_entities,self);
//data.world.run(visibility);
//data.world.run(map_index);
}
fn unload(&mut self, data: &mut AppData) {
data.world.remove_unique::<Map>();
data.world.run(|mut storages: AllStoragesViewMut| {
for id in self.entities.iter() {
storages.delete(*id);
}
});
self.entities.clear();
}
fn update(&mut self, data: &mut AppData) -> SEvent<BEvent> {
data.world.run(visibility);
data.world.run(map_index);
if let Turn::World = self.turn {
data.world.run(monster_update);
self.turn = Turn::Player;
}
SEvent::Cont
}
fn event(&mut self, data: &mut AppData, event: BEvent) -> SEvent<BEvent> {
if let Turn::Player = self.turn {
if data.world.run_with_data(player_event, event) {
self.turn = Turn::World;
}
}
SEvent::Cont
}
}
fn initial_entities(
state: &mut RL,
mut entities: EntitiesViewMut,
map: UniqueView<Map>,
mut players: ViewMut<Player>,
mut stats: ViewMut<Stat>,
mut pos: ViewMut<Pos>,
mut viewsheds: ViewMut<Viewshed>,
mut monsters: ViewMut<Monster>,
mut renders: ViewMut<Render>,
mut names: ViewMut<Name>,
mut blocks: ViewMut<BlockTile>,
) {
let start = map.rooms[0].center();
add_entity!(state,entities,
(&mut players, &mut stats, &mut pos, &mut viewsheds, &mut renders),
(Player, Stat::default(), Pos { x: start[0], y: start[1] }, Viewshed::new(12), Render::player()),
);
for room in map.rooms.iter().skip(1) {
let [x,y] = room.center();
add_entity!(state,entities,
(&mut monsters, &mut names, &mut stats, &mut pos, &mut viewsheds, &mut renders, &mut blocks),
(Monster, Name(String::from("Goblin")), Stat::new(2,2,2,2), Pos {x,y}, Viewshed::new(12), Render::goblin(), BlockTile),
);
}
}
| true |
92991f8950f3eed76fd09034ddecdf5e74ac1f21
|
Rust
|
ticketland-io/sharks
|
/src/lib.rs
|
UTF-8
| 7,263 | 3.515625 | 4 |
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] |
permissive
|
//! Fast, small and secure [Shamir's Secret Sharing](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing) library crate
//!
//! Usage example (std):
//! ```
//! use sharks::{ Sharks, Share };
//!
//! // Set a minimum threshold of 10 shares
//! let sharks = Sharks(10);
//! // Obtain an iterator over the shares for secret [1, 2, 3, 4]
//! # #[cfg(feature = "std")]
//! # {
//! let dealer = sharks.dealer(&[1, 2, 3, 4]);
//! // Get 10 shares
//! let shares: Vec<Share> = dealer.take(10).collect();
//! // Recover the original secret!
//! let secret = sharks.recover(shares.as_slice()).unwrap();
//! assert_eq!(secret, vec![1, 2, 3, 4]);
//! # }
//! ```
//!
//! Usage example (no std):
//! ```
//! use sharks::{ Sharks, Share };
//! use rand_chacha::rand_core::SeedableRng;
//!
//! // Set a minimum threshold of 10 shares
//! let sharks = Sharks(10);
//! // Obtain an iterator over the shares for secret [1, 2, 3, 4]
//! let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]);
//! let dealer = sharks.dealer_rng(&[1, 2, 3, 4], &mut rng);
//! // Get 10 shares
//! let shares: Vec<Share> = dealer.take(10).collect();
//! // Recover the original secret!
//! let secret = sharks.recover(shares.as_slice()).unwrap();
//! assert_eq!(secret, vec![1, 2, 3, 4]);
//! ```
#![cfg_attr(not(feature = "std"), no_std)]
mod field;
mod math;
mod share;
extern crate alloc;
use alloc::vec::Vec;
use hashbrown::HashSet;
use field::GF256;
pub use share::Share;
/// Tuple struct which implements methods to generate shares and recover secrets over a 256 bits Galois Field.
/// Its only parameter is the minimum shares threshold.
///
/// Usage example:
/// ```
/// # use sharks::{ Sharks, Share };
/// // Set a minimum threshold of 10 shares
/// let sharks = Sharks(10);
/// // Obtain an iterator over the shares for secret [1, 2, 3, 4]
/// # #[cfg(feature = "std")]
/// # {
/// let dealer = sharks.dealer(&[1, 2, 3, 4]);
/// // Get 10 shares
/// let shares: Vec<Share> = dealer.take(10).collect();
/// // Recover the original secret!
/// let secret = sharks.recover(shares.as_slice()).unwrap();
/// assert_eq!(secret, vec![1, 2, 3, 4]);
/// # }
/// ```
pub struct Sharks(pub u8);
impl Sharks {
/// This method is useful when `std` is not available. For typical usage
/// see the `dealer` method.
///
/// Given a `secret` byte slice, returns an `Iterator` along new shares.
/// The maximum number of shares that can be generated is 256.
/// A random number generator has to be provided.
///
/// Example:
/// ```
/// # use sharks::{ Sharks, Share };
/// # use rand_chacha::rand_core::SeedableRng;
/// # let sharks = Sharks(3);
/// // Obtain an iterator over the shares for secret [1, 2]
/// let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]);
/// let dealer = sharks.dealer_rng(&[1, 2], &mut rng);
/// // Get 3 shares
/// let shares: Vec<Share> = dealer.take(3).collect();
pub fn dealer_rng<R: rand::Rng>(
&self,
secret: &[u8],
rng: &mut R,
) -> impl Iterator<Item = Share> {
let mut polys = Vec::with_capacity(secret.len());
for chunk in secret {
polys.push(math::random_polynomial(GF256(*chunk), self.0, rng))
}
math::get_evaluator(polys)
}
/// Given a `secret` byte slice, returns an `Iterator` along new shares.
/// The maximum number of shares that can be generated is 256.
///
/// Example:
/// ```
/// # use sharks::{ Sharks, Share };
/// # let sharks = Sharks(3);
/// // Obtain an iterator over the shares for secret [1, 2]
/// let dealer = sharks.dealer(&[1, 2]);
/// // Get 3 shares
/// let shares: Vec<Share> = dealer.take(3).collect();
#[cfg(feature = "std")]
pub fn dealer(&self, secret: &[u8]) -> impl Iterator<Item = Share> {
let mut rng = rand::thread_rng();
self.dealer_rng(secret, &mut rng)
}
/// Given an iterable collection of shares, recovers the original secret.
/// If the number of distinct shares is less than the minimum threshold an `Err` is returned,
/// otherwise an `Ok` containing the secret.
///
/// Example:
/// ```
/// # use sharks::{ Sharks, Share };
/// # use rand_chacha::rand_core::SeedableRng;
/// # let sharks = Sharks(3);
/// # let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]);
/// # let mut shares: Vec<Share> = sharks.dealer_rng(&[1], &mut rng).take(3).collect();
/// // Recover original secret from shares
/// let mut secret = sharks.recover(&shares);
/// // Secret correctly recovered
/// assert!(secret.is_ok());
/// // Remove shares for demonstration purposes
/// shares.clear();
/// secret = sharks.recover(&shares);
/// // Not enough shares to recover secret
/// assert!(secret.is_err());
pub fn recover<'a, T>(&self, shares: T) -> Result<Vec<u8>, &str>
where
T: IntoIterator<Item = &'a Share>,
T::IntoIter: Iterator<Item = &'a Share>,
{
let mut share_length: Option<usize> = None;
let mut keys: HashSet<u8> = HashSet::new();
let mut values: Vec<Share> = Vec::new();
for share in shares.into_iter() {
if share_length.is_none() {
share_length = Some(share.y.len());
}
if Some(share.y.len()) != share_length {
return Err("All shares must have the same length");
} else {
keys.insert(share.x.0);
values.push(share.clone());
}
}
if keys.is_empty() || (keys.len() < self.0 as usize) {
Err("Not enough shares to recover original secret")
} else {
Ok(math::interpolate(values.as_slice()))
}
}
}
#[cfg(test)]
mod tests {
use super::{Share, Sharks};
use alloc::{vec, vec::Vec};
impl Sharks {
#[cfg(not(feature = "std"))]
fn make_shares(&self, secret: &[u8]) -> impl Iterator<Item = Share> {
use rand_chacha::{rand_core::SeedableRng, ChaCha8Rng};
let mut rng = ChaCha8Rng::from_seed([0x90; 32]);
self.dealer_rng(secret, &mut rng)
}
#[cfg(feature = "std")]
fn make_shares(&self, secret: &[u8]) -> impl Iterator<Item = Share> {
self.dealer(secret)
}
}
#[test]
fn test_insufficient_shares_err() {
let sharks = Sharks(255);
let shares: Vec<Share> = sharks.make_shares(&[1]).take(254).collect();
let secret = sharks.recover(&shares);
assert!(secret.is_err());
}
#[test]
fn test_duplicate_shares_err() {
let sharks = Sharks(255);
let mut shares: Vec<Share> = sharks.make_shares(&[1]).take(255).collect();
shares[1] = Share {
x: shares[0].x.clone(),
y: shares[0].y.clone(),
};
let secret = sharks.recover(&shares);
assert!(secret.is_err());
}
#[test]
fn test_integration_works() {
let sharks = Sharks(255);
let shares: Vec<Share> = sharks.make_shares(&[1, 2, 3, 4]).take(255).collect();
let secret = sharks.recover(&shares).unwrap();
assert_eq!(secret, vec![1, 2, 3, 4]);
}
}
| true |
6bbae8b015adb3b8ea9cfb15b08a41815a6f9c9b
|
Rust
|
i-dentify/echarge-vnext
|
/rust/car/api/src/graphql/models.rs
|
UTF-8
| 460 | 2.546875 | 3 |
[] |
no_license
|
use std::num::NonZeroU16;
use async_graphql::{ID, Object};
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize)]
pub struct Car {
pub id: ID,
pub name: String,
pub battery_capacity: NonZeroU16,
}
#[Object]
impl Car {
async fn id(&self) -> &ID {
&self.id
}
async fn name(&self) -> &String {
&self.name
}
async fn battery_capacity(&self) -> &NonZeroU16 {
&self.battery_capacity
}
}
| true |
54bdb966b84d57a224d504c5e00576e8ba80d97a
|
Rust
|
yvt/farcri-rs
|
/src/bencher/bencher.rs
|
UTF-8
| 4,645 | 3.625 | 4 |
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use super::measurement;
/// Timer struct used to iterate a benchmarked function and measure the runtime.
///
/// This struct provides different timing loops as methods. Each timing loop provides a different
/// way to time a routine and each has advantages and disadvantages.
///
/// * If you want to do the iteration and measurement yourself (eg. passing the iteration count
/// to a separate process), use `iter_custom`.
/// * Otherwise, use `iter`.
pub struct Bencher<'link> {
/// Have we iterated this benchmark?
pub(super) iterated: bool,
/// Number of times to iterate this benchmark
pub(super) iters: u64,
/// The measured value
pub(super) value: u64,
/// Reference to the measurement object
pub(super) measurement: measurement::Measurement<'link>,
/// How much time did it take to perform the iteration? Used for the warmup period.
pub(super) elapsed_time: measurement::Duration,
/// Specifies whether `elapsed_time` should be set.
pub(super) wants_elapsed_time: bool,
}
impl Bencher<'_> {
/// Times a `routine` by executing it many times and timing the total elapsed time.
///
/// Prefer this timing loop when `routine` returns a value that doesn't have a destructor.
///
/// # Timing model
///
/// Note that the `Bencher` also times the time required to destroy the output of `routine()`.
/// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
/// to the runtime of the `routine`.
///
/// ```text
/// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
/// ```
///
/// # Example
///
/// ```rust
/// #[macro_use] extern crate criterion;
///
/// use criterion::*;
///
/// // The function to benchmark
/// fn foo() {
/// // ...
/// }
///
/// fn bench(c: &mut Criterion) {
/// c.bench_function("iter", move |b| {
/// b.iter(|| foo())
/// });
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```
///
#[inline(never)]
pub fn iter<O, R>(&mut self, mut routine: R)
where
R: FnMut() -> O,
{
self.iterated = true;
let time_start = self.wants_elapsed_time.then(|| self.measurement.now());
let start = self.measurement.value();
for _ in 0..self.iters {
black_box(routine());
}
self.value = self.measurement.value().wrapping_sub(start);
if let Some(time_start) = time_start {
self.elapsed_time = self.measurement.now() - time_start;
}
}
/// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time.
///
/// Prefer this timing loop in cases where `routine` has to do its own measurements to
/// get accurate timing information (for example in multi-threaded scenarios where you spawn
/// and coordinate with multiple threads).
///
/// # Timing model
/// Custom, the timing model is whatever is returned as the Duration from `routine`.
///
/// # Example
/// ```rust
/// #[macro_use] extern crate criterion;
/// use criterion::*;
/// use criterion::black_box;
/// use std::time::Instant;
///
/// fn foo() {
/// // ...
/// }
///
/// fn bench(c: &mut Criterion) {
/// c.bench_function("iter", move |b| {
/// b.iter_custom(|iters| {
/// let start = Instant::now();
/// for _i in 0..iters {
/// black_box(foo());
/// }
/// start.elapsed()
/// })
/// });
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```
///
#[inline(never)]
pub fn iter_custom<R>(&mut self, mut routine: R)
where
R: FnMut(u64) -> u64,
{
self.iterated = true;
let time_start = self.measurement.now();
self.value = routine(self.iters);
self.elapsed_time = self.measurement.now() - time_start;
}
// Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly
// if they don't.
pub(crate) fn assert_iterated(&mut self) {
if !self.iterated {
panic!("Benchmark function must call Bencher::iter or related method.");
}
self.iterated = false;
}
}
/// An identity function that hides `dummy` from the optimizer via a volatile read,
/// so benchmarked computations are not optimized away.
pub fn black_box<T>(dummy: T) -> T {
unsafe {
let ret = core::ptr::read_volatile(&dummy);
core::mem::forget(dummy);
ret
}
}
| true |
a3088ae53e4f2c6d482b043ebe65ab5f0c8d99b9
|
Rust
|
AngelOnFira/advent-of-code
|
/2021/src/day25.rs
|
UTF-8
| 3,079 | 3.421875 | 3 |
[] |
no_license
|
use std::collections::HashMap;
use regex::Regex;
pub struct Instruction {}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Place {
East,
South,
Empty,
}
const xlen: usize = 8;
const ylen: usize = 7;
#[aoc_generator(day25)]
pub fn input_generator(input: &str) -> [[Place; xlen]; ylen] {
// v...>>.vv>
// .vv>>.vv..
// >>.>v>...v
// >>v>>.>.v.
// v>v.vv.v..
// >.>>..v...
// .vv..>.>v.
// v.v..>>v.v
// ....v..v.>
let mut map = [[Place::Empty; xlen]; ylen];
input.lines().enumerate().for_each(|(y, line)| {
line.chars().enumerate().for_each(|(x, c)| {
map[y][x] = match c {
'>' => Place::East,
'v' => Place::South,
'.' => Place::Empty,
_ => panic!("Unknown character {}", c),
};
});
});
map
}
#[aoc(day25, part1)]
pub fn solve_part1(input: &[[Place; xlen]; ylen]) -> i32 {
let mut old_map = input.clone();
let mut i = 0;
loop {
i += 1;
let mut new_map = old_map.clone();
// Move all the east facing first
for y in 0..ylen {
for x in 0..xlen {
if let Place::East = old_map[y][x] {
let mut new_x = x + 1;
let mut new_y = y;
if new_x == xlen {
new_x = 0;
}
// If this space is empty, move to it
if old_map[new_y][new_x] == Place::Empty {
new_map[new_y][new_x] = Place::East;
new_map[new_y][x] = Place::Empty;
}
}
}
}
let old_map_2 = new_map.clone();
// Move all the south facing next
for y in 0..ylen {
for x in 0..xlen {
if let Place::South = old_map_2[y][x] {
let mut new_x = x;
let mut new_y = y + 1;
if new_y == ylen {
new_y = 0;
}
// If this space is empty, move to it
if old_map_2[new_y][new_x] == Place::Empty {
new_map[new_y][new_x] = Place::South;
new_map[y][new_x] = Place::Empty;
}
}
}
}
// Draw the new map
for y in 0..ylen {
for x in 0..xlen {
print!(
"{}",
match new_map[y][x] {
Place::East => '>',
Place::South => 'v',
Place::Empty => '.',
}
);
}
println!();
}
println!();
if new_map == old_map {
break;
}
if i > 10 {
break;
}
old_map = new_map.clone();
}
i
}
#[aoc(day25, part2)]
pub fn solve_part2(input: &[[Place; xlen]; ylen]) -> i32 {
3
}
| true |
b4aced5eda990bc669ff1858e048b8f62f0b0f27
|
Rust
|
EvanLib/Chip8
|
/src/core/display.rs
|
UTF-8
| 4,576 | 3.109375 | 3 |
[] |
no_license
|
use minifb::{Key, Scale, Window, WindowOptions};
use std::fmt;
const CHIP8_WIDTH: usize = 64;
const CHIP8_HEIGHT: usize = 32;
#[derive(Clone, Debug)]
pub struct Display {
// vram
pub vram: [[u8; CHIP8_WIDTH]; CHIP8_HEIGHT],
// update
pub update: bool,
// minifb buffer
pub buffer: Vec<u32>,
}
impl Display {
/// Create and return a new Display instance.
pub fn new() -> Display {
let buffer_width = CHIP8_WIDTH;
let buffer_height = CHIP8_HEIGHT;
Display {
vram: [[0u8; CHIP8_WIDTH]; CHIP8_HEIGHT],
buffer: vec![from_u8_rgb(0, 0, 0); buffer_height * buffer_width],
update: true,
}
}
pub fn draw(&mut self, xpos: usize, ypos: usize, sprite: &[u8]) -> bool {
let mut collision = false;
let h = sprite.len();
for j in 0..h {
for i in 0..8 {
// screen wrap if necessary
let y = (ypos + j) % CHIP8_HEIGHT;
let x = (xpos + i) % CHIP8_WIDTH;
// draw each sprite pixel with a XOR operation
// i.e. toggle the pixel
// 0x80 = 1000 0000 : allows to check each pixel in the sprite
if (sprite[j] & (0x80 >> i)) != 0x00 {
if self.vram[y][x] == 0x01 {
collision = true;
}
self.vram[y][x] ^= 0x01;
}
}
}
self.update = true;
collision
}
}
impl fmt::Display for Display {
// This trait requires `fmt` with this exact signature.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..CHIP8_WIDTH {
for j in 0..CHIP8_HEIGHT {
write!(f, "{}", self.vram[j][i])?;
}
write!(f, "\n")?;
}
Ok(())
}
}
fn from_u8_rgb(r: u8, g: u8, b: u8) -> u32 {
let (r, g, b) = (r as u32, g as u32, b as u32);
(r << 16) | (g << 8) | b
}
pub fn emulate() {
let window_width = CHIP8_WIDTH;
let window_height = CHIP8_HEIGHT;
let buffer_width = CHIP8_WIDTH;
let buffer_height = CHIP8_HEIGHT;
let mut display = Display::new();
let mut window = Window::new(
"Test",
window_width,
window_height,
WindowOptions {
scale: Scale::X4,
resize: true,
borderless: false,
title: true,
..WindowOptions::default()
},
)
.unwrap();
display.draw(0, 0, &FONT_SET[0..5]);
display.draw(5, 0, &FONT_SET[5..10]);
display.draw(10, 0, &FONT_SET[15..20]);
display.draw(15, 0, &FONT_SET[20..25]);
display.draw(20, 0, &FONT_SET[25..30]);
println!("{}", display);
for y in 0u32..(buffer_height as u32) {
for x in 0u32..(buffer_width as u32) {
if display.vram[y as usize][x as usize] == 1u8 {
let ve = y * (buffer_width as u32) + x;
display.buffer[ve as usize] = from_u8_rgb(255, 0, 0);
}
}
}
window
.update_with_buffer(&display.buffer, buffer_width, buffer_height)
.unwrap();
while window.is_open() && !window.is_key_down(Key::Escape) {
window.update();
if window.is_key_down(Key::A) {
display.draw(20, 8, &FONT_SET[25..30]);
for y in 0u32..(buffer_height as u32) {
for x in 0u32..(buffer_width as u32) {
if display.vram[y as usize][x as usize] == 1u8 {
let ve = y * (buffer_width as u32) + x;
display.buffer[ve as usize] = from_u8_rgb(255, 0, 0);
}
}
}
window
.update_with_buffer(&display.buffer, buffer_width, buffer_height)
.unwrap();
println!("Key A is down");
}
}
}
pub static FONT_SET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80, // F
];
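// Added illustrative check (not part of the original file): drawing the same sprite
// twice XORs the pixels away again and reports a collision on the second call.
#[cfg(test)]
mod tests {
    use super::Display;

    #[test]
    fn redraw_reports_collision_and_clears_pixels() {
        let mut display = Display::new();
        let sprite = [0b1111_0000u8];
        assert!(!display.draw(0, 0, &sprite));
        assert!(display.draw(0, 0, &sprite));
        assert!(display.vram[0].iter().all(|&p| p == 0));
    }
}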
| true |
341a39435e90d1c4f7bf35cfe4faabcd787721dd
|
Rust
|
timvermeulen/hollow_heap
|
/tests/proptests.rs
|
UTF-8
| 1,495 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
#[macro_use]
extern crate proptest;
use proptest::prelude::*;
use proptest::collection::vec;
use hollow_heap::HollowHeap;
proptest! {
#[test]
fn doesnt_crash(num in 0..100000) {
let mut heap = HollowHeap::max_heap();
heap.push(num);
assert!(heap.pop() == Some(num));
assert!(heap.pop() == None);
}
#[test]
fn repeated_pop_returns_sorted_vec(vector in vec(u32::arbitrary(), 0..1000)) {
println!("{:?}", vector);
let mut heap = HollowHeap::max_heap();
for num in vector.iter() {
heap.push(num);
}
let mut sorted = vector.clone();
sorted.sort_by(|a, b| b.cmp(a));
for num in sorted.iter() {
prop_assert_eq!(heap.pop(), Some(num));
}
}
#[test]
fn doesnt_crash_with_delete_and_increase_key(vector in vec(u32::arbitrary(), 2..1000)) {
println!("{:?}", vector);
let mut heap = HollowHeap::max_heap();
let mut index = None;
let mut second_index = None;
for num in vector.iter() {
if index.is_none() {
index = Some(heap.push(*num));
} else if second_index.is_none() {
second_index = Some(heap.push(*num));
}
}
let index = index.unwrap();
let second_index = second_index.unwrap();
let value = *heap.peek().unwrap();
heap.increase_key(index, value + 1);
heap.delete(second_index);
}
}
| true |
4ce01454291b4694418af0ceb422491b447ba22b
|
Rust
|
henryboisdequin/vsreview
|
/server-rs/src/models/answer.rs
|
UTF-8
| 749 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
use crate::models::user::User;
use crate::utils::DATE_FORMAT;
use chrono::{DateTime, Utc};
use serde::Serialize;
#[derive(Queryable)]
pub struct Answer {
pub id: i32,
pub content: String,
pub question: i32,
pub author: i32,
pub created_at: DateTime<Utc>,
}
impl Answer {
pub fn attach(self, author: User) -> AnswerJson {
AnswerJson {
id: self.id,
content: self.content,
question: self.question,
author: author.id,
created_at: self.created_at.format(DATE_FORMAT).to_string(),
}
}
}
#[derive(Serialize)]
pub struct AnswerJson {
pub id: i32,
pub content: String,
pub question: i32,
pub author: i32,
pub created_at: String,
}
| true |
b2964ab0ae1c434529af16974b0a6a23a165628b
|
Rust
|
dfischer/unbase
|
/examples/ping-pong.rs
|
UTF-8
| 2,842 | 2.703125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#![feature(proc_macro, conservative_impl_trait, generators)]
extern crate futures_await as futures;
use futures::stream::Stream;
extern crate unbase;
use unbase::{Network,SubjectHandle};
use std::{thread,time};
/// This example is a rudimentary interaction between two remote nodes.
/// As of the time of this writing, the desired convergence properties of the system are not really implemented.
/// For now we are relying on the size of the cluster being smaller than the memo peering target,
/// rather than gossip (once the record has been made resident) or index convergence (prior to the record being located).
fn main() {
let t1 = thread::spawn(move || {
let net1 = Network::create_new_system();
let udp1 = unbase::network::transport::TransportUDP::new( "127.0.0.1:12001".to_string() );
net1.add_transport( Box::new(udp1) );
let context_a = unbase::Slab::new(&net1).create_context();
println!("A - Sending Initial Ping");
let rec_a1 = SubjectHandle::new_kv(&context_a, "action", "Ping").unwrap();
let mut pings = 0;
for _ in rec_a1.observe().wait() {
println!("A - VAL {:?}, {}", rec_a1.head_memo_ids(), rec_a1.get_value("action").unwrap());
if "Pong" == rec_a1.get_value("action").unwrap() {
println!("A - [ Ping -> ]");
rec_a1.set_value("action","Ping").unwrap();
pings += 1;
if pings >= 10 {
break
}
}
}
});
// Ensure slab_a is listening
thread::sleep( time::Duration::from_millis(50) );
let t2 = thread::spawn(move || {
let net2 = unbase::Network::new();
net2.hack_set_next_slab_id(200);
let udp2 = unbase::network::transport::TransportUDP::new("127.0.0.1:12002".to_string());
net2.add_transport( Box::new(udp2.clone()) );
let context_b = unbase::Slab::new(&net2).create_context();
udp2.seed_address_from_string( "127.0.0.1:12001".to_string() );
println!("B - Waiting for root index seed...");
context_b.root_index_wait( 1000 ).unwrap();
println!("B - Searching for Ping record...");
let rec_b1 = context_b.fetch_kv_wait( "action", "Ping", 10000 ).unwrap();
println!("B - Found Ping record.");
let mut pongs = 0;
for _ in rec_b1.observe().wait() {
if "Ping" == rec_b1.get_value("action").unwrap() {
println!("B - [ <- Pong ]");
rec_b1.set_value("action","Pong").unwrap();
pongs += 1;
if pongs >= 10 {
break
}
}
}
});
t2.join().expect("thread 2"); // Thread 2 is more likely to panic
t1.join().expect("thread 1");
}
| true |
25bed8cf7e1aeffa0e3e6e1b14867883bda86131
|
Rust
|
zarbafian/draft
|
/src/util.rs
|
UTF-8
| 2,370 | 3.625 | 4 |
[
"MIT"
] |
permissive
|
use std::sync::{mpsc, Arc, Mutex};
use std::thread;
use log::{debug, info};
type MessageHandler = Box<dyn FnOnce() + Send + 'static>;
enum Message {
New(MessageHandler),
Terminate,
}
struct Worker {
id: usize,
thread: Option<thread::JoinHandle<()>>,
}
impl Worker {
fn new(id: usize, receiver: Arc<Mutex<mpsc::Receiver<Message>>>) -> Worker {
let thread = thread::Builder::new()
.name(format!("message handler {}", id))
.spawn(move ||{
info!("Worker started");
loop {
let handler = receiver.lock().unwrap().recv().unwrap();
match handler {
Message::New(handler) => {
debug!("Received a message");
handler()
},
Message::Terminate => {
debug!("Will terminate");
break
},
}
}
info!("Worker terminated");
}).unwrap();
Worker{
id,
thread: Some(thread),
}
}
}
pub struct ThreadPool {
sender: mpsc::Sender<Message>,
workers: Vec<Worker>,
}
impl ThreadPool {
pub fn new(size: usize) -> ThreadPool{
assert!(size > 0, "Invalid size for thread pool: {}", size);
let (sender, receiver) = mpsc::channel();
let receiver = Arc::new(Mutex::new(receiver));
let mut workers = Vec::with_capacity(size);
for id in 1..=size {
workers.push(Worker::new(id, Arc::clone(&receiver)));
}
ThreadPool{
sender,
workers,
}
}
pub fn execute(&self, handler: MessageHandler) {
self.sender.send(Message::New(handler)).unwrap();
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
info!("Will send terminate message to all workers");
for _ in &self.workers {
self.sender.send(Message::Terminate).unwrap();
}
info!("Shutting down all workers");
for worker in &mut self.workers {
info!("Shutting down worker {}", worker.id);
if let Some(thread) = worker.thread.take() {
thread.join().unwrap();
}
}
}
}
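// Added illustrative usage (not in the original file): submit a few boxed closures
// and let `Drop` terminate and join the workers.
#[cfg(test)]
mod tests {
    use super::ThreadPool;
    use std::sync::{Arc, Mutex};

    #[test]
    fn executes_submitted_handlers() {
        let results = Arc::new(Mutex::new(Vec::new()));
        let pool = ThreadPool::new(2);
        for i in 0..4 {
            let results = Arc::clone(&results);
            pool.execute(Box::new(move || {
                results.lock().unwrap().push(i);
            }));
        }
        drop(pool); // sends Terminate to every worker and joins them
        assert_eq!(results.lock().unwrap().len(), 4);
    }
}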
| true |
b5067895ceff2758edf7d286c888ab8c1c3d86ec
|
Rust
|
Daivasmara/rust_book
|
/06/if_let/src/main.rs
|
UTF-8
| 1,294 | 4.09375 | 4 |
[] |
no_license
|
#[derive(Debug)]
enum UsState {
Alabama,
}
enum Coin {
Penny,
Quarter(UsState),
}
fn main() {
let some_u8_values = Some(0u8);
match some_u8_values {
Some(3) => println!("three"),
_ => (),
}
    // The match above is too verbose, since you need the catch-all `_ => ()` arm just to make it compile;
    // instead you can use if let, as in the example below
if let Some(3) = some_u8_values {
println!("three");
}
    // you can think of if let as syntactic sugar for a match
// another example below
let mut count: u32 = 0;
let coin1: Coin = Coin::Quarter(UsState::Alabama);
coin_func_match(&coin1, &mut count);
coin_func_if_let(&coin1, &mut count);
let coin2: Coin = Coin::Penny;
coin_func_match(&coin2, &mut count);
coin_func_if_let(&coin2, &mut count);
}
fn coin_func_match(coin: &Coin, count: &mut u32) {
match coin {
Coin::Quarter(state) => println!("State quarter from {:?}", state),
_ => {
*count += 1;
println!("count: {}", count);
}
}
}
fn coin_func_if_let(coin: &Coin, count: &mut u32) {
if let Coin::Quarter(state) = coin {
println!("State quarter from {:?}", state);
} else {
*count += 1;
println!("count: {}", count);
}
}
| true |
5274975d6f2894b24fe5d820583fb5e2dd745ffb
|
Rust
|
joedborg/diffie-hellman-demo
|
/src/onlookers.rs
|
UTF-8
| 287 | 2.84375 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
pub struct Public {
pub prime: u64,
pub root: u64,
pub amy: Option<u64>,
pub ben: Option<u64>,
}
impl Public {
pub fn amy(&mut self, value: u64) {
self.amy = Some(value);
}
pub fn ben(&mut self, value: u64) {
self.ben = Some(value);
}
}
| true |
1f4bf4b00f733de27532785730039b2d0404ec7d
|
Rust
|
jeremyschlatter/enigma
|
/src/main.rs
|
UTF-8
| 4,691 | 2.875 | 3 |
[] |
no_license
|
const ALPHABET_SIZE: usize = 26;
type Permutation = [usize; ALPHABET_SIZE];
const PERMUTATIONS: [Permutation; 5] = [
[
0, 21, 4, 7, 15, 18, 12, 14, 16, 8, 3, 19, 24, 23, 2, 11, 13, 5, 22, 20, 6, 25, 10, 17, 9,
1,
],
[
5, 22, 8, 24, 14, 16, 7, 11, 10, 18, 6, 15, 9, 25, 0, 2, 13, 3, 23, 21, 12, 20, 4, 17, 19,
1,
],
[
25, 4, 6, 20, 13, 21, 14, 12, 22, 11, 0, 17, 9, 16, 10, 15, 5, 19, 8, 1, 7, 3, 2, 24, 23,
18,
],
[
2, 17, 9, 1, 21, 12, 15, 11, 20, 3, 24, 14, 4, 10, 16, 22, 23, 5, 19, 7, 25, 6, 18, 13, 0,
8,
],
[
2, 17, 9, 1, 21, 12, 15, 11, 20, 3, 24, 14, 4, 10, 16, 22, 23, 5, 19, 7, 25, 6, 18, 13, 0,
8,
],
];
#[derive(Debug)]
struct Enigma {
rotors: [Rotor; 3],
reflector: Reflector,
plugboard: Permutation,
}
fn u2c(u: usize) -> char {
(u + ('a' as u8 as usize)) as u8 as char
}
fn c2u(c: char) -> usize {
c as usize - 'a' as usize
}
fn make_plugboard(p: Permutation, n: u32) -> Permutation {
let mut r = [0; ALPHABET_SIZE];
for i in 0..r.len() {
r[i] = i;
}
for i in 0..(n as usize) {
let (a, b) = (p[i * 2], p[i * 2 + 1]);
r[a] = b;
r[b] = a;
}
r
}
impl Enigma {
fn default() -> Enigma {
Enigma {
rotors: [
Rotor::from(PERMUTATIONS[0], 3),
Rotor::from(PERMUTATIONS[1], 5),
Rotor::from(PERMUTATIONS[2], 10),
],
reflector: Reflector::from(PERMUTATIONS[3]),
plugboard: make_plugboard(PERMUTATIONS[4], 10),
}
}
fn cipher(&mut self, s: &str) -> String {
let mut result = String::with_capacity(s.len());
for c in s.chars() {
result.push(self.cipher_one(c))
}
result
}
fn cipher_one(&mut self, c: char) -> char {
let mut u = c2u(c);
u = self.plugboard[u];
for rotor in self.rotors.iter() {
u = rotor.forward(u);
}
u = self.reflector.reflect(u);
for rotor in self.rotors.iter().rev() {
u = rotor.backward(u);
}
u = self.plugboard[u];
// Step the rotors.
for i in 0..self.rotors.len() {
if i == 0 || self.rotors[i - 1].notch == self.rotors[i - 1].offset
// Double-stepping:
|| (i < self.rotors.len() && self.rotors[i].notch == self.rotors[i].offset)
{
self.rotors[i].offset = (self.rotors[i].offset + 1) % ALPHABET_SIZE;
}
}
u2c(u)
}
}
#[derive(Debug)]
struct Reflector {
wiring: Permutation,
}
impl Reflector {
fn from(p: Permutation) -> Reflector {
let mut r = p.clone();
for i in 0..ALPHABET_SIZE / 2 {
let (a, b) = (p[i * 2], p[i * 2 + 1]);
r[a] = b;
r[b] = a;
}
Reflector { wiring: r }
}
fn reflect(&self, i: usize) -> usize {
self.wiring[i]
}
}
#[derive(Debug)]
struct Rotor {
wiring: Permutation,
wiring_backward: Permutation,
offset: usize,
notch: usize,
}
impl Rotor {
fn from(p: Permutation, notch: usize) -> Rotor {
let mut r = Rotor {
wiring: p,
wiring_backward: p,
offset: 0,
notch: notch,
};
for i in 0..ALPHABET_SIZE {
r.wiring[i] = (ALPHABET_SIZE + p[i] - i) % ALPHABET_SIZE;
r.wiring_backward[p[i]] = (ALPHABET_SIZE + i - p[i]) % ALPHABET_SIZE;
}
r
}
fn forward(&self, i: usize) -> usize {
(i + self.wiring[(i + self.offset) % ALPHABET_SIZE]) % ALPHABET_SIZE
}
fn backward(&self, i: usize) -> usize {
(i + self.wiring_backward[(i + self.offset) % ALPHABET_SIZE]) % ALPHABET_SIZE
}
}
fn main() {
let mut enigma = Enigma::default();
println!("{:?}", enigma.cipher("svoolcpbov"));
println!("{:?}", enigma.cipher("helloxkyle"));
}
#[test]
fn it_is_symmetric() {
let plaintext = "helloxkyle";
let ciphertext = Enigma::default().cipher(plaintext);
assert_eq!(Enigma::default().cipher(&ciphertext), plaintext);
}
#[test]
fn it_steps() {
let mut enigma = Enigma::default();
assert_ne!(enigma.cipher("testing"), enigma.cipher("testing"));
}
#[test]
fn it_steps_all_rotors() {
let mut enigma = Enigma::default();
for _ in 0..(ALPHABET_SIZE.pow(2) + ALPHABET_SIZE + 4) {
enigma.cipher("a");
}
assert_eq!(
(1, 3, 4),
(
enigma.rotors[2].offset,
enigma.rotors[1].offset,
enigma.rotors[0].offset
)
);
}
| true |
fb1992794599f32637b606e768ff6f64d37480ef | Rust | rainapepe/rust-nes-emulator | /src/ppu/memory_access.rs | UTF-8 | 13,763 | 2.875 | 3 | [] | no_license |
use crate::cartridge::Mirror;
use super::ppu2C02::Ppu2C02;
impl Ppu2C02 {
pub fn cpu_read(&mut self, addr: u16, read_only: bool) -> u8 {
if read_only {
// Reading from PPU registers can affect their contents
// so this read only option is used for examining the
// state of the PPU without changing its state. This is
// really only used in debug mode.
match addr {
// Control
0x0000 => self.control.reg,
// Mask
0x0001 => self.mask.reg,
// Status
0x0002 => self.status.reg,
// OAM Address
0x0003 => 0,
// OAM Data
0x0004 => 0,
// Scroll
0x0005 => 0,
// PPU Address
0x0006 => 0,
// PPU Data
0x0007 => 0,
_ => 0,
}
} else {
            // These are the live PPU registers that respond
// to being read from in various ways. Note that not
// all the registers are capable of being read from
// so they just return 0x00
match addr {
// Control - Not readable
0x0000 => 0,
// Mask - Not Readable
0x0001 => 0,
// Status
0x0002 => {
// Reading from the status register has the effect of resetting
// different parts of the circuit. Only the top three bits
// contain status information, however it is possible that
// some "noise" gets picked up on the bottom 5 bits which
// represent the last PPU bus transaction. Some games "may"
// use this noise as valid data (even though they probably
// shouldn't)
let data = (self.status.reg & 0xE0) | (self.ppu_data_buffer & 0x1F);
// Clear the vertical blanking flag
self.status.set_vertical_blank(0);
// Reset Loopy's Address latch flag
self.address_latch = 0;
data
}
// OAM Address - Not Readable
0x0003 => 0,
// OAM Data
0x0004 => self.oam_read(self.oam_addr),
// Scroll - Not Readable
0x0005 => 0,
// PPU Address - Not Readable
0x0006 => 0,
// PPU Data
0x0007 => {
// Reads from the NameTable ram get delayed one cycle,
// so output buffer which contains the data from the
// previous read request
let data = self.ppu_data_buffer;
// then update the buffer for next time
self.ppu_data_buffer = self.ppu_read(self.vram_addr.reg);
// All reads from PPU data automatically increment the nametable
// address depending upon the mode set in the control register.
// If set to vertical mode, the increment is 32, so it skips
// one whole nametable row; in horizontal mode it just increments
// by 1, moving to the next column
self.vram_addr.reg = self.vram_addr.reg
                        + if self.control.get_increment_mode() > 0 {
32
} else {
1
};
// However, if the address was in the palette range, the
// data is not delayed, so it returns immediately
if self.vram_addr.reg >= 0x3F00 {
self.ppu_data_buffer
} else {
data
}
}
_ => 0,
}
}
}
pub fn cpu_write(&mut self, addr: u16, data: u8) {
match addr {
// Control
0x0000 => {
self.control.reg = data;
self.tram_addr
.set_nametable_x(self.control.get_nametable_x());
self.tram_addr
.set_nametable_y(self.control.get_nametable_y());
}
// Mask
0x0001 => {
self.mask.reg = data;
}
// Status
0x0002 => {}
// OAM Address
0x0003 => {
self.oam_addr = data;
}
// OAM Data
0x0004 => {
self.oam_write(self.oam_addr, data);
}
// Scroll
0x0005 => {
if self.address_latch == 0 {
// First write to scroll register contains X offset in pixel space
// which we split into coarse and fine x values
self.fine_x = data & 0x07;
self.tram_addr.set_coarse_x(data >> 3);
self.address_latch = 1;
} else {
                    // Second write to scroll register contains Y offset in pixel space
// which we split into coarse and fine Y values
self.tram_addr.set_fine_y(data & 0x07);
self.tram_addr.set_coarse_y(data >> 3);
self.address_latch = 0;
}
}
// PPU Address
0x0006 => {
if self.address_latch == 0 {
// PPU address bus can be accessed by CPU via the ADDR and DATA
                    // registers. The first write to this register latches the high byte
// of the address, the second is the low byte. Note the writes
// are stored in the tram register...
self.tram_addr.reg =
((data as u16 & 0x3F) << 8) | (self.tram_addr.reg & 0x00FF);
self.address_latch = 1;
} else {
// ...when a whole address has been written, the internal vram address
// buffer is updated. Writing to the PPU is unwise during rendering
                    // as the PPU will maintain the vram address automatically whilst
// rendering the scanline position.
self.tram_addr.reg = (self.tram_addr.reg & 0xFF00) | data as u16;
self.vram_addr = self.tram_addr;
self.address_latch = 0;
}
}
// PPU Data
0x0007 => {
self.ppu_write(self.vram_addr.reg, data);
// All writes from PPU data automatically increment the nametable
// address depending upon the mode set in the control register.
// If set to vertical mode, the increment is 32, so it skips
// one whole nametable row; in horizontal mode it just increments
// by 1, moving to the next column
let increment = if self.control.get_increment_mode() > 0 {
32
} else {
1
};
self.vram_addr.reg = self.vram_addr.reg + increment;
}
_ => {}
}
}
pub fn ppu_read(&mut self, addr: u16) -> u8 {
let mut address = addr & 0x3FFF;
let (should_read, data) = self.chr_rom.read(address);
if should_read {
return data;
}
if address <= 0x1FFF {
println!("read table_pattern");
// If the cartridge cant map the address, have
// a physical location ready here
return self.table_pattern[((address & 0x1000) >> 12) as usize]
[(address & 0x0FFF) as usize];
}
if address >= 0x2000 && address <= 0x3EFF {
address &= 0x0FFF;
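            // Nametable mirroring: with vertical mirroring $2000/$2800 map to physical
            // table 0 and $2400/$2C00 to table 1; with horizontal mirroring $2000/$2400
            // share table 0 and $2800/$2C00 share table 1, as the range checks below show.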
if let Mirror::Vertical = self.chr_rom.mirror {
// Vertical
if address <= 0x03FF {
return self.table_name[0][(address & 0x03FF) as usize];
}
if address >= 0x0400 && address <= 0x07FF {
return self.table_name[1][(address & 0x03FF) as usize];
}
if address >= 0x0800 && address <= 0x0BFF {
return self.table_name[0][(address & 0x03FF) as usize];
}
if address >= 0x0C00 && address <= 0x0FFF {
return self.table_name[1][(address & 0x03FF) as usize];
}
}
if let Mirror::Horizontal = self.chr_rom.mirror {
// Horizontal
if address <= 0x03FF {
return self.table_name[0][(address & 0x03FF) as usize];
}
if address >= 0x0400 && address <= 0x07FF {
return self.table_name[0][(address & 0x03FF) as usize];
}
if address >= 0x0800 && address <= 0x0BFF {
return self.table_name[1][(address & 0x03FF) as usize];
}
if address >= 0x0C00 && address <= 0x0FFF {
return self.table_name[1][(address & 0x03FF) as usize];
}
}
return 0;
}
if address >= 0x3F00 && address <= 0x3FFF {
address &= 0x001F;
if address == 0x0010 {
address = 0x0000;
}
if address == 0x0014 {
address = 0x0004;
}
if address == 0x0018 {
address = 0x0008;
}
if address == 0x001C {
address = 0x000C;
}
return self.table_palette[address as usize]
& (if self.mask.get_grayscale() {
0x30
} else {
0x3F
});
}
0
}
pub fn ppu_write(&mut self, addr: u16, data: u8) {
let mut address = addr & 0x3FFF;
if self.chr_rom.write(address, data) {
return;
}
if address <= 0x1FFF {
self.table_pattern[((address & 0x1000) >> 12) as usize][(address & 0x0FFF) as usize] =
data;
return;
}
if address >= 0x2000 && address <= 0x3EFF {
address &= 0x0FFF;
if let Mirror::Vertical = self.chr_rom.mirror {
// Vertical
if address <= 0x03FF {
self.table_name[0][(address & 0x03FF) as usize] = data;
}
if address >= 0x0400 && address <= 0x07FF {
self.table_name[1][(address & 0x03FF) as usize] = data;
}
if address >= 0x0800 && address <= 0x0BFF {
self.table_name[0][(address & 0x03FF) as usize] = data;
}
if address >= 0x0C00 && address <= 0x0FFF {
self.table_name[1][(address & 0x03FF) as usize] = data;
}
return;
}
if let Mirror::Horizontal = self.chr_rom.mirror {
// Horizontal
if address <= 0x03FF {
self.table_name[0][(address & 0x03FF) as usize] = data;
}
if address >= 0x0400 && address <= 0x07FF {
self.table_name[0][(address & 0x03FF) as usize] = data;
}
if address >= 0x0800 && address <= 0x0BFF {
self.table_name[1][(address & 0x03FF) as usize] = data;
}
if address >= 0x0C00 && address <= 0x0FFF {
self.table_name[1][(address & 0x03FF) as usize] = data;
}
}
}
if address >= 0x3F00 && address <= 0x3FFF {
address &= 0x001F;
if address == 0x0010 {
address = 0x0000;
}
if address == 0x0014 {
address = 0x0004;
}
if address == 0x0018 {
address = 0x0008;
}
if address == 0x001C {
address = 0x000C;
}
self.table_palette[address as usize] = data;
}
}
pub fn oam_read(&mut self, addr: u8) -> u8 {
        let prop = addr & 0x3; // the struct has 4 fields, so use the two lowest bits (0x3 = 0b11)
        let index = (addr / 4) as usize; // get the position in the array
match prop {
// y
0 => self.oam[index].y,
// id
1 => self.oam[index].id,
// attribute
2 => self.oam[index].attribute,
// x
3 => self.oam[index].x,
_ => 0,
}
}
pub fn oam_write(&mut self, addr: u8, data: u8) {
        let prop = addr & 0x3; // the struct has 4 fields, so use the two lowest bits (0x3 = 0b11)
        let index = (addr / 4) as usize; // get the position in the array
match prop {
// y
0 => {
self.oam[index].y = data;
}
// id
1 => {
self.oam[index].id = data;
}
// attribute
2 => {
self.oam[index].attribute = data;
}
// x
3 => {
self.oam[index].x = data;
}
_ => {}
}
}
}
| true |
19213870fe36612dee3150a6eb27188eee8d723f | Rust | togatoga/competitive-lib | /rust/src/lazy_segment_tree.rs | UTF-8 | 13,555 | 2.734375 | 3 | ["MIT"] | permissive |
use cargo_snippet::snippet;
#[allow(clippy::module_inception)]
#[snippet]
/// LazySegmentTree is copied from ac-library-rs
pub mod lazy_segment_tree {
pub trait Monoid {
type S: Clone;
fn identity() -> Self::S;
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S;
}
pub trait MapMonoid {
type M: Monoid;
type F: Clone + PartialEq;
fn identity_element() -> <Self::M as Monoid>::S {
Self::M::identity()
}
fn binary_operation(
a: &<Self::M as Monoid>::S,
b: &<Self::M as Monoid>::S,
) -> <Self::M as Monoid>::S {
Self::M::binary_operation(a, b)
}
fn identity_map() -> Self::F;
fn mapping(f: &Self::F, x: &<Self::M as Monoid>::S) -> <Self::M as Monoid>::S;
fn composition(f: &Self::F, g: &Self::F) -> Self::F;
}
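    // A MapMonoid pairs a value monoid with a family of update maps (lazy tags),
    // e.g. range-add updates over a max monoid; see the `MaxAdd` implementation in
    // the tests at the bottom of this file for a minimal example.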
/// max(x1, x2, x3, ...)
pub struct Max<S>(S);
/// min(x1, x2, x3, ..., xn)
pub struct Min<S>(S);
/// x1 + x2 + x3 + ... + xn
pub struct Additive<S>(S);
/// x1 *x2 * x3 * ... * xn
pub struct Multiplicative<S>(S);
/// Implementation macros
macro_rules! impl_monoid {
($($ty:ty),*) => {
$(
impl Monoid for Max<$ty>
{
type S = $ty;
fn identity() -> Self::S {
Self::S::min_value()
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
std::cmp::max(*a, *b)
}
}
impl Monoid for Min<$ty>
{
type S = $ty;
fn identity() -> Self::S {
Self::S::max_value()
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
std::cmp::min(*a, *b)
}
}
impl Monoid for Additive<$ty>
{
type S = $ty;
fn identity() -> Self::S {
0
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
*a + *b
}
}
impl Monoid for Multiplicative<$ty>
{
type S = $ty;
fn identity() -> Self::S {
1
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
*a * *b
}
}
)*
};
}
impl_monoid!(i8, i16, i32, i64, u8, u16, u32, usize, u64);
pub struct LazySegMentTree<F>
where
F: MapMonoid,
{
n: usize,
log: usize,
size: usize,
d: Vec<<F::M as Monoid>::S>,
lz: Vec<F::F>,
}
impl<F: MapMonoid> From<Vec<<F::M as Monoid>::S>> for LazySegMentTree<F> {
fn from(v: Vec<<F::M as Monoid>::S>) -> Self {
let n = v.len();
let mut log = 0;
let mut size = 1;
while size <= n {
size <<= 1;
log += 1;
}
let mut d = vec![F::identity_element(); 2 * size];
let lz = vec![F::identity_map(); size];
d[size..(size + n)].clone_from_slice(&v);
let mut ret = LazySegMentTree {
n,
size,
log,
d,
lz,
};
for i in (1..size).rev() {
ret.update(i);
}
ret
}
}
impl<F> LazySegMentTree<F>
where
F: MapMonoid,
{
pub fn new(n: usize) -> Self {
vec![F::identity_element(); n].into()
}
fn update(&mut self, k: usize) {
self.d[k] = F::binary_operation(&self.d[2 * k], &self.d[2 * k + 1]);
}
fn all_apply(&mut self, k: usize, f: F::F) {
self.d[k] = F::mapping(&f, &self.d[k]);
if k < self.size {
self.lz[k] = F::composition(&f, &self.lz[k]);
}
}
fn push(&mut self, k: usize) {
self.all_apply(2 * k, self.lz[k].clone());
self.all_apply(2 * k + 1, self.lz[k].clone());
self.lz[k] = F::identity_map();
}
/// data[p] = x
/// O(logN)
pub fn set(&mut self, mut p: usize, x: <F::M as Monoid>::S) {
assert!(p < self.n);
p += self.size;
for i in (1..=self.log).rev() {
self.push(p >> i);
}
self.d[p] = x;
for i in 1..=self.log {
self.update(p >> i);
}
}
/// get data[p]
/// O(logN)
pub fn get(&mut self, mut p: usize) -> <F::M as Monoid>::S {
assert!(p < self.n);
p += self.size;
for i in (1..=self.log).rev() {
self.push(p >> i);
}
self.d[p].clone()
}
/// [l, r)
        /// binary_operation(a[l], a[l+1], ..., a[r - 1])
pub fn prod(&mut self, mut l: usize, mut r: usize) -> <F::M as Monoid>::S {
assert!(l <= r && r <= self.n);
if l == r {
return F::identity_element();
}
l += self.size;
r += self.size;
for i in (1..=self.log).rev() {
if ((l >> i) << i) != l {
self.push(l >> i);
}
if ((r >> i) << i) != r {
self.push(r >> i);
}
}
let mut sml = F::identity_element();
let mut smr = F::identity_element();
while l < r {
if l & 1 != 0 {
sml = F::binary_operation(&sml, &self.d[l]);
l += 1;
}
if r & 1 != 0 {
r -= 1;
smr = F::binary_operation(&self.d[r], &smr);
}
l >>= 1;
r >>= 1;
}
F::binary_operation(&sml, &smr)
}
        /// binary_operation(a[0], a[1], ..., a[n - 1])
pub fn all_prod(&self) -> <F::M as Monoid>::S {
self.d[1].clone()
}
/// data[p] = f(data[p])
pub fn apply(&mut self, mut p: usize, f: F::F) {
assert!(p < self.n);
p += self.size;
for i in (1..=self.log).rev() {
self.push(p >> i);
}
self.d[p] = F::mapping(&f, &self.d[p]);
for i in 1..=self.log {
self.update(p >> i);
}
}
/// [l, r)
/// data[p] = f(data[p]) p=l,l+1,...r-1
pub fn apply_range(&mut self, mut l: usize, mut r: usize, f: F::F) {
assert!(l <= r && r <= self.n);
if l == r {
return;
}
l += self.size;
r += self.size;
for i in (1..=self.log).rev() {
if ((l >> i) << i) != l {
self.push(l >> i);
}
if ((r >> i) << i) != r {
self.push((r - 1) >> i);
}
}
{
let l2 = l;
let r2 = r;
while l < r {
if l & 1 != 0 {
self.all_apply(l, f.clone());
l += 1;
}
if r & 1 != 0 {
r -= 1;
self.all_apply(r, f.clone());
}
l >>= 1;
r >>= 1;
}
l = l2;
r = r2;
}
for i in 1..=self.log {
if ((l >> i) << i) != l {
self.update(l >> i);
}
if ((r >> i) << i) != r {
self.update((r - 1) >> i);
}
}
}
}
use std::fmt::{Debug, Error, Formatter};
impl<F> Debug for LazySegMentTree<F>
where
F: MapMonoid,
F::F: Debug,
<F::M as Monoid>::S: Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
for i in 0..self.log {
f.write_fmt(format_args!("{:?}\t", self.d[self.log + i]))?;
}
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use super::lazy_segment_tree::{self, *};
use rand::{thread_rng, Rng};
struct MaxAdd;
impl MapMonoid for MaxAdd {
type M = Max<i32>;
type F = i32;
fn identity_map() -> Self::F {
0
}
fn mapping(&f: &i32, &x: &i32) -> i32 {
f + x
}
fn composition(&f: &i32, &g: &i32) -> i32 {
f + g
}
}
#[test]
fn test_max_add() {
let mut rng = thread_rng();
let mut seq: Vec<i32> = (0..1000).map(|_| rng.gen_range(0, 1000)).collect();
let n = seq.len();
let mut seg: LazySegMentTree<MaxAdd> = LazySegMentTree::new(n);
for (i, x) in seq.iter().enumerate() {
seg.set(i, *x);
}
(0..100).for_each(|_| {
let left = rng.gen_range(0, n);
let right = rng.gen_range(left, n) + 1;
let value = rng.gen_range(0, 100);
(left..right).for_each(|i| {
seq[i] += value;
});
let seq_max = *seq.iter().skip(left).take(right - left).max().unwrap();
seg.apply_range(left, right, value);
let seg_max = seg.prod(left, right);
assert_eq!(seq_max, seg_max);
(left..right).for_each(|i| {
assert_eq!(seg.prod(i, i + 1), seq[i]);
});
});
}
use super::super::mod_int::mod_int;
type ModInt = mod_int::ModInt<i64, mod_int::Mod1000000007>;
struct AdditiveMulMod;
impl Monoid for Additive<ModInt> {
type S = ModInt;
fn identity() -> Self::S {
ModInt::new(0)
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
*a + *b
}
}
impl MapMonoid for AdditiveMulMod {
type M = Additive<ModInt>;
type F = i64;
fn identity_map() -> Self::F {
1
}
fn mapping(&f: &Self::F, &x: &ModInt) -> ModInt {
x * f
}
fn composition(f: &Self::F, g: &Self::F) -> Self::F {
f * g
}
}
#[test]
fn test_additive_mul_mod() {
let mut rng = thread_rng();
let mut seq: Vec<ModInt> = (0..1000)
.map(|_| rng.gen_range(0, 1000))
.map(ModInt::new)
.collect();
let n = seq.len();
let mut seg: LazySegMentTree<AdditiveMulMod> = LazySegMentTree::from(seq.clone());
(0..100).for_each(|_| {
let left = rng.gen_range(0, n);
let right = rng.gen_range(left, n) + 1;
let value = rng.gen_range(0, 100);
(left..right).for_each(|i| {
seq[i] *= value;
});
let seq_total_mod = seq
.iter()
.skip(left)
.take(right - left)
.fold(ModInt::new(0), |x, y| x + *y);
seg.apply_range(left, right, value);
let seg_total_mod = seg.prod(left, right);
assert_eq!(seq_total_mod, seg_total_mod);
(left..right).for_each(|i| {
assert_eq!(seg.prod(i, i + 1), seq[i]);
});
});
}
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
struct UpdateAndSumValue {
value: i64,
size: i64,
}
impl Monoid for Additive<UpdateAndSumValue> {
type S = UpdateAndSumValue;
fn identity() -> Self::S {
UpdateAndSumValue { value: 0, size: 0 }
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
UpdateAndSumValue {
value: a.value + b.value,
size: a.size + b.size,
}
}
}
impl MapMonoid for UpdateAndSumValue {
type M = Additive<UpdateAndSumValue>;
type F = i64;
fn identity_map() -> Self::F {
1i64 << 62
}
fn mapping(&f: &Self::F, &x: &UpdateAndSumValue) -> UpdateAndSumValue {
if f == UpdateAndSumValue::identity_map() {
x
} else {
UpdateAndSumValue {
value: x.size * f,
size: x.size,
}
}
}
fn composition(f: &Self::F, g: &Self::F) -> Self::F {
if *f == UpdateAndSumValue::identity_map() {
*g
} else {
*f
}
}
}
/// Range Update and Range Sum
/// update(s, t, x): As,As1,As2,...Ast -> x
/// getSum(s, t): As+As1+As2+...+Ast
#[test]
fn test_range_update_and_range_sum() {
let mut seg = lazy_segment_tree::LazySegMentTree::<UpdateAndSumValue>::new(6);
for i in 0..6 {
seg.set(i, UpdateAndSumValue { value: 0, size: 1 });
}
seg.apply_range(1, 4, 1);
seg.apply_range(2, 5, -2);
assert_eq!(seg.prod(0, 6).value, -5);
assert_eq!(seg.prod(0, 2).value, 1);
seg.apply_range(3, 6, 3);
assert_eq!(seg.prod(3, 5).value, 6);
assert_eq!(seg.prod(0, 6).value, 8);
}
}
| true |
e38a4a12eaf3e88241ad2bb5f48103626fe19bd0 | Rust | zeromq/zmq.rs | /examples/task_sink.rs | UTF-8 | 1,008 | 2.65625 | 3 | ["MIT"] | permissive |
mod async_helpers;
use std::error::Error;
use std::io::Write;
use std::time::Instant;
use zeromq::{Socket, SocketRecv, SocketSend};
#[async_helpers::main]
async fn main() -> Result<(), Box<dyn Error>> {
// Socket to receive messages on
let mut receiver = zeromq::PullSocket::new();
receiver.bind("tcp://127.0.0.1:5558").await?;
// Socket for worker control
let mut controller = zeromq::PubSocket::new();
controller.bind("tcp://127.0.0.1:5559").await?;
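    // The first message is presumably the "start of batch" signal from the task
    // ventilator (this mirrors the classic zguide tasksink example).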
receiver.recv().await?;
let tstart = Instant::now();
for task_nbr in 0..100u8 {
receiver.recv().await?;
if task_nbr % 10 == 0 {
print!(":");
} else {
print!(".");
}
std::io::stdout().flush()?;
}
println!(
"\nTotal elapsed time: {} msec",
tstart.elapsed().as_millis()
);
// Send kill signal to workers
controller.send("KILL".into()).await?;
receiver.close().await;
controller.close().await;
Ok(())
}
| true |
3b1b00232fc570a70996728339cb30e5bda5ff00 | Rust | CyberFlameGO/abi_stable_crates | /abi_stable/src/type_layout/tl_fields.rs | UTF-8 | 6,334 | 2.984375 | 3 | ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "MIT"] | permissive |
use super::*;
use std::{
iter,
slice,
};
/// The layout of all compressed fields in a type definition,
/// one can access the expanded fields by calling the expand method.
#[repr(C)]
#[derive(Copy, Clone, StableAbi)]
#[sabi(unsafe_sabi_opaque_fields)]
pub struct CompTLFields {
/// All TLField fields which map 1:1.
comp_fields:*const CompTLField,
/// All the function pointer types in the field.
functions:Option<&'static TLFunctions >,
comp_fields_len:u16,
}
unsafe impl Sync for CompTLFields {}
unsafe impl Send for CompTLFields {}
impl CompTLFields{
/// A `CompTLFields` with no fields.
pub const EMPTY:Self=Self::from_fields(rslice![]);
/// Constructs a `CompTLFields`.
pub const fn new(
comp_fields:RSlice<'static,CompTLFieldRepr>,
functions:Option<&'static TLFunctions >,
)->Self{
Self{
comp_fields:comp_fields.as_ptr()
as *const CompTLFieldRepr
as *const CompTLField,
comp_fields_len:comp_fields.len() as u16,
functions,
}
}
/// Constructs a `CompTLFields` with fields,and without functions.
pub const fn from_fields(
comp_fields:RSlice<'static,CompTLField>,
)->Self{
Self{
comp_fields:comp_fields.as_ptr(),
comp_fields_len:comp_fields.len() as u16,
functions:None,
}
}
/// Accesses a slice of all the compressed fields in this `CompTLFields`.
pub fn comp_fields(&self)->&'static [CompTLField] {
unsafe{
slice::from_raw_parts(self.comp_fields,self.comp_fields_len as usize)
}
}
    /// Accesses all the compressed fields in this `CompTLFields` as an `RSlice`.
pub fn comp_fields_rslice(&self)->RSlice<'static,CompTLField> {
unsafe{
RSlice::from_raw_parts(self.comp_fields,self.comp_fields_len as usize)
}
}
/// Constructs an iterator over all the field names.
pub fn field_names(
&self,
shared_vars:&MonoSharedVars,
)->impl ExactSizeIterator<Item=&'static str>+Clone+'static{
let fields=self.comp_fields();
let strings=shared_vars.strings();
fields.iter().map(move|field| field.name(strings) )
}
/// Gets the name of the nth field.
pub fn get_field_name(&self,index:usize,shared_vars:&MonoSharedVars)-> Option<&'static str> {
let strings=shared_vars.strings();
self.comp_fields()
.get(index)
.map(|f| f.name(strings) )
}
/// The amount of fields this represents
pub fn len(&self)->usize{
self.comp_fields_len as usize
}
/// Whether there are no fields.
pub fn is_empty(&self) -> bool {
self.comp_fields_len == 0
}
/// Expands this into a TLFields,allowing access to expanded fields.
pub fn expand(self,shared_vars:&'static SharedVars)->TLFields{
TLFields{
shared_vars,
comp_fields:self.comp_fields_rslice(),
functions:self.functions,
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// The layout of all the fields in a type definition.
#[repr(C)]
#[derive(Copy, Clone, StableAbi)]
pub struct TLFields {
shared_vars:&'static SharedVars,
comp_fields:RSlice<'static,CompTLField>,
/// All the function pointer types in the field.
functions:Option<&'static TLFunctions >,
}
impl TLFields{
/// Constructs a TLFields from the compressed fields,without any functions.
pub fn from_fields(
comp_fields:&'static [CompTLField],
shared_vars:&'static SharedVars,
)->Self{
Self{
comp_fields:comp_fields.into(),
shared_vars,
functions:None,
}
}
/// The amount of fields this represents
pub fn len(&self)->usize{
self.comp_fields.len()
}
/// Whether this contains any fields
pub fn is_empty(&self)->bool{
self.comp_fields.is_empty()
}
    /// Gets the ith expanded field. Returns None if there is no ith field.
pub fn get(&self,i:usize)->Option<TLField>{
self.comp_fields.get(i)
.map(|field| field.expand(i,self.functions,self.shared_vars) )
}
/// Gets an iterator over the expanded fields.
pub fn iter(&self)->TLFieldsIterator{
TLFieldsIterator{
shared_vars:self.shared_vars,
comp_fields:self.comp_fields.as_slice().iter().enumerate(),
functions:self.functions,
}
}
/// Collects the expanded fields into a `Vec<TLField>`.
pub fn to_vec(&self)->Vec<TLField>{
self.iter().collect()
}
}
impl IntoIterator for TLFields {
type IntoIter=TLFieldsIterator;
type Item=TLField;
#[inline]
fn into_iter(self)->Self::IntoIter{
self.iter()
}
}
impl Debug for TLFields{
fn fmt(&self,f:&mut fmt::Formatter<'_>)->fmt::Result{
f.debug_list()
.entries(self.iter())
.finish()
}
}
impl Display for TLFields {
fn fmt(&self,f:&mut fmt::Formatter<'_>)->fmt::Result{
for field in self.iter() {
Display::fmt(&field,f)?;
writeln!(f)?;
}
Ok(())
}
}
impl Eq for TLFields{}
impl PartialEq for TLFields{
fn eq(&self,other:&Self)->bool{
self.iter().eq(other.iter())
}
}
///////////////////////////////////////////////////////////////////////////////
/**
An iterator over all the fields in a type definition.
*/
#[derive(Clone,Debug)]
pub struct TLFieldsIterator {
shared_vars:&'static SharedVars,
comp_fields:iter::Enumerate<slice::Iter<'static,CompTLField>>,
/// All the function pointer types in the field.
functions:Option<&'static TLFunctions >,
}
impl Iterator for TLFieldsIterator{
type Item=TLField;
fn next(&mut self)->Option<TLField>{
self.comp_fields.next()
.map(|(i,field)|{
field.expand(i,self.functions,self.shared_vars)
})
}
fn size_hint(&self)->(usize,Option<usize>){
let len=self.comp_fields.len();
(len,Some(len))
}
fn count(self) -> usize {
self.comp_fields.len()
}
}
impl std::iter::ExactSizeIterator for TLFieldsIterator{}
| true |
2f1a4f8d66b4dc297566ddb5109200825fb9344b | Rust | ijanos/euler-rust | /src/euler14.rs | UTF-8 | 545 | 3.25 | 3 | ["MIT", "LicenseRef-scancode-unknown-license-reference"] | permissive |
// I thought I would need to memoize this, but it is plenty fast already
fn collatz_length(a: u32) -> u32 {
let mut length = 1;
let mut n: u64 = a as u64;
while n != 1 {
n = if n % 2 == 0 {
n / 2
} else {
3 * n + 1
};
length += 1;
}
length
}
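// For example, collatz_length(6) == 9: 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.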
pub fn main() {
let mut max = 0;
let mut maxi = 0;
for i in 2..1_000_000 {
let cl = collatz_length(i);
if cl > max {
max = cl;
maxi = i;
}
}
println!("{}", maxi);
}
| true |
15223932ba5972c70eea6ef8c2a4ca6b785128b8 | Rust | ScottDillman/rune | /crates/rune-ssa/src/lib.rs | UTF-8 | 1,160 | 2.5625 | 3 | ["Apache-2.0", "MIT"] | permissive |
//! The state machine assembler of Rune.
#![allow(clippy::new_without_default)]
mod block;
mod constant;
mod error;
mod global;
mod internal;
mod phi;
mod program;
mod term;
mod value;
pub use self::block::Block;
pub use self::constant::Constant;
pub use self::error::Error;
pub use self::global::{Assign, BlockId, ConstId, StaticId, Var};
pub use self::phi::Phi;
pub use self::program::Program;
pub use self::term::Term;
pub use self::value::Value;
#[cfg(test)]
mod tests {
use super::{Constant, Error, Program};
#[test]
fn test_basic_program() -> Result<(), Error> {
let mut program = Program::new();
let end = program.block();
let entry = program.named("main");
let then_block = program.block();
let a = entry.input()?;
let b = entry.constant(Constant::Integer(10))?;
let condition = entry.cmp_lt(a, b)?;
entry.jump_if(condition, &then_block, &end)?;
let c = then_block.constant(Constant::Integer(1))?;
then_block.assign_add(a, a, c)?;
then_block.jump(&end)?;
end.return_(a)?;
println!("{}", program.dump());
Ok(())
}
}
| true |
80060d31eab3ea1b5f14ec36d195a7114ecad1fa | Rust | acmcarther/cargo-raze-examples | /bazel/complicated_cargo_library/cargo/vendor/arrayvec-0.3.25/tests/generic_array.rs | UTF-8 | 463 | 2.546875 | 3 | ["Apache-2.0", "MIT"] | permissive |
#![cfg(feature = "use_generic_array")]
extern crate arrayvec;
#[macro_use]
extern crate generic_array;
use arrayvec::ArrayVec;
use generic_array::GenericArray;
use generic_array::typenum::U41;
#[test]
fn test_simple() {
let mut vec: ArrayVec<GenericArray<i32, U41>> = ArrayVec::new();
assert_eq!(vec.len(), 0);
assert_eq!(vec.capacity(), 41);
vec.extend(0..20);
assert_eq!(vec.len(), 20);
assert_eq!(&vec[..5], &[0, 1, 2, 3, 4]);
}
| true |
f79c4161a3a63eb9fe819dde81d1df030cfef4ce | Rust | occlum/occlum | /src/libos/src/process/current.rs | UTF-8 | 1,064 | 2.96875 | 3 | ["BSD-3-Clause"] | permissive |
use super::process::IDLE;
use super::{Thread, ThreadRef};
// Get and set the current thread/process.
use crate::prelude::*;
pub fn get() -> ThreadRef {
let current_ptr = CURRENT_THREAD_PTR.with(|cell| cell.get());
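    // Rebuild the Arc from the raw pointer just long enough to clone it, then leak it
    // again with Arc::into_raw so the reference count held by the thread-local survives.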
let current_ref = unsafe { Arc::from_raw(current_ptr) };
let current_ref_clone = current_ref.clone();
Arc::into_raw(current_ref);
current_ref_clone
}
pub(super) fn set(thread_ref: ThreadRef) {
assert!(thread_ref.tid() > 0);
replace(thread_ref);
}
pub(super) fn reset() -> ThreadRef {
replace(IDLE.clone())
}
fn replace(thread_ref: ThreadRef) -> ThreadRef {
let new_thread_ptr = Arc::into_raw(thread_ref);
    let old_thread_ptr = CURRENT_THREAD_PTR.with(|cp| cp.replace(new_thread_ptr));
unsafe { Arc::from_raw(old_thread_ptr) }
}
thread_local! {
// By default, the current thread is the idle (tid = 0).
//
// TODO: figure out why RefCell<ThreadRef> is not working as expected
static CURRENT_THREAD_PTR: Cell<*const Thread> = {
Cell::new(Arc::into_raw(IDLE.clone()))
};
}
| true |
fdfc1d80c90f3988f5112003a4d0eba81e4e1dd7 | Rust | qryxip/ac-library-rs-parted | /ac-library-rs-parted-segtree/src/lib.rs | UTF-8 | 9,445 | 2.8125 | 3 | ["CC0-1.0"] | permissive |
// This code was expanded by `xtask`.
extern crate __acl_internal_bit as internal_bit;
extern crate __acl_internal_type_traits as internal_type_traits;
pub use self::segtree::*;
mod segtree {
use super::internal_bit::ceil_pow2;
use super::internal_type_traits::{BoundedAbove, BoundedBelow, One, Zero};
use std::cmp::{max, min};
use std::convert::Infallible;
use std::marker::PhantomData;
use std::ops::{Add, Mul};
// TODO Should I split monoid-related traits to another module?
pub trait Monoid {
type S: Clone;
fn identity() -> Self::S;
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S;
}
pub struct Max<S>(Infallible, PhantomData<fn() -> S>);
impl<S> Monoid for Max<S>
where
S: Copy + Ord + BoundedBelow,
{
type S = S;
fn identity() -> Self::S {
S::min_value()
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
max(*a, *b)
}
}
pub struct Min<S>(Infallible, PhantomData<fn() -> S>);
impl<S> Monoid for Min<S>
where
S: Copy + Ord + BoundedAbove,
{
type S = S;
fn identity() -> Self::S {
S::max_value()
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
min(*a, *b)
}
}
pub struct Additive<S>(Infallible, PhantomData<fn() -> S>);
impl<S> Monoid for Additive<S>
where
S: Copy + Add<Output = S> + Zero,
{
type S = S;
fn identity() -> Self::S {
S::zero()
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
*a + *b
}
}
pub struct Multiplicative<S>(Infallible, PhantomData<fn() -> S>);
impl<S> Monoid for Multiplicative<S>
where
S: Copy + Mul<Output = S> + One,
{
type S = S;
fn identity() -> Self::S {
S::one()
}
fn binary_operation(a: &Self::S, b: &Self::S) -> Self::S {
*a * *b
}
}
impl<M: Monoid> Default for Segtree<M> {
fn default() -> Self {
Segtree::new(0)
}
}
impl<M: Monoid> Segtree<M> {
pub fn new(n: usize) -> Segtree<M> {
vec![M::identity(); n].into()
}
}
impl<M: Monoid> From<Vec<M::S>> for Segtree<M> {
fn from(v: Vec<M::S>) -> Self {
let n = v.len();
let log = ceil_pow2(n as u32) as usize;
let size = 1 << log;
let mut d = vec![M::identity(); 2 * size];
d[size..(size + n)].clone_from_slice(&v);
let mut ret = Segtree { n, size, log, d };
for i in (1..size).rev() {
ret.update(i);
}
ret
}
}
impl<M: Monoid> Segtree<M> {
pub fn set(&mut self, mut p: usize, x: M::S) {
assert!(p < self.n);
p += self.size;
self.d[p] = x;
for i in 1..=self.log {
self.update(p >> i);
}
}
pub fn get(&self, p: usize) -> M::S {
assert!(p < self.n);
self.d[p + self.size].clone()
}
pub fn prod(&self, mut l: usize, mut r: usize) -> M::S {
assert!(l <= r && r <= self.n);
let mut sml = M::identity();
let mut smr = M::identity();
l += self.size;
r += self.size;
while l < r {
if l & 1 != 0 {
sml = M::binary_operation(&sml, &self.d[l]);
l += 1;
}
if r & 1 != 0 {
r -= 1;
smr = M::binary_operation(&self.d[r], &smr);
}
l >>= 1;
r >>= 1;
}
M::binary_operation(&sml, &smr)
}
pub fn all_prod(&self) -> M::S {
self.d[1].clone()
}
pub fn max_right<F>(&self, mut l: usize, f: F) -> usize
where
F: Fn(&M::S) -> bool,
{
assert!(l <= self.n);
assert!(f(&M::identity()));
if l == self.n {
return self.n;
}
l += self.size;
let mut sm = M::identity();
while {
// do
while l % 2 == 0 {
l >>= 1;
}
if !f(&M::binary_operation(&sm, &self.d[l])) {
while l < self.size {
l *= 2;
let res = M::binary_operation(&sm, &self.d[l]);
if f(&res) {
sm = res;
l += 1;
}
}
return l - self.size;
}
sm = M::binary_operation(&sm, &self.d[l]);
l += 1;
// while
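                // `(l & -l) == l` exactly when l is a power of two, i.e. the whole
                // remaining suffix satisfied f; the loop then falls through and
                // returns self.n.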
{
let l = l as isize;
(l & -l) != l
}
} {}
self.n
}
pub fn min_left<F>(&self, mut r: usize, f: F) -> usize
where
F: Fn(&M::S) -> bool,
{
assert!(r <= self.n);
assert!(f(&M::identity()));
if r == 0 {
return 0;
}
r += self.size;
let mut sm = M::identity();
while {
// do
r -= 1;
while r > 1 && r % 2 == 1 {
r >>= 1;
}
if !f(&M::binary_operation(&self.d[r], &sm)) {
while r < self.size {
r = 2 * r + 1;
let res = M::binary_operation(&self.d[r], &sm);
if f(&res) {
sm = res;
r -= 1;
}
}
return r + 1 - self.size;
}
sm = M::binary_operation(&self.d[r], &sm);
// while
{
let r = r as isize;
(r & -r) != r
}
} {}
0
}
fn update(&mut self, k: usize) {
self.d[k] = M::binary_operation(&self.d[2 * k], &self.d[2 * k + 1]);
}
}
// Maybe we can use this someday
// ```
// for i in 0..=self.log {
// for j in 0..1 << i {
// print!("{}\t", self.d[(1 << i) + j]);
// }
// println!();
// }
// ```
pub struct Segtree<M>
where
M: Monoid,
{
// variable name is _n in original library
n: usize,
size: usize,
log: usize,
d: Vec<M::S>,
}
#[cfg(test)]
mod tests {
use super::super::Segtree;
use super::Max;
#[test]
fn test_max_segtree() {
let base = vec![3, 1, 4, 1, 5, 9, 2, 6, 5, 3];
let n = base.len();
let segtree: Segtree<Max<_>> = base.clone().into();
check_segtree(&base, &segtree);
let mut segtree = Segtree::<Max<_>>::new(n);
let mut internal = vec![i32::min_value(); n];
for i in 0..n {
segtree.set(i, base[i]);
internal[i] = base[i];
check_segtree(&internal, &segtree);
}
segtree.set(6, 5);
internal[6] = 5;
check_segtree(&internal, &segtree);
segtree.set(6, 0);
internal[6] = 0;
check_segtree(&internal, &segtree);
}
//noinspection DuplicatedCode
fn check_segtree(base: &[i32], segtree: &Segtree<Max<i32>>) {
let n = base.len();
#[allow(clippy::needless_range_loop)]
for i in 0..n {
assert_eq!(segtree.get(i), base[i]);
}
for i in 0..=n {
for j in i..=n {
assert_eq!(
segtree.prod(i, j),
base[i..j].iter().max().copied().unwrap_or(i32::min_value())
);
}
}
assert_eq!(
segtree.all_prod(),
base.iter().max().copied().unwrap_or(i32::min_value())
);
for k in 0..=10 {
let f = |&x: &i32| x < k;
for i in 0..=n {
assert_eq!(
Some(segtree.max_right(i, f)),
(i..=n)
.filter(|&j| f(&base[i..j]
.iter()
.max()
.copied()
.unwrap_or(i32::min_value())))
.max()
);
}
for j in 0..=n {
assert_eq!(
Some(segtree.min_left(j, f)),
(0..=j)
.filter(|&i| f(&base[i..j]
.iter()
.max()
.copied()
.unwrap_or(i32::min_value())))
.min()
);
}
}
}
}
}
| true |
14f9f72c621f19fc4311730e8c1fd005b99069be | Rust | kanerogers/ovr-mobile-sys | /src/helpers.rs | UTF-8 | 34,628 | 2.671875 | 3 | ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"] | permissive |
use super::*;
use std::ptr;
use std::{
ffi::c_void,
mem::{self, MaybeUninit},
};
//-----------------------------------------------------------------
// Matrix helper functions.
//-----------------------------------------------------------------
fn ovrVector4f_MultiplyMatrix4f(a: &ovrMatrix4f, v: &ovrVector4f) -> ovrVector4f {
let mut out: ovrVector4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.x = a.M[0][0] * v.x + a.M[0][1] * v.y + a.M[0][2] * v.z + a.M[0][3] * v.w;
out.y = a.M[1][0] * v.x + a.M[1][1] * v.y + a.M[1][2] * v.z + a.M[1][3] * v.w;
out.z = a.M[2][0] * v.x + a.M[2][1] * v.y + a.M[2][2] * v.z + a.M[2][3] * v.w;
out.w = a.M[3][0] * v.x + a.M[3][1] * v.y + a.M[3][2] * v.z + a.M[3][3] * v.w;
return out;
}
// Use left-multiplication to accumulate transformations.
pub fn ovrMatrix4f_Multiply(a: &ovrMatrix4f, b: &ovrMatrix4f) -> ovrMatrix4f {
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.M[0][0] = a.M[0][0] * b.M[0][0]
+ a.M[0][1] * b.M[1][0]
+ a.M[0][2] * b.M[2][0]
+ a.M[0][3] * b.M[3][0];
out.M[1][0] = a.M[1][0] * b.M[0][0]
+ a.M[1][1] * b.M[1][0]
+ a.M[1][2] * b.M[2][0]
+ a.M[1][3] * b.M[3][0];
out.M[2][0] = a.M[2][0] * b.M[0][0]
+ a.M[2][1] * b.M[1][0]
+ a.M[2][2] * b.M[2][0]
+ a.M[2][3] * b.M[3][0];
out.M[3][0] = a.M[3][0] * b.M[0][0]
+ a.M[3][1] * b.M[1][0]
+ a.M[3][2] * b.M[2][0]
+ a.M[3][3] * b.M[3][0];
out.M[0][1] = a.M[0][0] * b.M[0][1]
+ a.M[0][1] * b.M[1][1]
+ a.M[0][2] * b.M[2][1]
+ a.M[0][3] * b.M[3][1];
out.M[1][1] = a.M[1][0] * b.M[0][1]
+ a.M[1][1] * b.M[1][1]
+ a.M[1][2] * b.M[2][1]
+ a.M[1][3] * b.M[3][1];
out.M[2][1] = a.M[2][0] * b.M[0][1]
+ a.M[2][1] * b.M[1][1]
+ a.M[2][2] * b.M[2][1]
+ a.M[2][3] * b.M[3][1];
out.M[3][1] = a.M[3][0] * b.M[0][1]
+ a.M[3][1] * b.M[1][1]
+ a.M[3][2] * b.M[2][1]
+ a.M[3][3] * b.M[3][1];
out.M[0][2] = a.M[0][0] * b.M[0][2]
+ a.M[0][1] * b.M[1][2]
+ a.M[0][2] * b.M[2][2]
+ a.M[0][3] * b.M[3][2];
out.M[1][2] = a.M[1][0] * b.M[0][2]
+ a.M[1][1] * b.M[1][2]
+ a.M[1][2] * b.M[2][2]
+ a.M[1][3] * b.M[3][2];
out.M[2][2] = a.M[2][0] * b.M[0][2]
+ a.M[2][1] * b.M[1][2]
+ a.M[2][2] * b.M[2][2]
+ a.M[2][3] * b.M[3][2];
out.M[3][2] = a.M[3][0] * b.M[0][2]
+ a.M[3][1] * b.M[1][2]
+ a.M[3][2] * b.M[2][2]
+ a.M[3][3] * b.M[3][2];
out.M[0][3] = a.M[0][0] * b.M[0][3]
+ a.M[0][1] * b.M[1][3]
+ a.M[0][2] * b.M[2][3]
+ a.M[0][3] * b.M[3][3];
out.M[1][3] = a.M[1][0] * b.M[0][3]
+ a.M[1][1] * b.M[1][3]
+ a.M[1][2] * b.M[2][3]
+ a.M[1][3] * b.M[3][3];
out.M[2][3] = a.M[2][0] * b.M[0][3]
+ a.M[2][1] * b.M[1][3]
+ a.M[2][2] * b.M[2][3]
+ a.M[2][3] * b.M[3][3];
out.M[3][3] = a.M[3][0] * b.M[0][3]
+ a.M[3][1] * b.M[1][3]
+ a.M[3][2] * b.M[2][3]
+ a.M[3][3] * b.M[3][3];
out
}
// Returns the transpose of a 4x4 matrix.
pub fn ovrMatrix4f_Transpose(a: &ovrMatrix4f) -> ovrMatrix4f {
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.M[0][0] = a.M[0][0];
out.M[0][1] = a.M[1][0];
out.M[0][2] = a.M[2][0];
out.M[0][3] = a.M[3][0];
out.M[1][0] = a.M[0][1];
out.M[1][1] = a.M[1][1];
out.M[1][2] = a.M[2][1];
out.M[1][3] = a.M[3][1];
out.M[2][0] = a.M[0][2];
out.M[2][1] = a.M[1][2];
out.M[2][2] = a.M[2][2];
out.M[2][3] = a.M[3][2];
out.M[3][0] = a.M[0][3];
out.M[3][1] = a.M[1][3];
out.M[3][2] = a.M[2][3];
out.M[3][3] = a.M[3][3];
out
}
// Returns a 3x3 minor of a 4x4 matrix.
pub fn ovrMatrix4f_Minor(
m: &ovrMatrix4f,
r0: usize,
r1: usize,
r2: usize,
c0: usize,
c1: usize,
c2: usize,
) -> f32 {
m.M[r0][c0] * (m.M[r1][c1] * m.M[r2][c2] - m.M[r2][c1] * m.M[r1][c2])
- m.M[r0][c1] * (m.M[r1][c0] * m.M[r2][c2] - m.M[r2][c0] * m.M[r1][c2])
+ m.M[r0][c2] * (m.M[r1][c0] * m.M[r2][c1] - m.M[r2][c0] * m.M[r1][c1])
}
// Returns the inverse of a 4x4 matrix.
pub fn ovrMatrix4f_Inverse(m: &ovrMatrix4f) -> ovrMatrix4f {
let rcp_det = 1.0
/ (m.M[0][0] * ovrMatrix4f_Minor(m, 1, 2, 3, 1, 2, 3)
- m.M[0][1] * ovrMatrix4f_Minor(m, 1, 2, 3, 0, 2, 3)
+ m.M[0][2] * ovrMatrix4f_Minor(m, 1, 2, 3, 0, 1, 3)
- m.M[0][3] * ovrMatrix4f_Minor(m, 1, 2, 3, 0, 1, 2));
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.M[0][0] = ovrMatrix4f_Minor(m, 1, 2, 3, 1, 2, 3) * rcp_det;
out.M[0][1] = -ovrMatrix4f_Minor(m, 0, 2, 3, 1, 2, 3) * rcp_det;
out.M[0][2] = ovrMatrix4f_Minor(m, 0, 1, 3, 1, 2, 3) * rcp_det;
out.M[0][3] = -ovrMatrix4f_Minor(m, 0, 1, 2, 1, 2, 3) * rcp_det;
out.M[1][0] = -ovrMatrix4f_Minor(m, 1, 2, 3, 0, 2, 3) * rcp_det;
out.M[1][1] = ovrMatrix4f_Minor(m, 0, 2, 3, 0, 2, 3) * rcp_det;
out.M[1][2] = -ovrMatrix4f_Minor(m, 0, 1, 3, 0, 2, 3) * rcp_det;
out.M[1][3] = ovrMatrix4f_Minor(m, 0, 1, 2, 0, 2, 3) * rcp_det;
out.M[2][0] = ovrMatrix4f_Minor(m, 1, 2, 3, 0, 1, 3) * rcp_det;
out.M[2][1] = -ovrMatrix4f_Minor(m, 0, 2, 3, 0, 1, 3) * rcp_det;
out.M[2][2] = ovrMatrix4f_Minor(m, 0, 1, 3, 0, 1, 3) * rcp_det;
out.M[2][3] = -ovrMatrix4f_Minor(m, 0, 1, 2, 0, 1, 3) * rcp_det;
out.M[3][0] = -ovrMatrix4f_Minor(m, 1, 2, 3, 0, 1, 2) * rcp_det;
out.M[3][1] = ovrMatrix4f_Minor(m, 0, 2, 3, 0, 1, 2) * rcp_det;
out.M[3][2] = -ovrMatrix4f_Minor(m, 0, 1, 3, 0, 1, 2) * rcp_det;
out.M[3][3] = ovrMatrix4f_Minor(m, 0, 1, 2, 0, 1, 2) * rcp_det;
out
}
// Returns a 4x4 identity matrix.
pub fn ovrMatrix4f_CreateIdentity() -> ovrMatrix4f {
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.M[0][0] = 1.0;
out.M[0][1] = 0.0;
out.M[0][2] = 0.0;
out.M[0][3] = 0.0;
out.M[1][0] = 0.0;
out.M[1][1] = 1.0;
out.M[1][2] = 0.0;
out.M[1][3] = 0.0;
out.M[2][0] = 0.0;
out.M[2][1] = 0.0;
out.M[2][2] = 1.0;
out.M[2][3] = 0.0;
out.M[3][0] = 0.0;
out.M[3][1] = 0.0;
out.M[3][2] = 0.0;
out.M[3][3] = 1.0;
out
}
// Returns a 4x4 homogeneous translation matrix.
pub fn ovrMatrix4f_CreateTranslation(x: f32, y: f32, z: f32) -> ovrMatrix4f {
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.M[0][0] = 1.0;
out.M[0][1] = 0.0;
out.M[0][2] = 0.0;
out.M[0][3] = x;
out.M[1][0] = 0.0;
out.M[1][1] = 1.0;
out.M[1][2] = 0.0;
out.M[1][3] = y;
out.M[2][0] = 0.0;
out.M[2][1] = 0.0;
out.M[2][2] = 1.0;
out.M[2][3] = z;
out.M[3][0] = 0.0;
out.M[3][1] = 0.0;
out.M[3][2] = 0.0;
out.M[3][3] = 1.0;
out
}
// Returns a 4x4 homogeneous rotation matrix.
pub fn ovrMatrix4f_CreateRotation(radiansX: f32, radiansY: f32, radiansZ: f32) -> ovrMatrix4f {
let sinX = radiansX.sin();
let cosX = radiansX.cos();
let rotationX = ovrMatrix4f {
M: [
[1.0, 0.0, 0.0, 0.0],
[0.0, cosX, -sinX, 0.0],
[0.0, sinX, cosX, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
};
let sinY = radiansY.sin();
let cosY = radiansY.cos();
let rotationY = ovrMatrix4f {
M: [
[cosY, 0.0, sinY, 0.0],
[0.0, 1.0, 0.0, 0.0],
[-sinY, 0.0, cosY, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
};
let sinZ = radiansZ.sin();
let cosZ = radiansZ.cos();
let rotationZ = ovrMatrix4f {
M: [
[cosZ, -sinZ, 0.0, 0.0],
[sinZ, cosZ, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
};
let rotationXY = ovrMatrix4f_Multiply(&rotationY, &rotationX);
ovrMatrix4f_Multiply(&rotationZ, &rotationXY)
}
// Returns a projection matrix based on the specified dimensions.
// The projection matrix transforms -Z=forward, +Y=up, +X=right to the appropriate clip space for the graphics API.
// The far plane is placed at infinity if far_z <= near_z.
// An infinite projection matrix is preferred for rasterization because, except for
// things *right* up against the near plane, it always provides better precision:
// "Tightening the Precision of Perspective Rendering"
// Paul Upchurch, Mathieu Desbrun
// Journal of Graphics Tools, Volume 16, Issue 1, 2012
pub fn ovrMatrix4f_CreateProjection(
min_x: f32,
max_x: f32,
min_y: f32,
max_y: f32,
near_z: f32,
far_z: f32,
) -> ovrMatrix4f {
let width = max_x - min_x;
let height = max_y - min_y;
let offsetZ = near_z; // set to zero for a [0,1] clip space
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
if far_z <= near_z {
// place the far plane at infinity
out.M[0][0] = 2.0 * near_z / width;
out.M[0][1] = 0.0;
out.M[0][2] = (max_x + min_x) / width;
out.M[0][3] = 0.0;
out.M[1][0] = 0.0;
out.M[1][1] = 2.0 * near_z / height;
out.M[1][2] = (max_y + min_y) / height;
out.M[1][3] = 0.0;
out.M[2][0] = 0.0;
out.M[2][1] = 0.0;
out.M[2][2] = -1.0;
out.M[2][3] = -(near_z + offsetZ);
out.M[3][0] = 0.0;
out.M[3][1] = 0.0;
out.M[3][2] = -1.0;
out.M[3][3] = 0.0;
} else {
// normal projection
out.M[0][0] = 2.0 * near_z / width;
out.M[0][1] = 0.0;
out.M[0][2] = (max_x + min_x) / width;
out.M[0][3] = 0.0;
out.M[1][0] = 0.0;
out.M[1][1] = 2.0 * near_z / height;
out.M[1][2] = (max_y + min_y) / height;
out.M[1][3] = 0.0;
out.M[2][0] = 0.0;
out.M[2][1] = 0.0;
out.M[2][2] = -(far_z + offsetZ) / (far_z - near_z);
out.M[2][3] = -(far_z * (near_z + offsetZ)) / (far_z - near_z);
out.M[3][0] = 0.0;
out.M[3][1] = 0.0;
out.M[3][2] = -1.0;
out.M[3][3] = 0.0;
}
out
}
// Returns a projection matrix based on the given FOV.
pub fn ovrMatrix4f_CreateProjectionFov(
fov_degrees_x: f32,
fov_degrees_y: f32,
offset_x: f32,
offset_y: f32,
near_z: f32,
far_z: f32,
) -> ovrMatrix4f {
    let half_width = near_z * (fov_degrees_x * (VRAPI_PI as f32 / 180.0f32 * 0.5f32)).tan();
    let half_height = near_z * (fov_degrees_y * (VRAPI_PI as f32 / 180.0f32 * 0.5f32)).tan();
let min_x = offset_x - half_width;
let max_x = offset_x + half_width;
let min_y = offset_y - half_height;
let max_y = offset_y + half_height;
ovrMatrix4f_CreateProjection(min_x, max_x, min_y, max_y, near_z, far_z)
}
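// For example, vrapi_DefaultFrameParms and the default layer helpers below request a
// symmetric 90x90 degree FOV with an infinite far plane via
// ovrMatrix4f_CreateProjectionFov(90.0, 90.0, 0.0, 0.0, 0.1, 0.0).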
// Returns the 4x4 rotation matrix for the given quaternion.
pub fn ovrMatrix4f_CreateFromQuaternion(q: &ovrQuatf) -> ovrMatrix4f {
let ww = q.w * q.w;
let xx = q.x * q.x;
let yy = q.y * q.y;
let zz = q.z * q.z;
let mut out: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
out.M[0][0] = ww + xx - yy - zz;
out.M[0][1] = 2.0 * (q.x * q.y - q.w * q.z);
out.M[0][2] = 2.0 * (q.x * q.z + q.w * q.y);
out.M[0][3] = 0.0;
out.M[1][0] = 2.0 * (q.x * q.y + q.w * q.z);
out.M[1][1] = ww - xx + yy - zz;
out.M[1][2] = 2.0 * (q.y * q.z - q.w * q.x);
out.M[1][3] = 0.0;
out.M[2][0] = 2.0 * (q.x * q.z - q.w * q.y);
out.M[2][1] = 2.0 * (q.y * q.z + q.w * q.x);
out.M[2][2] = ww - xx - yy + zz;
out.M[2][3] = 0.0;
out.M[3][0] = 0.0;
out.M[3][1] = 0.0;
out.M[3][2] = 0.0;
out.M[3][3] = 1.0;
out
}
// Convert a standard projection matrix into a TexCoordsFromTanAngles matrix for
// the primary time warp surface.
pub fn ovrMatrix4f_TanAngleMatrixFromProjection(projection: &ovrMatrix4f) -> ovrMatrix4f {
/*
A projection matrix goes from a view point to NDC, or -1 to 1 space.
Scale and bias to convert that to a 0 to 1 space.
const ovrMatrix3f m =
{ {
{ projection.M[0][0], 0.0, projection.M[0][2] },
{ 0.0, projection.M[1][1], projection.M[1][2] },
{ 0.0, 0.0, -1.0 }
} };
// Note that there is no Y-flip because eye buffers have 0,0 = left-bottom.
const ovrMatrix3f s = ovrMatrix3f_CreateScaling( 0.5, 0.5 );
const ovrMatrix3f t = ovrMatrix3f_CreateTranslation( 0.5, 0.5 );
const ovrMatrix3f r0 = ovrMatrix3f_Multiply( &s, &m );
const ovrMatrix3f r1 = ovrMatrix3f_Multiply( &t, &r0 );
return r1;
clipZ = ( z * projection[2][2] + projection[2][3] ) / ( projection[3][2] * z )
z = projection[2][3] / ( clipZ * projection[3][2] - projection[2][2] )
z = ( projection[2][3] / projection[3][2] ) / ( clipZ - projection[2][2] / projection[3][2] )
*/
let tanAngleMatrix = ovrMatrix4f {
M: [
[
0.5 * projection.M[0][0],
0.0,
0.5 * projection.M[0][2] - 0.5,
0.0,
],
[
0.0,
0.5 * projection.M[1][1],
0.5 * projection.M[1][2] - 0.5,
0.0,
],
[0.0, 0.0, -1.0, 0.0],
// Store the values to convert a clip-Z to a linear depth in the unused matrix elements.
[
projection.M[2][2],
projection.M[2][3],
projection.M[3][2],
1.0,
],
],
};
tanAngleMatrix
}
// If a simple quad defined as a -1 to 1 XY unit square is transformed to
// the camera view with the given modelView matrix, it can alternately be
// drawn as a time warp overlay image to take advantage of the full window
// resolution, which is usually higher than the eye buffer textures, and
// avoids resampling both into the eye buffer, and again to the screen.
// This is used for high quality movie screens and user interface planes.
//
// Note that this is NOT an MVP matrix -- the "projection" is handled
// by the distortion process.
//
// This utility functions converts a model-view matrix that would normally
// draw a -1 to 1 unit square to the view into a TexCoordsFromTanAngles matrix
// for an overlay surface.
//
// The resulting z value should be straight ahead distance to the plane.
// The x and y values will be pre-multiplied by z for projective texturing.
pub fn ovrMatrix4f_TanAngleMatrixFromUnitSquare(modelView: &ovrMatrix4f) -> ovrMatrix4f {
/*
// Take the inverse of the view matrix because the view matrix transforms the unit square
// from world space into view space, while the matrix needed here is the one that transforms
// the unit square from view space to world space.
const ovrMatrix4f inv = ovrMatrix4f_Inverse( modelView );
// This matrix calculates the projection onto the (-1, 1) X and Y axes of the unit square,
// of the intersection of the vector (tanX, tanY, -1) with the plane described by the matrix
// that transforms the unit square into world space.
const ovrMatrix3f m =
{ {
{ inv.M[0][0] * inv.M[2][3] - inv.M[0][3] * inv.M[2][0],
inv.M[0][1] * inv.M[2][3] - inv.M[0][3] * inv.M[2][1],
inv.M[0][2] * inv.M[2][3] - inv.M[0][3] * inv.M[2][2] },
{ inv.M[1][0] * inv.M[2][3] - inv.M[1][3] * inv.M[2][0],
inv.M[1][1] * inv.M[2][3] - inv.M[1][3] * inv.M[2][1],
inv.M[1][2] * inv.M[2][3] - inv.M[1][3] * inv.M[2][2] },
{ - inv.M[2][0],
- inv.M[2][1],
- inv.M[2][2] }
} };
// Flip the Y because textures have 0,0 = left-top as opposed to left-bottom.
const ovrMatrix3f f = ovrMatrix3f_CreateScaling( 1.0, -1.0 );
const ovrMatrix3f s = ovrMatrix3f_CreateScaling( 0.5, 0.5 );
const ovrMatrix3f t = ovrMatrix3f_CreateTranslation( 0.5, 0.5 );
const ovrMatrix3f r0 = ovrMatrix3f_Multiply( &f, &m );
const ovrMatrix3f r1 = ovrMatrix3f_Multiply( &s, &r0 );
const ovrMatrix3f r2 = ovrMatrix3f_Multiply( &t, &r1 );
return r2;
*/
let inv = ovrMatrix4f_Inverse(modelView);
let coef = if inv.M[2][3] > 0.0 { 1.0 } else { -1.0 };
let mut m: ovrMatrix4f = unsafe { MaybeUninit::zeroed().assume_init() };
m.M[0][0] =
(0.5 * (inv.M[0][0] * inv.M[2][3] - inv.M[0][3] * inv.M[2][0]) - 0.5 * inv.M[2][0]) * coef;
m.M[0][1] =
(0.5 * (inv.M[0][1] * inv.M[2][3] - inv.M[0][3] * inv.M[2][1]) - 0.5 * inv.M[2][1]) * coef;
m.M[0][2] =
(0.5 * (inv.M[0][2] * inv.M[2][3] - inv.M[0][3] * inv.M[2][2]) - 0.5 * inv.M[2][2]) * coef;
m.M[0][3] = 0.0;
m.M[1][0] =
(-0.5 * (inv.M[1][0] * inv.M[2][3] - inv.M[1][3] * inv.M[2][0]) - 0.5 * inv.M[2][0]) * coef;
m.M[1][1] =
(-0.5 * (inv.M[1][1] * inv.M[2][3] - inv.M[1][3] * inv.M[2][1]) - 0.5 * inv.M[2][1]) * coef;
m.M[1][2] =
(-0.5 * (inv.M[1][2] * inv.M[2][3] - inv.M[1][3] * inv.M[2][2]) - 0.5 * inv.M[2][2]) * coef;
m.M[1][3] = 0.0;
m.M[2][0] = (-inv.M[2][0]) * coef;
m.M[2][1] = (-inv.M[2][1]) * coef;
m.M[2][2] = (-inv.M[2][2]) * coef;
m.M[2][3] = 0.0;
m.M[3][0] = 0.0;
m.M[3][1] = 0.0;
m.M[3][2] = 0.0;
m.M[3][3] = 1.0;
m
}
// Convert a standard view matrix into a TexCoordsFromTanAngles matrix for
// the looking into a cube map.
pub fn ovrMatrix4f_TanAngleMatrixForCubeMap(viewMatrix: &ovrMatrix4f) -> ovrMatrix4f {
let mut m = *viewMatrix;
// clear translation
for i in 0..3 {
m.M[i][3] = 0.0;
}
ovrMatrix4f_Inverse(&m)
}
// Utility function to calculate external velocity for smooth stick yaw turning.
// To reduce judder in FPS style experiences when the application framerate is
// lower than the vsync rate, the rotation from a joypad can be applied to the
// view space distorted eye vectors before applying the time warp.
pub fn ovrMatrix4f_CalculateExternalVelocity(
viewMatrix: &ovrMatrix4f,
yawRadiansPerSecond: f32,
) -> ovrMatrix4f {
let angle = yawRadiansPerSecond * (-1.0 / 60.0);
let sinHalfAngle = (angle * 0.5).sin();
let cosHalfAngle = (angle * 0.5).cos();
// Yaw is always going to be around the world Y axis
let mut quat: ovrQuatf = unsafe { MaybeUninit::zeroed().assume_init() };
quat.x = viewMatrix.M[0][1] * sinHalfAngle;
quat.y = viewMatrix.M[1][1] * sinHalfAngle;
quat.z = viewMatrix.M[2][1] * sinHalfAngle;
quat.w = cosHalfAngle;
ovrMatrix4f_CreateFromQuaternion(&quat)
}
fn ovrVector3f_RotateAboutPivot(
rotation: &ovrQuatf,
pivot: &ovrVector3f,
point: &ovrVector3f,
) -> ovrVector3f {
let t0 = ovrMatrix4f_CreateTranslation(pivot.x, pivot.y, pivot.z);
let r = ovrMatrix4f_CreateFromQuaternion(rotation);
let t1 = ovrMatrix4f_CreateTranslation(-pivot.x, -pivot.y, -pivot.z);
let c0 = ovrMatrix4f_Multiply(&t0, &r);
let c1 = ovrMatrix4f_Multiply(&c0, &t1);
let v = ovrVector4f {
x: point.x,
y: point.y,
z: point.z,
w: 1.0f32,
};
let v2 = ovrVector4f_MultiplyMatrix4f(&c1, &v);
let v3 = ovrVector3f {
x: v2.x,
y: v2.y,
z: v2.z,
};
return v3;
}
//-----------------------------------------------------------------
// Default initialization helper functions.
//-----------------------------------------------------------------
// Utility function to default initialize the ovrInitParms.
pub fn vrapi_DefaultInitParms(java: *const ovrJava) -> ovrInitParms {
let mut parms: ovrInitParms = unsafe { MaybeUninit::zeroed().assume_init() };
parms.Type = ovrStructureType::VRAPI_STRUCTURE_TYPE_INIT_PARMS;
parms.ProductVersion = VRAPI_PRODUCT_VERSION as i32;
parms.MajorVersion = VRAPI_MAJOR_VERSION as i32;
parms.MinorVersion = VRAPI_MINOR_VERSION as i32;
parms.PatchVersion = VRAPI_PATCH_VERSION as i32;
parms.GraphicsAPI = ovrGraphicsAPI::VRAPI_GRAPHICS_API_OPENGL_ES_3;
parms.Java = unsafe { *java };
return parms;
}
// Utility function to default initialize the ovrModeParms.
pub fn vrapi_DefaultModeParms(java: *const ovrJava) -> ovrModeParms {
let mut parms: ovrModeParms = unsafe { MaybeUninit::zeroed().assume_init() };
parms.Type = ovrStructureType::VRAPI_STRUCTURE_TYPE_MODE_PARMS;
parms.Flags |= ovrModeFlags::VRAPI_MODE_FLAG_RESET_WINDOW_FULLSCREEN as u32;
parms.Java = unsafe { *java };
parms
}
// Utility function to default initialize the ovrPerformanceParms.
pub fn vrapi_DefaultPerformanceParms() -> ovrPerformanceParms {
let mut parms: ovrPerformanceParms = unsafe { MaybeUninit::zeroed().assume_init() };
parms.CpuLevel = 2;
parms.GpuLevel = 2;
parms.MainThreadTid = 0;
parms.RenderThreadTid = 0;
parms
}
// Utility function to default initialize the ovrFrameParms.
pub fn vrapi_DefaultFrameParms(
java: *const ovrJava,
init: ovrFrameInit,
currentTime: f64,
textureSwapChain: *mut ovrTextureSwapChain,
) -> ovrFrameParms {
let projectionMatrix = ovrMatrix4f_CreateProjectionFov(90.0, 90.0, 0.0, 0.0, 0.1, 0.0);
let texCoordsFromTanAngles = ovrMatrix4f_TanAngleMatrixFromProjection(&projectionMatrix);
let mut parms: ovrFrameParms = unsafe { MaybeUninit::zeroed().assume_init() };
parms.Type = ovrStructureType::VRAPI_STRUCTURE_TYPE_FRAME_PARMS;
for layer in 0..ovrFrameLayerType::VRAPI_FRAME_LAYER_TYPE_MAX as usize {
parms.Layers[layer].ColorScale = 1.0;
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
parms.Layers[layer].Textures[eye].TexCoordsFromTanAngles = texCoordsFromTanAngles;
parms.Layers[layer].Textures[eye].TextureRect.width = 1.0;
parms.Layers[layer].Textures[eye].TextureRect.height = 1.0;
parms.Layers[layer].Textures[eye]
.HeadPose
.Pose
.Orientation
.w = 1.0;
parms.Layers[layer].Textures[eye].HeadPose.TimeInSeconds = currentTime;
}
}
parms.LayerCount = 1;
parms.SwapInterval = 1;
parms.ExtraLatencyMode = ovrExtraLatencyMode::VRAPI_EXTRA_LATENCY_MODE_OFF;
parms.PerformanceParms = vrapi_DefaultPerformanceParms();
parms.Java = unsafe { *java };
parms.Layers[0].SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE;
parms.Layers[0].DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ZERO;
parms.Layers[0].Flags = 0;
parms.Layers[1].SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_SRC_ALPHA;
parms.Layers[1].DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE_MINUS_SRC_ALPHA;
parms.Layers[1].Flags = 0;
match init {
ovrFrameInit::VRAPI_FRAME_INIT_BLACK
| ovrFrameInit::VRAPI_FRAME_INIT_BLACK_FLUSH
| ovrFrameInit::VRAPI_FRAME_INIT_BLACK_FINAL => {
// NOTE: When requesting a solid black frame, set ColorScale to 0.0f
parms.Layers[0].ColorScale = 0.0;
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
parms.Layers[0].Textures[eye].ColorTextureSwapChain = unsafe {
mem::transmute(
ovrDefaultTextureSwapChain::VRAPI_DEFAULT_TEXTURE_SWAPCHAIN as usize,
)
};
}
}
ovrFrameInit::VRAPI_FRAME_INIT_LOADING_ICON
| ovrFrameInit::VRAPI_FRAME_INIT_LOADING_ICON_FLUSH => {
parms.LayerCount = 2;
// NOTE: When requesting a solid black frame, set ColorScale to 0.0f
parms.Layers[0].ColorScale = 0.0;
parms.Layers[1].Flags = ovrFrameLayerFlags::VRAPI_FRAME_LAYER_FLAG_SPIN as i32;
parms.Layers[1].SpinSpeed = 1.0; // rotation in radians per second
parms.Layers[1].SpinScale = 16.0; // icon size factor smaller than fullscreen
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
parms.Layers[0].Textures[eye].ColorTextureSwapChain = unsafe {
mem::transmute(
ovrDefaultTextureSwapChain::VRAPI_DEFAULT_TEXTURE_SWAPCHAIN as usize,
)
};
parms.Layers[1].Textures[eye].ColorTextureSwapChain = if !textureSwapChain.is_null()
{
textureSwapChain
} else {
unsafe {
mem::transmute(
ovrDefaultTextureSwapChain::VRAPI_DEFAULT_TEXTURE_SWAPCHAIN_LOADING_ICON
as usize,
)
}
};
}
}
//ovrFrameInit::VRAPI_FRAME_INIT_DEFAULT
_ => (),
}
if init == ovrFrameInit::VRAPI_FRAME_INIT_BLACK_FLUSH
|| init == ovrFrameInit::VRAPI_FRAME_INIT_LOADING_ICON_FLUSH
{
parms.Flags |= ovrFrameFlags::VRAPI_FRAME_FLAG_FLUSH as i32;
}
if init == ovrFrameInit::VRAPI_FRAME_INIT_BLACK_FINAL {
parms.Flags |= ovrFrameFlags::VRAPI_FRAME_FLAG_FLUSH as i32
| ovrFrameFlags::VRAPI_FRAME_FLAG_FINAL as i32;
}
return parms;
}
//-----------------------------------------------------------------
// Layer Types - default initialization.
//-----------------------------------------------------------------
pub fn vrapi_DefaultLayerProjection2() -> ovrLayerProjection2 {
let mut layer: ovrLayerProjection2 =
unsafe { MaybeUninit::<ovrLayerProjection2>::zeroed().assume_init() };
let projectionMatrix =
ovrMatrix4f_CreateProjectionFov(90.0f32, 90.0f32, 0.0f32, 0.0f32, 0.1f32, 0.0f32);
let texCoordsFromTanAngles = ovrMatrix4f_TanAngleMatrixFromProjection(&projectionMatrix);
layer.Header.Type = ovrLayerType2_::VRAPI_LAYER_TYPE_PROJECTION2;
layer.Header.Flags = 0;
layer.Header.ColorScale.x = 1.0f32;
layer.Header.ColorScale.y = 1.0f32;
layer.Header.ColorScale.z = 1.0f32;
layer.Header.ColorScale.w = 1.0f32;
layer.Header.SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE;
layer.Header.DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ZERO;
layer.Header.Reserved = ptr::null_mut::<c_void>();
layer.HeadPose.Pose.Orientation.w = 1.0f32;
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
layer.Textures[eye].TexCoordsFromTanAngles = texCoordsFromTanAngles;
layer.Textures[eye].TextureRect.x = 0.0f32;
layer.Textures[eye].TextureRect.y = 0.0f32;
layer.Textures[eye].TextureRect.width = 1.0f32;
layer.Textures[eye].TextureRect.height = 1.0f32;
}
return layer;
}
pub fn vrapi_DefaultLayerBlackProjection2() -> ovrLayerProjection2 {
let mut layer: ovrLayerProjection2 = unsafe { MaybeUninit::zeroed().assume_init() };
layer.Header.Type = ovrLayerType2_::VRAPI_LAYER_TYPE_PROJECTION2;
layer.Header.Flags = 0;
// NOTE: When requesting a solid black frame, set ColorScale to { 0.0f, 0.0f, 0.0f, 0.0f }
layer.Header.ColorScale.x = 0.0f32;
layer.Header.ColorScale.y = 0.0f32;
layer.Header.ColorScale.z = 0.0f32;
layer.Header.ColorScale.w = 0.0f32;
layer.Header.SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE;
layer.Header.DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ZERO;
layer.HeadPose.Pose.Orientation.w = 1.0;
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
layer.Textures[eye].SwapChainIndex = 0;
layer.Textures[eye].ColorSwapChain = unsafe {
mem::transmute(ovrDefaultTextureSwapChain::VRAPI_DEFAULT_TEXTURE_SWAPCHAIN as usize)
};
}
return layer;
}
pub fn vrapi_DefaultLayerCylinder2() -> ovrLayerCylinder2 {
let mut layer: ovrLayerCylinder2 = unsafe { MaybeUninit::zeroed().assume_init() };
let projectionMatrix =
ovrMatrix4f_CreateProjectionFov(90.0f32, 90.0f32, 0.0f32, 0.0f32, 0.1f32, 0.0f32);
let texCoordsFromTanAngles = ovrMatrix4f_TanAngleMatrixFromProjection(&projectionMatrix);
layer.Header.Type = ovrLayerType2_::VRAPI_LAYER_TYPE_CYLINDER2;
layer.Header.Flags = 0;
layer.Header.ColorScale.x = 1.0f32;
layer.Header.ColorScale.y = 1.0f32;
layer.Header.ColorScale.z = 1.0f32;
layer.Header.ColorScale.w = 1.0f32;
layer.Header.SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE;
layer.Header.DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ZERO;
layer.HeadPose.Pose.Orientation.w = 1.0f32;
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
layer.Textures[eye].TexCoordsFromTanAngles = texCoordsFromTanAngles;
layer.Textures[eye].TextureRect.x = 0.0f32;
layer.Textures[eye].TextureRect.y = 0.0f32;
layer.Textures[eye].TextureRect.width = 1.0f32;
layer.Textures[eye].TextureRect.height = 1.0f32;
layer.Textures[eye].TextureMatrix.M[0][0] = 1.0f32;
layer.Textures[eye].TextureMatrix.M[1][1] = 1.0f32;
layer.Textures[eye].TextureMatrix.M[2][2] = 1.0f32;
layer.Textures[eye].TextureMatrix.M[3][3] = 1.0f32;
}
return layer;
}
pub fn vrapi_DefaultLayerCube2() -> ovrLayerCube2 {
let mut layer: ovrLayerCube2 = unsafe { MaybeUninit::zeroed().assume_init() };
let projectionMatrix =
ovrMatrix4f_CreateProjectionFov(90.0f32, 90.0f32, 0.0f32, 0.0f32, 0.1f32, 0.0f32);
let texCoordsFromTanAngles = ovrMatrix4f_TanAngleMatrixFromProjection(&projectionMatrix);
layer.Header.Type = ovrLayerType2_::VRAPI_LAYER_TYPE_CUBE2;
layer.Header.Flags = 0;
layer.Header.ColorScale.x = 1.0f32;
layer.Header.ColorScale.y = 1.0f32;
layer.Header.ColorScale.z = 1.0f32;
layer.Header.ColorScale.w = 1.0f32;
layer.Header.SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE;
layer.Header.DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ZERO;
layer.HeadPose.Pose.Orientation.w = 1.0f32;
layer.TexCoordsFromTanAngles = texCoordsFromTanAngles;
layer.Offset.x = 0.0f32;
layer.Offset.y = 0.0f32;
layer.Offset.z = 0.0f32;
return layer;
}
pub fn vrapi_DefaultLayerEquirect2() -> ovrLayerEquirect2 {
let mut layer: ovrLayerEquirect2 = unsafe { MaybeUninit::zeroed().assume_init() };
let projectionMatrix =
ovrMatrix4f_CreateProjectionFov(90.0f32, 90.0f32, 0.0f32, 0.0f32, 0.1f32, 0.0f32);
let texCoordsFromTanAngles = ovrMatrix4f_TanAngleMatrixFromProjection(&projectionMatrix);
layer.Header.Type = ovrLayerType2_::VRAPI_LAYER_TYPE_EQUIRECT2;
layer.Header.Flags = 0;
layer.Header.ColorScale.x = 1.0f32;
layer.Header.ColorScale.y = 1.0f32;
layer.Header.ColorScale.z = 1.0f32;
layer.Header.ColorScale.w = 1.0f32;
layer.Header.SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE;
layer.Header.DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ZERO;
layer.HeadPose.Pose.Orientation.w = 1.0f32;
layer.TexCoordsFromTanAngles = texCoordsFromTanAngles;
for eye in 0..ovrFrameLayerEye::VRAPI_FRAME_LAYER_EYE_MAX as usize {
layer.Textures[eye].TextureRect.x = 0.0f32;
layer.Textures[eye].TextureRect.y = 0.0f32;
layer.Textures[eye].TextureRect.width = 1.0f32;
layer.Textures[eye].TextureRect.height = 1.0f32;
layer.Textures[eye].TextureMatrix.M[0][0] = 1.0f32;
layer.Textures[eye].TextureMatrix.M[1][1] = 1.0f32;
layer.Textures[eye].TextureMatrix.M[2][2] = 1.0f32;
layer.Textures[eye].TextureMatrix.M[3][3] = 1.0f32;
}
return layer;
}
pub fn vrapi_DefaultLayerLoadingIcon2() -> ovrLayerLoadingIcon2 {
let mut layer: ovrLayerLoadingIcon2 = unsafe { MaybeUninit::zeroed().assume_init() };
layer.Header.Type = ovrLayerType2_::VRAPI_LAYER_TYPE_LOADING_ICON2;
layer.Header.Flags = 0;
layer.Header.ColorScale.x = 1.0f32;
layer.Header.ColorScale.y = 1.0f32;
layer.Header.ColorScale.z = 1.0f32;
layer.Header.ColorScale.w = 1.0f32;
layer.Header.SrcBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_SRC_ALPHA;
layer.Header.DstBlend = ovrFrameLayerBlend::VRAPI_FRAME_LAYER_BLEND_ONE_MINUS_SRC_ALPHA;
layer.SpinSpeed = 1.0f32;
layer.SpinScale = 16.0f32;
layer.ColorSwapChain = unsafe {
mem::transmute(
ovrDefaultTextureSwapChain::VRAPI_DEFAULT_TEXTURE_SWAPCHAIN_LOADING_ICON as usize,
)
};
layer.SwapChainIndex = 0;
return layer;
}
//-----------------------------------------------------------------
// Eye view matrix helper functions.
//-----------------------------------------------------------------
pub fn vrapi_GetInterpupillaryDistance(tracking2: &ovrTracking2) -> f32 {
let leftView = tracking2.Eye[0].ViewMatrix;
let rightView = tracking2.Eye[1].ViewMatrix;
let delta = ovrVector3f {
x: rightView.M[0][3] - leftView.M[0][3],
y: rightView.M[1][3] - leftView.M[1][3],
z: rightView.M[2][3] - leftView.M[2][3],
};
return (delta.x * delta.x + delta.y * delta.y + delta.z * delta.z).sqrt();
}
pub unsafe fn vrapi_GetEyeHeight(
eyeLevelTrackingPose: &ovrPosef,
currentTrackingPose: &ovrPosef,
) -> f32 {
return eyeLevelTrackingPose.__bindgen_anon_1.Position.y
- currentTrackingPose.__bindgen_anon_1.Position.y;
}
pub unsafe fn vrapi_GetTransformFromPose(pose: &ovrPosef) -> ovrMatrix4f {
let rotation = ovrMatrix4f_CreateFromQuaternion(&pose.Orientation);
let translation = ovrMatrix4f_CreateTranslation(
pose.__bindgen_anon_1.Position.x,
pose.__bindgen_anon_1.Position.y,
pose.__bindgen_anon_1.Position.z,
);
return ovrMatrix4f_Multiply(&translation, &rotation);
}
pub unsafe fn vrapi_GetViewMatrixFromPose(pose: &ovrPosef) -> ovrMatrix4f {
let transform = vrapi_GetTransformFromPose(&pose);
return ovrMatrix4f_Inverse(&transform);
}
// Utility function to get the eye view matrix based on the center eye view matrix and the IPD.
pub fn vrapi_GetEyeViewMatrix(
center_eye_view_matrix: &ovrMatrix4f,
interpupillaryDistance: f32,
eye: i32,
) -> ovrMatrix4f {
let eye_offset = (if eye > 0 { -0.5 } else { 0.5 }) * interpupillaryDistance;
let eye_offset_matrix = ovrMatrix4f_CreateTranslation(eye_offset, 0.0, 0.0);
ovrMatrix4f_Multiply(&eye_offset_matrix, center_eye_view_matrix)
}
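// Illustrative sketch, not part of the original bindings: a quick check of the
// eye-view helper above. It assumes ovrMatrix4f_CreateTranslation and the row-major
// ovrMatrix4f layout (translation stored in column 3) used elsewhere in this file.
#[cfg(test)]
mod eye_view_matrix_example {
    use super::*;

    #[test]
    fn eye_views_are_separated_by_the_ipd() {
        // Identity center-eye view: the camera sits at the origin.
        let center = ovrMatrix4f_CreateTranslation(0.0, 0.0, 0.0);
        let ipd: f32 = 0.064; // 64 mm, a typical interpupillary distance
        let left = vrapi_GetEyeViewMatrix(&center, ipd, 0);
        let right = vrapi_GetEyeViewMatrix(&center, ipd, 1);
        // The X translation of the two eye views should differ by the full IPD.
        assert!(((left.M[0][3] - right.M[0][3]).abs() - ipd).abs() < 1e-6);
    }
}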
| true |
89080a3c30be616b7f9f0418c06dae0a06e67c01
|
Rust
|
omadoyeabraham/rust-book-codealong
|
/chapter-6/playground.rs
|
UTF-8
| 904 | 3.5625 | 4 |
[] |
no_license
|
#[derive(Debug)]
enum IpAddressKind {
V4,
V6
}
#[derive(Debug)]
enum IPAddress {
V4(String),
V6(String)
}
#[derive(Debug)]
struct IpAddress {
kind: IpAddressKind,
address: String
}
fn main() {
route(IpAddressKind::V4);
route(IpAddressKind::V6);
go_to(&IpAddress {
kind: IpAddressKind::V4,
address: String::from("127.0.0.1")
});
go_to(&IpAddress {
kind: IpAddressKind::V6,
address: String::from("::1")
});
println!("{:?}", IPAddress::V4(String::from("127.0.0.1")));
println!("{:?}", IPAddress::V6(String::from("::1")));
let x = 5;
let y: Option<u8> = Some(45);
let sum: Option<u8> = match y {
Some(i) => Some(i + x),
None => None
};
println!("{:?}", sum);
}
fn route(ip: IpAddressKind) {
println!("{:?}", ip);
}
fn go_to(ip: &IpAddress) {
println!("{:#?}", ip);
}
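// Illustrative addition, not part of the original exercise: the Option arithmetic in
// main() can also be written with `map`, the idiomatic shorthand for that match.
fn add_to_option(x: u8, y: Option<u8>) -> Option<u8> {
    y.map(|i| i + x)
}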
| true |
e9a5ca72937d2fae75f85361f1e92fad105fe104
|
Rust
|
lazmond3/rust-nand2tetris
|
/src/alu.rs
|
UTF-8
| 1,502 | 3.53125 | 4 |
[] |
no_license
|
use crate::bit::Bit;
use crate::not_word::not_word;
use crate::word::Word;
fn alu(
a: Word,
b: Word,
a_is_zero_x: Bit, // zx : a -> 0
b_is_zero_x: Bit, // zy : b -> 0
not_a_x: Bit, // nx : a -> !a
not_b_x: Bit, // ny : b -> !b
functional_x: Bit, // f : when 0 -> add, when 1 -> and
not_out_x: Bit, // no : out -> !out)
) -> (Word, Bit, Bit) {
    // The Hack ALU applies its control bits in sequence: the input is zeroed first,
    // then optionally negated, so zx = 1 together with nx = 1 yields !0.
    let zeroed_a: Word = if a_is_zero_x == Bit::I {
        Word::new_empty()
    } else {
        a
    };
    let n_a: Word = if not_a_x == Bit::I {
        not_word(zeroed_a)
    } else {
        zeroed_a
    };
    let zeroed_b = if b_is_zero_x == Bit::I {
        Word::new_empty()
    } else {
        b
    };
    let n_b = if not_b_x == Bit::I {
        not_word(zeroed_b)
    } else {
        zeroed_b
    };
let mut res = if functional_x == Bit::O {
n_a + n_b
} else {
n_a & n_b
};
res = if not_out_x == Bit::I {
not_word(res)
} else {
res
};
let zr = Bit::from_bool(res == Word::new_empty());
    // Note: the negative flag is not derived from the result here; it is always low.
    let ng = Bit::O;
(res, zr, ng)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn for_alu_add() {
let word_05: Word = Word::num_to_bit(5);
let word_03: Word = Word::num_to_bit(3);
assert_eq!(
alu(
word_03.clone(),
word_05.clone(),
Bit::O,
Bit::O,
Bit::O,
Bit::O,
Bit::O, // add
Bit::O
),
(word_03 + word_05, Bit::O, Bit::O)
);
}
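    // Illustrative extra case, not in the original file: the `and` path (f = 1).
    // 3 & 5 = 1, so the zero flag stays low.
    #[test]
    fn for_alu_and() {
        let word_05: Word = Word::num_to_bit(5);
        let word_03: Word = Word::num_to_bit(3);
        assert_eq!(
            alu(
                word_03.clone(),
                word_05.clone(),
                Bit::O,
                Bit::O,
                Bit::O,
                Bit::O,
                Bit::I, // and
                Bit::O
            ),
            (word_03 & word_05, Bit::O, Bit::O)
        );
    }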
}
| true |
005ac4d51cfff218f0cb18b7b2fdb0aeb78788c9
|
Rust
|
Haizzz/zoxide
|
/src/utils.rs
|
UTF-8
| 288 | 3.390625 | 3 |
[
"MIT"
] |
permissive
|
use std::process;
pub fn exit_with_message(msg: &str) -> ! {
// print a message to stderr and exit
eprint!("{}", msg);
process::exit(1);
}
pub fn bit_at_index(byte: u8, index: u8) -> bool {
// given a byte, return the bit value at index
(byte & (1 << index)) != 0
}
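// Illustrative sketch, not part of the original file: quick checks for bit_at_index.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reads_individual_bits() {
        // 0b0000_0101 has bits 0 and 2 set.
        assert!(bit_at_index(0b0000_0101, 0));
        assert!(!bit_at_index(0b0000_0101, 1));
        assert!(bit_at_index(0b0000_0101, 2));
        assert!(!bit_at_index(0b0000_0101, 7));
    }
}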
| true |
f96c8ef385be2849a7798420429d4fa50ea94dfb
|
Rust
|
greendwin/rust_ray
|
/src/math/ray.rs
|
UTF-8
| 1,072 | 3.03125 | 3 |
[] |
no_license
|
use super::vec3::Vec3;
#[derive(Debug, Clone)]
pub struct Hit {
pub pt: Vec3,
pub norm: Vec3,
pub t: f64,
pub front_face: bool,
}
impl Hit {
pub fn new(ray: &Ray, t: f64, pt: Vec3, outward_norm: Vec3) -> Self {
let front_face = ray.dir.dot(outward_norm) < 0.0;
let norm = if front_face {
outward_norm
} else {
-outward_norm
};
Self {
pt,
norm,
t,
front_face,
}
}
}
pub trait HitRay<Mat> {
fn hit(&self, ray: &Ray, t_min: f64, t_max: f64) -> Option<(Hit, Mat)>;
}
#[derive(Debug, Clone)]
pub struct Ray {
pub orig: Vec3,
pub dir: Vec3,
pub time: f64,
}
impl Ray {
pub fn new(origin: impl Into<Vec3>, direction: impl Into<Vec3>, time: impl Into<f64>) -> Self {
Self {
orig: origin.into(),
dir: direction.into(),
time: time.into(),
}
}
#[inline]
pub fn at(&self, t: impl Into<f64>) -> Vec3 {
self.orig + self.dir * t.into()
}
}
| true |
290fd23b46015e97ba97e9d27bc6539b6bfef0b2
|
Rust
|
denningk/blues
|
/src/textures/model_texture.rs
|
UTF-8
| 239 | 2.90625 | 3 |
[] |
no_license
|
pub struct ModelTexture {
texture_id: u32,
}
impl ModelTexture {
pub fn new(texture_id: u32) -> ModelTexture {
ModelTexture { texture_id }
}
pub fn get_texture_id(&self) -> &u32 {
&self.texture_id
}
}
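// Illustrative usage sketch, not part of the original file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stores_and_returns_the_texture_id() {
        let texture = ModelTexture::new(7);
        assert_eq!(*texture.get_texture_id(), 7);
    }
}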
| true |
8ed6e9edc06634ef8dafa4cfa272b1a76fb46e8f
|
Rust
|
whentze/drydock
|
/src/error.rs
|
UTF-8
| 878 | 2.796875 | 3 |
[] |
no_license
|
use core::fmt;
#[derive(Debug, PartialEq, Eq)]
pub enum BadBytes {
LengthMismatch {
wanted: usize,
got: usize,
},
VetFailed,
}
impl fmt::Display for BadBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "the bytes are bad. just really bad.")
}
}
#[cfg(feature = "std")]
#[path = ""]
mod std_stuff {
use super::*;
use std::{error, io};
impl error::Error for BadBytes {}
#[derive(Debug)]
pub enum FromReadError {
Io(io::Error),
BadBytes(BadBytes),
}
impl From<io::Error> for FromReadError {
fn from(inner: io::Error) -> Self {
Self::Io(inner)
}
}
impl From<BadBytes> for FromReadError {
fn from(inner: BadBytes) -> Self {
Self::BadBytes(inner)
}
}
}
#[cfg(feature = "std")]
pub use std_stuff::*;
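// Illustrative sketch, not part of the original file: both error sources convert into
// FromReadError through the From impls above (only when the `std` feature is enabled).
#[cfg(all(test, feature = "std"))]
mod conversion_tests {
    use super::*;

    #[test]
    fn io_and_vet_errors_both_convert() {
        let from_vet: FromReadError = BadBytes::VetFailed.into();
        assert!(matches!(from_vet, FromReadError::BadBytes(BadBytes::VetFailed)));

        let from_io: FromReadError =
            std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "eof").into();
        assert!(matches!(from_io, FromReadError::Io(_)));
    }
}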
| true |
21d9194b411650b10a732d54940cee3a346e58cc
|
Rust
|
nickmass/nickmass-com
|
/src/server/db.rs
|
UTF-8
| 576 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
pub use deadpool_redis::Connection;
use std::sync::Arc;
use super::Error;
#[derive(Clone)]
pub struct Db {
pool: Arc<deadpool_redis::Pool>,
}
impl Db {
pub fn new<S: Into<String>>(url: S) -> Result<Db, Error> {
let pool = deadpool_redis::Config::from_url(url)
.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
Ok(Db {
pool: Arc::new(pool),
})
}
#[tracing::instrument(name = "db::get", skip_all, err)]
pub async fn get(&self) -> Result<Connection, Error> {
Ok(self.pool.get().await?)
}
}
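// Illustrative sketch, not part of the original file: creating the pool only parses the
// URL and builds configuration; no connection is opened until `get()` is awaited.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn accepts_a_well_formed_redis_url() {
        assert!(Db::new("redis://127.0.0.1/").is_ok());
    }
}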
| true |
b1bd83ce24f283cf8e7557119604d2c4d6e14918
|
Rust
|
coffeecup-winner/icfpc2020
|
/src/eval.rs
|
UTF-8
| 9,844 | 3.40625 | 3 |
[
"MIT"
] |
permissive
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use crate::syntax::{Stmt, Token, Var};
#[derive(Debug, Default)]
pub struct State {
vars: HashMap<Var, Value>,
}
#[derive(Debug, PartialEq, Clone)]
pub enum Value_ {
Var(Var),
Number(i64),
BuiltIn(BuiltIn),
Apply(Value, Value),
}
pub fn var(v: Var) -> Value {
Rc::new(RefCell::new(V {
val: Value_::Var(v),
computed: false,
}))
}
pub fn number(n: i64) -> Value {
Rc::new(RefCell::new(V {
val: Value_::Number(n),
computed: true,
}))
}
pub fn b(b: BuiltIn) -> Value {
Rc::new(RefCell::new(V {
val: Value_::BuiltIn(b),
computed: true,
}))
}
pub fn ap(f: Value, arg: Value) -> Value {
Rc::new(RefCell::new(V {
val: Value_::Apply(f, arg),
computed: false,
}))
}
#[derive(Debug, PartialEq, Clone)]
pub struct V {
pub val: Value_,
computed: bool,
}
impl V {
pub fn unwrap_number(&self) -> i64 {
if let Value_::Number(n) = &self.val {
*n
} else {
panic!("Not a number");
}
}
}
pub type Value = Rc<RefCell<V>>;
// Built-in functions except `ap`
#[derive(Debug, PartialEq, Clone)]
pub enum BuiltIn {
Inc, // #5
Dec, // #6
Add, // #7
Mul, // #9
Div, // #10
Eq, // #11
Lt, // #12
Neg, // #16
S, // #18
C, // #19
B, // #20
True, // #21
False, // #22
Pwr2, // #23
I, // #24
Cons, // #25
Head, // #26
Tail, // #27
Nil, // #28
IsNil, // #29
}
impl State {
pub fn new() -> Self {
State::default()
}
pub fn eval_v(&self, var: &Var) -> Value {
let v = self.vars.get(var).unwrap();
self.eval(v.clone())
}
pub fn eval(&self, val: Value) -> Value {
if val.borrow().computed {
return val;
}
let mut curr = val.clone();
loop {
let new = self.eval_core(curr.clone());
if Rc::ptr_eq(&new, &curr) {
let value = new.borrow().val.clone();
val.borrow_mut().val = value;
val.borrow_mut().computed = true;
break val;
}
curr = new.clone();
}
}
fn eval_core(&self, val: Value) -> Value {
// println!("eval_value: {:?}", val);
if val.borrow().computed {
return val;
}
let value = val.borrow().val.clone();
match &value {
Value_::Var(v) => self.vars.get(&v).unwrap().clone(),
Value_::Number(_) => val,
Value_::BuiltIn(_) => val,
Value_::Apply(f0, arg0) => {
match &self.eval(f0.clone()).borrow().val {
Value_::BuiltIn(BuiltIn::Inc) => {
if let Value_::Number(n) = self.eval(arg0.clone()).borrow().val {
number(n + 1)
} else {
panic!("Invalid argument for `inc`");
}
}
Value_::BuiltIn(BuiltIn::Dec) => {
number(self.eval(arg0.clone()).borrow().unwrap_number() - 1)
}
Value_::BuiltIn(BuiltIn::Neg) => {
number(-self.eval(arg0.clone()).borrow().unwrap_number())
}
Value_::BuiltIn(BuiltIn::Pwr2) => number(
(2 as i64).pow(self.eval(arg0.clone()).borrow().unwrap_number() as u32),
),
Value_::BuiltIn(BuiltIn::I) => arg0.clone(),
Value_::BuiltIn(BuiltIn::Head) => ap(arg0.clone(), b(BuiltIn::True)),
Value_::BuiltIn(BuiltIn::Tail) => ap(arg0.clone(), b(BuiltIn::False)),
Value_::BuiltIn(BuiltIn::Nil) => b(BuiltIn::True),
Value_::BuiltIn(BuiltIn::IsNil) => ap(
arg0.clone(),
ap(b(BuiltIn::True), ap(b(BuiltIn::True), b(BuiltIn::False))),
),
// ===== Arity 2 =====
Value_::Apply(f1, arg1) => {
match &self.eval(f1.clone()).borrow().val {
Value_::BuiltIn(BuiltIn::Add) => number(
self.eval(arg1.clone()).borrow().unwrap_number()
+ self.eval(arg0.clone()).borrow().unwrap_number(),
),
Value_::BuiltIn(BuiltIn::Mul) => number(
self.eval(arg1.clone()).borrow().unwrap_number()
* self.eval(arg0.clone()).borrow().unwrap_number(),
),
Value_::BuiltIn(BuiltIn::Div) => number(
self.eval(arg1.clone()).borrow().unwrap_number()
/ self.eval(arg0.clone()).borrow().unwrap_number(),
),
Value_::BuiltIn(BuiltIn::Eq) => {
if self.eval(arg1.clone()).borrow().unwrap_number()
== self.eval(arg0.clone()).borrow().unwrap_number()
{
b(BuiltIn::True)
} else {
b(BuiltIn::False)
}
}
Value_::BuiltIn(BuiltIn::Lt) => {
if self.eval(arg1.clone()).borrow().unwrap_number()
< self.eval(arg0.clone()).borrow().unwrap_number()
{
b(BuiltIn::True)
} else {
b(BuiltIn::False)
}
}
Value_::BuiltIn(BuiltIn::True) => arg1.clone(),
Value_::BuiltIn(BuiltIn::False) => arg0.clone(),
Value_::BuiltIn(BuiltIn::Cons) => {
let cons = ap(
ap(b(BuiltIn::Cons), self.eval(arg1.clone())),
self.eval(arg0.clone()),
);
cons.borrow_mut().computed = true;
cons
}
// ===== Arity 3 =====
Value_::Apply(f2, arg2) => match &self.eval(f2.clone()).borrow().val {
Value_::BuiltIn(BuiltIn::S) => ap(
ap(arg2.clone(), arg0.clone()),
ap(arg1.clone(), arg0.clone()),
),
Value_::BuiltIn(BuiltIn::C) => {
ap(ap(arg2.clone(), arg0.clone()), arg1.clone())
}
Value_::BuiltIn(BuiltIn::B) => {
ap(arg2.clone(), ap(arg1.clone(), arg0.clone()))
}
Value_::BuiltIn(BuiltIn::Cons) => {
ap(ap(arg0.clone(), arg2.clone()), arg1.clone())
}
_ => val,
},
_ => val,
}
}
_ => val,
}
}
}
}
pub fn interpret(&mut self, stmt: Stmt) {
// println!("Compiling {:?}", stmt.var);
// println!("Raw: {:?}", stmt.code);
let v = self.compile(stmt.code);
// println!("Compiled: {:?}", v);
self.vars.insert(stmt.var, v);
}
fn compile(&self, code: Vec<Token>) -> Value {
let mut stack: Vec<Value> = vec![];
for token in code.into_iter().rev() {
match token {
Token::Var(v) => stack.push(var(v)),
Token::Number(n) => stack.push(number(n)),
Token::True => stack.push(b(BuiltIn::True)),
Token::False => stack.push(b(BuiltIn::False)),
Token::Nil => stack.push(b(BuiltIn::Nil)),
Token::Inc => stack.push(b(BuiltIn::Inc)),
Token::Dec => stack.push(b(BuiltIn::Dec)),
Token::Add => stack.push(b(BuiltIn::Add)),
Token::Mul => stack.push(b(BuiltIn::Mul)),
Token::Div => stack.push(b(BuiltIn::Div)),
Token::Eq => stack.push(b(BuiltIn::Eq)),
Token::Lt => stack.push(b(BuiltIn::Lt)),
Token::Neg => stack.push(b(BuiltIn::Neg)),
Token::S => stack.push(b(BuiltIn::S)),
Token::C => stack.push(b(BuiltIn::C)),
Token::B => stack.push(b(BuiltIn::B)),
Token::Pwr2 => stack.push(b(BuiltIn::Pwr2)),
Token::I => stack.push(b(BuiltIn::I)),
Token::Cons => stack.push(b(BuiltIn::Cons)),
Token::Head => stack.push(b(BuiltIn::Head)),
Token::Tail => stack.push(b(BuiltIn::Tail)),
Token::IsNil => stack.push(b(BuiltIn::IsNil)),
Token::Ap => {
let x = stack.pop().unwrap();
let v = stack.pop().unwrap();
stack.push(ap(x, v));
}
}
}
assert!(stack.len() == 1);
stack[0].clone()
}
}
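// Illustrative sketch, not part of the original file: compiling and evaluating
// `ap ap add 1 2` with the interpreter above. The token stream is built directly
// here instead of going through the parser in syntax.rs.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn add_two_numbers() {
        let state = State::new();
        let code = vec![
            Token::Ap,
            Token::Ap,
            Token::Add,
            Token::Number(1),
            Token::Number(2),
        ];
        let value = state.compile(code);
        let result = state.eval(value);
        assert_eq!(result.borrow().unwrap_number(), 3);
    }
}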
| true |
78dd8a098efcf4785c3d5693903f42d961582f35
|
Rust
|
Enigmatrix/aoc17
|
/src/day09.rs
|
UTF-8
| 1,569 | 3.109375 | 3 |
[] |
no_license
|
use std::collections::*;
pub fn day09_1(s : String) -> u32{
let mut running_total = 0;
let mut scope = 0;
let mut in_garbage = false;
let mut prev_cancel = false;
for c in s.chars(){
if in_garbage {
if c == '>' && !prev_cancel {
in_garbage = false;
prev_cancel = false;
}
else if c == '!' && !prev_cancel {
prev_cancel = true;
}
else {
prev_cancel = false;
}
}
else{
if c == '{' {
scope+=1;
running_total+=scope;
}
else if c == '}' {
scope -=1;
}
else if c == '<' {
in_garbage = true;
}
}
}
running_total
}
pub fn day09_2(s: String) -> u32{
let mut running_total = 0;
let mut in_garbage = false;
let mut prev_cancel = false;
for c in s.chars(){
if in_garbage {
if c == '>' && !prev_cancel {
in_garbage = false;
prev_cancel = false;
}
else if c == '!' && !prev_cancel {
prev_cancel = true;
}
else if !prev_cancel{
running_total+=1;
}
else {
prev_cancel = false;
}
}
else{
if c == '<' {
in_garbage = true;
prev_cancel = false;
}
}
}
running_total
}
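// Illustrative sketch, not part of the original file: the worked examples from the
// Advent of Code 2017 day 9 statement, run through both scoring functions.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn scores_nested_groups() {
        assert_eq!(day09_1(String::from("{}")), 1);
        assert_eq!(day09_1(String::from("{{{}}}")), 6);
        assert_eq!(day09_1(String::from("{{},{}}")), 5);
    }

    #[test]
    fn counts_garbage_characters() {
        assert_eq!(day09_2(String::from("<>")), 0);
        assert_eq!(day09_2(String::from("<random characters>")), 17);
        assert_eq!(day09_2(String::from("<{!>}>")), 2);
    }
}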
| true |
51873643e1c834476aa10a33115dc48e67a7223b
|
Rust
|
boa-dev/boa
|
/boa_engine/src/vm/opcode/iteration/loop_ops.rs
|
UTF-8
| 1,100 | 3.03125 | 3 |
[
"MIT",
"Unlicense"
] |
permissive
|
use crate::JsNativeError;
use crate::{
vm::{opcode::Operation, CompletionType},
Context, JsResult,
};
/// `IncrementLoopIteration` implements the Opcode Operation for `Opcode::IncrementLoopIteration`.
///
/// Operation:
/// - Increment the loop iteration count.
#[derive(Debug, Clone, Copy)]
pub(crate) struct IncrementLoopIteration;
impl Operation for IncrementLoopIteration {
const NAME: &'static str = "IncrementLoopIteration";
const INSTRUCTION: &'static str = "INST - IncrementLoopIteration";
fn execute(context: &mut Context<'_>) -> JsResult<CompletionType> {
let previous_iteration_count = context.vm.frame_mut().loop_iteration_count;
let max = context.vm.runtime_limits.loop_iteration_limit();
if previous_iteration_count > max {
return Err(JsNativeError::runtime_limit()
.with_message(format!("Maximum loop iteration limit {max} exceeded"))
.into());
}
context.vm.frame_mut().loop_iteration_count = previous_iteration_count.wrapping_add(1);
Ok(CompletionType::Normal)
}
}
| true |
8dfe5acb97c76f092738d24f73ee258e33e39660
|
Rust
|
anasahmed700/Rust-examples
|
/ch05.1_structs/src/main.rs
|
UTF-8
| 2,739 | 3.671875 | 4 |
[] |
no_license
|
// 1. structs are user defined datatypes
// 2. a struct value lives wherever its owner puts it (on the stack here);
//    only fields like String own heap-allocated data
// defining a struct
#[derive(Debug)]
struct Food {
restaurant : String,
item : String,
size : u8,
price : u16,
available : bool
} // new datatype Food is defined (blueprint)
#[derive(Debug)]
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool
}
// Tuple structs
#[derive(Debug)]
struct Rgb(u8, u8, u8);
#[derive(Debug)]
struct Point(u8, u8, u8);
fn main() {
// creating instance of Food struct
let pizza = Food{
restaurant : "Pizza Hut".to_string(),
item : String::from("Chicken Fajita"),
size : 9,
price : 800,
available : true
};
// mutable struct
let mut karahi = Food{
available : true,
restaurant : String::from("BBQ tonight"),
// taking field value from another instance
price : pizza.price,
item : "Chicken Ginger".to_string(),
size : 1
};
let biryani = Food{
restaurant: String::from("Student Biryani"),
item: String::from("Beef Biryani"),
..karahi // Creating Instances From Other Instances With Struct Update Syntax
};
println!("Karahi: {:#?}", karahi);
karahi.price = 1100; // mutable struct value is changed
println!("Karahi {} price is {}", karahi.item, karahi.price);
println!("Biryani: {:#?}", biryani);
println!("{} price is {}", biryani.item, karahi.price);
println!("Struct with functions...");
func_struct(pizza); // here pizza moved to func_struct scope
// println!("{:#?}", pizza); // error borrowing moved value
println!("Struct in function return...");
println!("{:#?}", struct_in_fn());
let username = String::from("anasahmed700");
let email = String::from("[email protected]");
println!("User details: {:#?}", build_user(email, username));
let white = Rgb(255, 255, 255);
let origin = Point(0, 0, 0);
println!("RGB Color values: {:?} Coordinates: {:?}", white, origin)
}
// using struct with function
fn func_struct(data: Food){
println!("restaurant => {}", data.restaurant);
println!("item => {}", data.item);
println!("price => {}", data.price);
}
// struct in function return
fn struct_in_fn() -> Food{
let chai = Food{
available : true,
restaurant : String::from("Baba ka dhaba"),
price : 100,
item : "Doodh patti".to_string(),
size : 2
};
chai
}
fn build_user(email: String, username: String) -> User{
User{
// Using the Field Init Shorthand when Variables and Fields Have the Same Name
email,
username,
active: true,
sign_in_count: 1
}
}
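// Illustrative addition, not part of the original exercise: tuple-struct fields are
// accessed by position rather than by name, e.g. `.0` for the first component.
fn red_channel(color: &Rgb) -> u8 {
    color.0
}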
| true |
029094f879a3f01199ab7f0c0bd2134345a0ee1a
|
Rust
|
sugyan/atcoder
|
/arc040/src/bin/b.rs
|
UTF-8
| 496 | 2.875 | 3 |
[] |
no_license
|
use proconio::marker::Chars;
use proconio::{fastout, input};
#[fastout]
fn main() {
input! {
n: usize, r: usize,
mut s: Chars,
}
let mut answer = 0;
if let Some(pos) = s.iter().rev().position(|&c| c == '.') {
if n - r > pos {
answer += n - r - pos;
}
}
while let Some(pos) = s.iter().position(|&c| c == '.') {
answer += 1;
(0..r).for_each(|i| s[(n - 1).min(pos + i)] = 'o');
}
println!("{}", answer);
}
| true |
9e8723f4f0b16eee2c836371d6f6e63a4a7d9d31
|
Rust
|
jpverkamp/advent-of-code
|
/2022/src/bin/25-snafuinator.rs
|
UTF-8
| 1,920 | 3.265625 | 3 |
[] |
no_license
|
use aoc::*;
use std::{fmt::Display, path::Path};
#[derive(Clone, Debug)]
struct Snafu {
value: String,
}
impl Display for Snafu {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.value)
}
}
impl From<String> for Snafu {
fn from(value: String) -> Self {
Snafu { value }
}
}
impl From<isize> for Snafu {
fn from(mut v: isize) -> Self {
// Convert to base 5
let mut digits = Vec::new();
while v > 0 {
let m = v % 5;
v = v / 5;
if m < 3 {
digits.push(m.to_string());
} else if m == 3 {
digits.push(String::from("="));
v += 1;
} else if m == 4 {
digits.push(String::from("-"));
v += 1;
}
}
Snafu {
value: digits.into_iter().rev().collect::<Vec<_>>().join(""),
}
}
}
impl Into<isize> for Snafu {
fn into(self) -> isize {
self.value.chars().fold(0, |a, c| match c {
'2' | '1' | '0' => a * 5 + c.to_digit(10).unwrap() as isize,
'-' => a * 5 - 1,
'=' => a * 5 - 2,
_ => panic!("Snafu SNAFUed, what the Snafu is a {c}"),
})
}
}
fn part1(filename: &Path) -> String {
Snafu::from(
iter_lines(filename)
.map(Snafu::from)
.map::<isize, _>(Snafu::into)
.sum::<isize>(),
)
.to_string()
}
fn part2(_filename: &Path) -> String {
String::from("Start The Blender")
}
fn main() {
aoc_main(part1, part2);
}
#[cfg(test)]
mod tests {
use crate::{part1, part2};
use aoc::aoc_test;
#[test]
fn test1() {
aoc_test("25", part1, "2-10==12-122-=1-1-22")
}
// too low: 35023647158862
#[test]
fn test2() {
aoc_test("25", part2, "Start The Blender")
}
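    // Illustrative extra check, not part of the original file: round-tripping the
    // worked example from the puzzle statement through both conversions.
    #[test]
    fn snafu_round_trip() {
        let n: isize = crate::Snafu::from(String::from("1=11-2")).into();
        assert_eq!(n, 2022);
        assert_eq!(crate::Snafu::from(2022isize).to_string(), "1=11-2");
    }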
}
| true |
6a3299f29f84cf56e7e95e950721283af87adb00
|
Rust
|
gijs/t-rex
|
/src/cache/filecache.rs
|
UTF-8
| 2,570 | 3.15625 | 3 |
[
"MIT"
] |
permissive
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use cache::cache::Cache;
use std::fs::{self,File};
use std::io::{self,Read,Write};
use std::path::Path;
pub struct Filecache {
pub basepath: String,
}
impl Filecache {
fn dir(&self, tileset: &str, xtile: u16, ytile: u16, zoom: u16) -> String {
format!("{}/{}/{}/{}", self.basepath, tileset, zoom, xtile)
}
fn path(&self, tileset: &str, xtile: u16, ytile: u16, zoom: u16) -> String {
format!("{}/{}.pbf", self.dir(tileset, xtile, ytile, zoom), ytile)
}
}
impl Cache for Filecache {
fn lookup<F>(&self, tileset: &str, xtile: u16, ytile: u16, zoom: u16, mut read: F) -> Result<(), io::Error>
where F : FnMut(&mut Read) -> Result<(), io::Error>
{
let path = self.path(tileset, xtile, ytile, zoom);
debug!("Filecache.lookup {}", path);
match File::open(&path) {
Ok(mut f) => read(&mut f),
Err(e) => Err(e)
}
}
fn store<F>(&self, tileset: &str, xtile: u16, ytile: u16, zoom: u16, write: F) -> Result<(), io::Error>
where F : Fn(&mut Write) -> Result<(), io::Error>
{
let path = self.path(tileset, xtile, ytile, zoom);
debug!("Filecache.store {}", path);
let dir = self.dir(tileset, xtile, ytile, zoom);
try!(fs::create_dir_all(Path::new(&dir as &str)));
let mut f = try!(File::create(path));
write(&mut f)
}
}
#[test]
fn test_file() {
use std::env;
let mut dir = env::temp_dir();
dir.push("t_rex_test");
let basepath = format!("{}", &dir.display());
fs::remove_dir_all(&basepath);
let cache = Filecache { basepath: basepath };
assert_eq!(cache.dir("tileset", 1, 2, 0), format!("{}/{}", cache.basepath, "tileset/0/1"));
let pbf = format!("{}/{}", cache.basepath, "tileset/0/1/2.pbf");
assert_eq!(cache.path("tileset", 1, 2, 0), pbf);
// Cache miss
assert!(cache.lookup("tileset", 1, 2, 0, |_| Ok(())).is_err());
// Write into cache
let res = cache.store("tileset", 1, 2, 0, |f| {
f.write_all("0123456789".as_bytes())
});
assert_eq!(res.ok(), Some(()));
assert!(Path::new(&pbf).exists());
// Cache hit
assert!(cache.lookup("tileset", 1, 2, 0, |_| Ok(())).is_ok());
// Read from cache
let mut s = String::new();
cache.lookup("tileset", 1, 2, 0, |f| {
f.read_to_string(&mut s).map(|_| ())
});
assert_eq!(&s, "0123456789");
}
| true |
893478139584067fcb035cc6a4e6b83dc9cabab4
|
Rust
|
bootandy/ports2
|
/src/main.rs
|
UTF-8
| 5,710 | 2.734375 | 3 |
[] |
no_license
|
extern crate pnet;
use pnet::datalink::Channel::Ethernet;
use pnet::datalink::{self, NetworkInterface};
use pnet::packet::ethernet::EtherType;
use pnet::packet::ethernet::EtherTypes::{Arp, Ipv4, Ipv6, Rarp, Vlan, WakeOnLan};
use pnet::packet::ethernet::EthernetPacket;
use pnet::packet::PrimitiveValues;
use pnet::util::MacAddr;
use std::cmp::max;
use std::collections::HashMap;
use std::env;
use std::thread;
use std::time::Duration;
use std::time::SystemTime;
const OLD_ETHERNET: u16 = 2047;
struct PacketTracker {
counter: HashMap<u16, u64>,
is_my_box: HashMap<bool, u64>,
me: NetworkInterface,
just_me: bool,
}
impl PacketTracker {
fn new(iface: NetworkInterface, jm: bool) -> PacketTracker {
let mut pt = PacketTracker {
counter: HashMap::new(),
is_my_box: HashMap::new(),
me: iface,
just_me: jm,
};
pt.is_my_box.entry(true).or_insert(0);
pt.is_my_box.entry(false).or_insert(0);
pt
}
fn inspect_packet(&mut self, packet: EthernetPacket) {
let packet_is_for_me = packet.get_source() == self.me.mac.unwrap()
|| packet.get_destination() == self.me.mac.unwrap();
if self.just_me && !packet_is_for_me {
return;
}
let c = self.is_my_box.entry(packet_is_for_me).or_insert(0);
*c += 1;
let v = max(OLD_ETHERNET, packet.get_ethertype().to_primitive_values().0);
let c = self.counter.entry(v).or_insert(0);
*c += 1;
// println!("got packet size: {:?}", MutableEthernetPacket::packet_size(&packet));
}
fn pretty_out(&mut self, start_time: &SystemTime) {
println!("Time from {:?} ", start_time);
println!("My IP: {:?}", self.me.ips);
for (k, v) in self.counter.iter() {
#[allow(non_upper_case_globals)]
let print_k = match EtherType(*k) {
EtherType(OLD_ETHERNET) => "Pre ether2".to_string(),
Arp => "Arp".to_string(),
Rarp => "Rarp".to_string(),
Vlan => "Vlan".to_string(),
WakeOnLan => "WakeOnLan".to_string(),
Ipv4 => "Ipv4".to_string(),
Ipv6 => "Ipv6".to_string(),
_ => format!("Unknown {}", k),
};
println!(" {:<15} : {} ", print_k, v)
}
if !self.just_me {
println!(" packets for me : {:?} ", self.is_my_box[&true]);
println!(" packets for others : {:?} ", self.is_my_box[&false]);
}
self.counter.clear();
self.is_my_box.clear();
self.is_my_box.entry(true).or_insert(0);
self.is_my_box.entry(false).or_insert(0);
}
}
fn mac_to_string(mac: Option<MacAddr>) -> String {
match mac {
Some(m) => m.to_string(),
None => "Unknown mac address".to_string(),
}
}
fn print_my_options() {
println!("Run me as root with a name of a network interface");
println!("Example: sudo ports2 lo");
println!("Here are your network interfaces");
println!("Name: MAC:");
for i in datalink::interfaces().into_iter() {
println!("{:<9} {:?}", i.name, mac_to_string(i.mac));
}
}
// Invoke as <interface name>
fn main() {
match env::args().nth(1) {
None => print_my_options(),
Some(interface_name) => {
let just_me = env::args().nth(2).unwrap_or_else(|| "false".to_string());
doit(&interface_name, just_me.to_lowercase() == "true")
}
}
}
fn doit(interface_name: &str, just_me: bool) {
let interface_names_match = |iface: &NetworkInterface| iface.name == *interface_name;
// Find the network interface with the provided name
let interfaces = datalink::interfaces();
let interface_a = interfaces.into_iter().find(interface_names_match);
if let Some(interface) = interface_a {
println!("Running packet monitor");
if just_me {
println!("Just analysing packets for this box");
} else {
println!("Analysing all packets seen on network");
}
start_tracking(&interface, just_me);
} else {
println!("Can not find interface with name {}", interface_name);
print_my_options();
}
}
fn start_tracking(interface: &NetworkInterface, just_me: bool) {
let mut pt = PacketTracker::new(interface.clone(), just_me);
// Create a new channel, dealing with layer 2 packets
let (_tx, mut rx) = match datalink::channel(&interface, Default::default()) {
Ok(Ethernet(tx, rx)) => (tx, rx),
Ok(_) => panic!("Unhandled channel type"),
Err(e) => panic!(
"An error occurred when creating the datalink channel: {}",
e
),
};
//print_thread(&pt);
//let mut count = 0;
let mut start_counting_time = SystemTime::now();
loop {
//count += 1;
/*if count > 30 {
break
}*/
match rx.next() {
Ok(packet) => {
let packet = EthernetPacket::new(packet).unwrap();
pt.inspect_packet(packet);
}
Err(e) => {
// If an error occurs, we can handle it here
panic!("An error occurred while reading: {}", e);
}
}
if start_counting_time + Duration::new(5, 0) < SystemTime::now() {
pt.pretty_out(&start_counting_time);
start_counting_time = SystemTime::now()
}
}
}
/*fn print_thread(pt: &PacketTracker) {
thread::spawn(|| {
loop {
thread::sleep(Duration::from_millis(1000 * 5));
pt.pretty_out();
}
});
}*/
| true |
1764463440aeff4dd12782e01b3a3c2a081f6ac4
|
Rust
|
Pomettini/impostor
|
/src/adapter/mod.rs
|
UTF-8
| 689 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
use {Address, AddressBusIO, As, Data};
pub struct BusAdapter<'a, T: Address, U: Data> {
connection: &'a mut dyn AddressBusIO<T, U>,
}
impl<'a, T: Address, U: Data> BusAdapter<'a, T, U> {
pub fn new(bus: &'a mut dyn AddressBusIO<T, U>) -> BusAdapter<'a, T, U> {
BusAdapter { connection: bus }
}
}
impl<'a, T: Address + As<V>, U: Data + As<Z>, V: Address + As<T>, Z: Data + As<U>>
AddressBusIO<T, U> for BusAdapter<'a, V, Z>
{
fn read(&mut self, address: T) -> U {
self.connection.read(address.as_()).as_()
}
fn write(&mut self, address: T, value: U) {
self.connection.write(address.as_(), value.as_())
}
}
#[cfg(test)]
mod tests;
| true |
28d1fa46bb30bd930ab207a4ca040de3704f8869
|
Rust
|
austinkeeley/rust
|
/src/tools/clippy/clippy_lints/src/methods/unnecessary_lazy_eval.rs
|
UTF-8
| 4,254 | 2.953125 | 3 |
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-other-permissive",
"NCSA"
] |
permissive
|
use crate::utils::{is_type_diagnostic_item, match_qpath, snippet, span_lint_and_sugg};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::LateContext;
use super::UNNECESSARY_LAZY_EVALUATIONS;
// Return true if the expression is an accessor of any of the arguments
fn expr_uses_argument(expr: &hir::Expr<'_>, params: &[hir::Param<'_>]) -> bool {
params.iter().any(|arg| {
if_chain! {
if let hir::PatKind::Binding(_, _, ident, _) = arg.pat.kind;
if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = expr.kind;
if let [p, ..] = path.segments;
then {
ident.name == p.ident.name
} else {
false
}
}
})
}
fn match_any_qpath(path: &hir::QPath<'_>, paths: &[&[&str]]) -> bool {
paths.iter().any(|candidate| match_qpath(path, candidate))
}
fn can_simplify(expr: &hir::Expr<'_>, params: &[hir::Param<'_>], variant_calls: bool) -> bool {
match expr.kind {
// Closures returning literals can be unconditionally simplified
hir::ExprKind::Lit(_) => true,
hir::ExprKind::Index(ref object, ref index) => {
// arguments are not being indexed into
if expr_uses_argument(object, params) {
false
} else {
// arguments are not used as index
!expr_uses_argument(index, params)
}
},
// Reading fields can be simplified if the object is not an argument of the closure
hir::ExprKind::Field(ref object, _) => !expr_uses_argument(object, params),
// Paths can be simplified if the root is not the argument, this also covers None
hir::ExprKind::Path(_) => !expr_uses_argument(expr, params),
// Calls to Some, Ok, Err can be considered literals if they don't derive an argument
hir::ExprKind::Call(ref func, ref args) => if_chain! {
if variant_calls; // Disable lint when rules conflict with bind_instead_of_map
if let hir::ExprKind::Path(ref path) = func.kind;
if match_any_qpath(path, &[&["Some"], &["Ok"], &["Err"]]);
then {
// Recursively check all arguments
args.iter().all(|arg| can_simplify(arg, params, variant_calls))
} else {
false
}
},
// For anything more complex than the above, a closure is probably the right solution,
// or the case is handled by an other lint
_ => false,
}
}
/// lint use of `<fn>_else(simple closure)` for `Option`s and `Result`s that can be
/// replaced with `<fn>(return value of simple closure)`
pub(super) fn lint<'tcx>(
cx: &LateContext<'tcx>,
expr: &'tcx hir::Expr<'_>,
args: &'tcx [hir::Expr<'_>],
allow_variant_calls: bool,
simplify_using: &str,
) {
let is_option = is_type_diagnostic_item(cx, cx.typeck_results().expr_ty(&args[0]), sym!(option_type));
let is_result = is_type_diagnostic_item(cx, cx.typeck_results().expr_ty(&args[0]), sym!(result_type));
if is_option || is_result {
if let hir::ExprKind::Closure(_, _, eid, _, _) = args[1].kind {
let body = cx.tcx.hir().body(eid);
let ex = &body.value;
let params = &body.params;
if can_simplify(ex, params, allow_variant_calls) {
let msg = if is_option {
"unnecessary closure used to substitute value for `Option::None`"
} else {
"unnecessary closure used to substitute value for `Result::Err`"
};
span_lint_and_sugg(
cx,
UNNECESSARY_LAZY_EVALUATIONS,
expr.span,
msg,
&format!("Use `{}` instead", simplify_using),
format!(
"{0}.{1}({2})",
snippet(cx, args[0].span, ".."),
simplify_using,
snippet(cx, ex.span, ".."),
),
Applicability::MachineApplicable,
);
}
}
}
}
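// Illustrative sketch, not part of the original file: the shape of code this lint
// rewrites. A closure whose body is a literal (or another expression `can_simplify`
// accepts) is replaced with the eager variant of the method:
//
//     let opt: Option<u32> = None;
//     let _ = opt.unwrap_or_else(|| 0); // flagged by unnecessary_lazy_evaluations
//     let _ = opt.unwrap_or(0);         // suggested replacement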
| true |