blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
140
| path
stringlengths 5
183
| src_encoding
stringclasses 6
values | length_bytes
int64 12
5.32M
| score
float64 2.52
4.94
| int_score
int64 3
5
| detected_licenses
listlengths 0
47
| license_type
stringclasses 2
values | text
stringlengths 12
5.32M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
751a2fd63de8d440cbb843005df566800b4b1d67
|
Rust
|
Twinklebear/light_arena
|
/src/lib.rs
|
UTF-8
| 9,254 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
//! **Temporarily a more simple memory pool for keeping stack alloc objects
//! in copied into a shared heap rather than a true placement new memory arena.**
//! Unfortunately the path forward for placement new in Rust does not look
//! good right now, so I've reverted this crate to work more like a memory
//! heap where stuff can be put, but not constructed in place. This mimics
//! similar behavior, but allocations are limited to the stack size and
//! must first be made on the stack then copied in.
//!
//! This crate is written to solve a specific problem I have in
//! [tray\_rust](https://github.com/Twinklebear/tray_rust), where I want to
//! store trait objects and f32 arrays in a memory arena which is then reset
//! and reused for each pixel rendered (but not free'd and reallocated!).
//! The key features to enable this are the use of the nightly placement new feature, letting us
//! actually construct objects in place instead of copying from a stack temporary,
//! and reusing the previously allocated space via the `Allocator` scopes.
//! If you have a similar problem, this might be the right crate for you!
//! ## Examples
//!
//! Allocations in a `MemoryArena` are made using an allocator and the
//! placement in syntax. The `Allocator` grants exclusive access to the
//! arena while it's in scope, allowing to make allocations. Once the `Allocator`
//! is dropped the space used is marked available again for subsequent allocations.
//! Note that **Drop is never called** on objects allocated in the arena,
//! and thus the restriction that `T: Sized + Copy`.
//!
//! The arena is untyped and can store anything which is `Sized + Copy`.
//!
//! ```rust
//!
//! trait Foo {
//! fn speak(&self);
//! }
//!
//! #[derive(Copy, Clone)]
//! struct Bar(i32);
//! impl Foo for Bar {
//! fn speak(&self) {
//! println!("Bar! val = {}", self.0);
//! }
//! }
//!
//! #[derive(Copy, Clone)]
//! struct Baz;
//! impl Foo for Baz {
//! fn speak(&self) {
//! println!("Baz!");
//! }
//! }
//!
//! let mut arena = light_arena::MemoryArena::new(2);
//! let allocator = arena.allocator();
//! let a: &Foo = allocator.alloc(Baz);
//! let b: &Foo = allocator.alloc(Bar(10));
//! let c: &Foo = allocator.alloc(Bar(14));
//! a.speak();
//! b.speak();
//! c.speak();
//! // Storing 0-sized types can give some interesting results
//! println!("a = {:p}", a as *const Foo);
//! println!("b = {:p}", b as *const Foo);
//! println!("c = {:p}", c as *const Foo);
//! ```
//!
//! ## Blockers
//!
//! - placement\_in\_syntax and placement\_new\_protocol are required,
//! see https://github.com/rust-lang/rust/issues/27779
use std::cell::RefCell;
use std::{cmp, mem, ptr};
/// A block of bytes used to back allocations requested from the `MemoryArena`.
struct Block {
buffer: Vec<u8>,
size: usize,
}
impl Block {
/// Create a new block of some fixed size, in bytes
fn new(size: usize) -> Block {
Block {
buffer: Vec::with_capacity(size),
size: 0,
}
}
/// Reserve `size` bytes at alignment `align`. Returns null if the block doesn't
/// have enough room.
unsafe fn reserve(&mut self, size: usize, align: usize) -> *mut u8 {
if self.has_room(size, align) {
let align_offset =
align_address(self.buffer.as_ptr().offset(self.size as isize), align);
let ptr = self.buffer
.as_mut_ptr()
.offset((self.size + align_offset) as isize);
self.size += size + align_offset;
ptr
} else {
ptr::null_mut()
}
}
/// Check if this block has `size` bytes available at alignment `align`
fn has_room(&self, size: usize, align: usize) -> bool {
let ptr = unsafe { self.buffer.as_ptr().offset(self.size as isize) };
let align_offset = align_address(ptr, align);
self.buffer.capacity() - self.size >= size + align_offset
}
}
/// Compute the number of bytes we need to offset the `ptr` by to align
/// it to the desired alignment.
fn align_address(ptr: *const u8, align: usize) -> usize {
let addr = ptr as usize;
if addr % align != 0 {
align - addr % align
} else {
0
}
}
/// Provides the backing storage to serve allocations requested by an `Allocator`.
///
/// The `MemoryArena` allocates blocks of fixed size on demand as its existing
/// blocks get filled by allocation requests. To make allocations in the
/// arena use the `Allocator` returned by `allocator`. Only one `Allocator`
/// can be active for an arena at a time, after the allocator is dropped
/// the space used by its allocations is made available again.
pub struct MemoryArena {
blocks: Vec<Block>,
block_size: usize,
}
impl MemoryArena {
/// Create a new `MemoryArena` with the requested block size (in MB).
/// The arena will allocate one initial block on creation, and further
/// blocks of `block_size_mb` size, or larger if needed to meet a large
/// allocation, on demand as allocations are made.
pub fn new(block_size_mb: usize) -> MemoryArena {
let block_size = block_size_mb * 1024 * 1024;
MemoryArena {
blocks: vec![Block::new(block_size)],
block_size: block_size,
}
}
/// Get an allocator for the arena. Only a single `Allocator` can be
/// active for an arena at a time. Upon destruction of the `Allocator`
/// its allocated data is marked available again.
pub fn allocator(&mut self) -> Allocator {
Allocator {
arena: RefCell::new(self),
}
}
/// Reserve a chunk of bytes in some block of the memory arena
unsafe fn reserve(&mut self, size: usize, align: usize) -> *mut u8 {
for b in &mut self.blocks[..] {
if b.has_room(size, align) {
return b.reserve(size, align);
}
}
// No free blocks with enough room, we have to allocate. We also make
// sure we've got align bytes of padding available as we don't assume
// anything about the alignment of the underlying buffer.
let new_block_size = cmp::max(self.block_size, size + align);
self.blocks.push(Block::new(new_block_size));
let b = &mut self.blocks.last_mut().unwrap();
b.reserve(size, align)
}
}
/// The allocator provides exclusive access to the memory arena, allowing
/// for allocation of objects in the arena.
///
/// Objects allocated by an allocated cannot outlive it, upon destruction
/// of the allocator the memory space it requested will be made available
/// again. **Drops of allocated objects are not called**, only
/// types which are `Sized + Copy` can be safely stored.
pub struct Allocator<'a> {
arena: RefCell<&'a mut MemoryArena>,
}
impl<'a> Allocator<'a> {
/// Get a dynamically sized slice of data from the allocator. The
/// contents of the slice will be unintialized.
pub fn alloc_slice<T: Sized + Copy>(&self, len: usize) -> &mut [T] {
let mut arena = self.arena.borrow_mut();
let size = len * mem::size_of::<T>();
unsafe {
let ptr = arena.reserve(size, mem::align_of::<T>()) as *mut T;
std::slice::from_raw_parts_mut(ptr, len)
}
}
pub fn alloc<T: Sized + Copy>(&self, object: T) -> &mut T {
assert!(!mem::needs_drop::<T>());
// assert!(mem::size_of::<T>() != 0);
let mut arena = self.arena.borrow_mut();
unsafe {
let ptr = arena.reserve(mem::size_of::<T>(), mem::align_of::<T>());
ptr::write(ptr as *mut T, object);
&mut *(ptr as *mut T)
}
}
}
impl<'a> Drop for Allocator<'a> {
/// Upon dropping the allocator we mark all the blocks in the arena
/// as empty again, "releasing" our allocations.
fn drop(&mut self) {
let mut arena = self.arena.borrow_mut();
for b in &mut arena.blocks[..] {
b.size = 0;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn aligner() {
assert_eq!(align_address(4 as *const u8, 4), 0);
assert_eq!(align_address(5 as *const u8, 4), 3);
assert_eq!(align_address(17 as *const u8, 1), 0);
}
#[test]
fn block() {
let mut b = Block::new(16);
assert!(b.has_room(16, 1));
let a = unsafe { b.reserve(3, 1) };
let c = unsafe { b.reserve(4, 4) };
assert_eq!(c as usize - a as usize, 4);
// This check is kind of assuming that the block's buffer
// is at least 4-byte aligned which is probably a safe assumption.
assert_eq!(b.size, 8);
assert!(!b.has_room(32, 4));
let d = unsafe { b.reserve(32, 4) };
assert_eq!(d, ptr::null_mut());
}
#[test]
fn memory_arena() {
let mut arena = MemoryArena::new(1);
let a = unsafe { arena.reserve(1024, 4) };
assert_eq!(align_address(a, 4), 0);
let two_mb = 2 * 1024 * 1024;
let b = unsafe { arena.reserve(two_mb, 32) };
assert_eq!(align_address(b, 32), 0);
assert_eq!(arena.blocks.len(), 2);
assert_eq!(arena.blocks[1].buffer.capacity(), two_mb + 32);
}
}
| true |
1dbcc26a6d8426a596e8138f0e0a47529252b843
|
Rust
|
kbknapp/usbwatch-rs
|
/src/state.rs
|
UTF-8
| 5,912 | 2.921875 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
use std::{
collections::HashMap,
fs::{self, File},
path::Path,
};
use tracing::{self, debug, info, span, Level};
use yaml_rust::YamlLoader;
use crate::{
rule::{Rule, Rules},
usb::{UsbDevice, UsbDevices, UsbPort, UsbPorts},
};
#[derive(Default)]
pub struct State {
ports: Vec<UsbPort>,
devices: Vec<UsbDevice>,
active_devices: Vec<usize>,
// Port->Device
slot_map: HashMap<usize, Option<usize>>,
// Device->Port
rev_slot_map: HashMap<usize, usize>,
pub rules: Vec<Rule>,
}
impl State {
pub fn new() -> Self { Self::default() }
pub fn devices_from_file<P: AsRef<Path>>(&mut self, path: P) {
let span = span!(Level::TRACE, "fn devices_from_file", file = ?path.as_ref());
let _enter = span.enter();
let file = File::open(path).unwrap();
let devices: UsbDevices = serde_yaml::from_reader(file).unwrap();
info!(num_devs= %devices.devices.len(), "Found Devices");
for device in devices.devices.into_iter() {
debug!(device = %device, "Adding Device");
self.add_device(device);
}
}
pub fn ports_from_file<P: AsRef<Path>>(&mut self, path: P) {
let span = span!(Level::TRACE, "fn ports_from_file", file = ?path.as_ref());
let _enter = span.enter();
let file = File::open(path).unwrap();
let ports: UsbPorts = serde_yaml::from_reader(file).unwrap();
info!(num_ports= %ports.ports.len(), "Found Ports");
for port in ports.ports.into_iter() {
debug!(port = %port, "Adding Port");
self.add_port(port);
}
}
pub fn rules_from_file<P: AsRef<Path>>(&mut self, path: P) {
let span = span!(Level::TRACE, "fn rules_from_file", file = ?path.as_ref());
let _enter = span.enter();
let buf = fs::read_to_string(path).unwrap();
let rules = Rules::from(&YamlLoader::load_from_str(&buf).unwrap()[0]);
info!(num_rules= %rules.rules.len(), "Found Rules");
for rule in rules.rules.into_iter() {
debug!(ruel = ?rule.name, "Adding Rule");
self.rules.push(rule);
}
}
pub fn add_port(&mut self, port: UsbPort) {
let span = span!(Level::TRACE, "fn add_port", port = %port);
let _enter = span.enter();
for p in self.ports.iter() {
if p == &port {
debug!("Port already exists; returning");
return;
}
}
self.ports.push(port);
debug!(key = self.ports.len(), "Slotting empty port");
self.slot_map.entry(self.ports.len()).or_insert(None);
}
pub fn add_device(&mut self, device: UsbDevice) {
let span = span!(Level::TRACE, "fn add_device", device = %device);
let _enter = span.enter();
if self.devices.contains(&device) {
debug!("Device already exists; returning");
return;
}
self.devices.push(device);
}
pub fn add_and_slot_device(&mut self, device: UsbDevice, port: UsbPort) {
let span = span!(Level::TRACE, "fn add_and_slot_device", device = %device, port = %port);
let _enter = span.enter();
self.add_port(port.clone());
self.add_device(device.clone());
for (i, p) in self.ports.iter().enumerate() {
debug!(i=i, port = %p, "Iter ports");
if p == &port {
debug!("Matched Port");
for (j, d) in self.devices.iter().enumerate() {
debug!(j=j, device = %d, "Iter devices");
if d == &device {
debug!("Matched device");
debug!(
i = i,
j = j,
"Setting port slot {} to device index {}",
i,
j
);
*self.slot_map.entry(i).or_insert(Some(j)) = Some(j);
debug!(
i = i,
j = j,
"Setting reverse slot map device index {} to slot {}",
j,
i
);
*self.rev_slot_map.entry(j).or_insert(i) = i;
debug!("Activating device index {}", j);
self.active_devices.push(j);
debug!("Returning");
break;
}
}
}
}
}
pub fn rm_and_unslot_device(&mut self, device: UsbDevice) {
let span = span!(Level::TRACE, "fn rm_and_unslot_device", device = %device);
let _enter = span.enter();
for (i, d) in self.devices.iter().enumerate() {
debug!(i=i, device = %d, "Iter devices");
if d == &device {
debug!("Matched device");
if let Some(p) = self.rev_slot_map.get_mut(&i) {
debug!(
"Found port index {} via device reverse slot map index {}",
p, i
);
debug!("Setting slot map {} to None", p);
*self.slot_map.entry(*p).or_insert(None) = None;
}
let mut to_rem = None;
for (j, idx) in self.active_devices.iter().enumerate() {
if *idx == i {
to_rem = Some(j);
break;
}
}
if let Some(idx) = to_rem {
debug!("Removing device index {} from active devices", idx);
self.active_devices.swap_remove(idx);
}
debug!("Returning");
break;
}
}
}
}
| true |
e942eef8a7b54c5bea48e16e4c8115fb3e7edce2
|
Rust
|
baitcenter/gdlk
|
/api/src/util.rs
|
UTF-8
| 1,566 | 3.078125 | 3 |
[] |
no_license
|
//! General utility functions and types.
#[cfg(test)]
pub use tests::*;
use diesel::{r2d2::ConnectionManager, PgConnection};
/// Type aliases for DB connections
pub type Pool = r2d2::Pool<ConnectionManager<PgConnection>>;
pub type PooledConnection =
r2d2::PooledConnection<ConnectionManager<PgConnection>>;
#[cfg(test)]
mod tests {
use super::*;
use diesel::Connection;
/// Helper to create a database connection for testing. This establishes
/// the connection, then starts a test transaction on it so that no changes
/// will actually be written to the DB.
pub fn test_db_conn() -> PooledConnection {
let database_url = std::env::var("DATABASE_URL").unwrap();
// We want to build a connection pool so that we can pass into APIs
// that expect owned, pooled connections. The pool will also
// automatically close our connections for us.
let manager = diesel::r2d2::ConnectionManager::new(&database_url);
let pool = r2d2::Pool::builder().max_size(5).build(manager).unwrap();
let conn = pool.get().unwrap();
(&conn as &PgConnection).begin_test_transaction().unwrap();
conn
}
/// Assert that the first value is an Err, and that its string form matches
/// the second argument.
#[macro_export]
macro_rules! assert_err {
($res:expr, $msg:tt $(,)?) => {
match $res {
Ok(_) => panic!("Expected Err, got Ok"),
Err(err) => assert_eq!(format!("{}", err), $msg),
}
};
}
}
| true |
69b8a13cedffdf5f63a897310fe5a57949ab4a19
|
Rust
|
chromium/chromium
|
/third_party/rust/semver/v1/crate/src/lib.rs
|
UTF-8
| 19,858 | 2.828125 | 3 |
[
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! [![github]](https://github.com/dtolnay/semver) [![crates-io]](https://crates.io/crates/semver) [![docs-rs]](https://docs.rs/semver)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K
//!
//! <br>
//!
//! A parser and evaluator for Cargo's flavor of Semantic Versioning.
//!
//! Semantic Versioning (see <https://semver.org>) is a guideline for how
//! version numbers are assigned and incremented. It is widely followed within
//! the Cargo/crates.io ecosystem for Rust.
//!
//! <br>
//!
//! # Example
//!
//! ```
//! use semver::{BuildMetadata, Prerelease, Version, VersionReq};
//!
//! fn main() {
//! let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
//!
//! // Check whether this requirement matches version 1.2.3-alpha.1 (no)
//! let version = Version {
//! major: 1,
//! minor: 2,
//! patch: 3,
//! pre: Prerelease::new("alpha.1").unwrap(),
//! build: BuildMetadata::EMPTY,
//! };
//! assert!(!req.matches(&version));
//!
//! // Check whether it matches 1.3.0 (yes it does)
//! let version = Version::parse("1.3.0").unwrap();
//! assert!(req.matches(&version));
//! }
//! ```
//!
//! <br><br>
//!
//! # Scope of this crate
//!
//! Besides Cargo, several other package ecosystems and package managers for
//! other languages also use SemVer: RubyGems/Bundler for Ruby, npm for
//! JavaScript, Composer for PHP, CocoaPods for Objective-C...
//!
//! The `semver` crate is specifically intended to implement Cargo's
//! interpretation of Semantic Versioning.
//!
//! Where the various tools differ in their interpretation or implementation of
//! the spec, this crate follows the implementation choices made by Cargo. If
//! you are operating on version numbers from some other package ecosystem, you
//! will want to use a different semver library which is appropriate to that
//! ecosystem.
//!
//! The extent of Cargo's SemVer support is documented in the *[Specifying
//! Dependencies]* chapter of the Cargo reference.
//!
//! [Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
#![doc(html_root_url = "https://docs.rs/semver/1.0.4")]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(all(not(feature = "std"), not(no_alloc_crate)), no_std)]
#![cfg_attr(not(no_unsafe_op_in_unsafe_fn_lint), deny(unsafe_op_in_unsafe_fn))]
#![cfg_attr(no_unsafe_op_in_unsafe_fn_lint, allow(unused_unsafe))]
#![cfg_attr(no_str_strip_prefix, allow(unstable_name_collisions))]
#![allow(
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::doc_markdown,
clippy::items_after_statements,
clippy::match_bool,
clippy::missing_errors_doc,
clippy::must_use_candidate,
clippy::needless_doctest_main,
clippy::option_if_let_else,
clippy::ptr_as_ptr,
clippy::redundant_else,
clippy::semicolon_if_nothing_returned, // https://github.com/rust-lang/rust-clippy/issues/7324
clippy::similar_names,
clippy::unnested_or_patterns,
clippy::unseparated_literal_suffix,
clippy::wildcard_imports
)]
#[cfg(not(no_alloc_crate))]
extern crate alloc;
mod backport;
mod display;
mod error;
mod eval;
mod identifier;
mod impls;
mod parse;
#[cfg(feature = "serde")]
mod serde;
use crate::alloc::vec::Vec;
use crate::identifier::Identifier;
use core::str::FromStr;
#[allow(unused_imports)]
use crate::backport::*;
pub use crate::parse::Error;
/// **SemVer version** as defined by <https://semver.org>.
///
/// # Syntax
///
/// - The major, minor, and patch numbers may be any integer 0 through u64::MAX.
/// When representing a SemVer version as a string, each number is written as
/// a base 10 integer. For example, `1.0.119`.
///
/// - Leading zeros are forbidden in those positions. For example `1.01.00` is
/// invalid as a SemVer version.
///
/// - The pre-release identifier, if present, must conform to the syntax
/// documented for [`Prerelease`].
///
/// - The build metadata, if present, must conform to the syntax documented for
/// [`BuildMetadata`].
///
/// - Whitespace is not allowed anywhere in the version.
///
/// # Total ordering
///
/// Given any two SemVer versions, one is less than, greater than, or equal to
/// the other. Versions may be compared against one another using Rust's usual
/// comparison operators.
///
/// - The major, minor, and patch number are compared numerically from left to
/// right, lexicographically ordered as a 3-tuple of integers. So for example
/// version `1.5.0` is less than version `1.19.0`, despite the fact that
/// "1.19.0" < "1.5.0" as ASCIIbetically compared strings and 1.19 < 1.5
/// as real numbers.
///
/// - When major, minor, and patch are equal, a pre-release version is
/// considered less than the ordinary release: version `1.0.0-alpha.1` is
/// less than version `1.0.0`.
///
/// - Two pre-releases of the same major, minor, patch are compared by
/// lexicographic ordering of dot-separated components of the pre-release
/// string.
///
/// - Identifiers consisting of only digits are compared
/// numerically: `1.0.0-pre.8` is less than `1.0.0-pre.12`.
///
/// - Identifiers that contain a letter or hyphen are compared in ASCII sort
/// order: `1.0.0-pre12` is less than `1.0.0-pre8`.
///
/// - Any numeric identifier is always less than any non-numeric
/// identifier: `1.0.0-pre.1` is less than `1.0.0-pre.x`.
///
/// Example: `1.0.0-alpha` < `1.0.0-alpha.1` < `1.0.0-alpha.beta` < `1.0.0-beta` < `1.0.0-beta.2` < `1.0.0-beta.11` < `1.0.0-rc.1` < `1.0.0`
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Version {
pub major: u64,
pub minor: u64,
pub patch: u64,
pub pre: Prerelease,
pub build: BuildMetadata,
}
/// **SemVer version requirement** describing the intersection of some version
/// comparators, such as `>=1.2.3, <1.8`.
///
/// # Syntax
///
/// - Either `*` (meaning "any"), or one or more comma-separated comparators.
///
/// - A [`Comparator`] is an operator ([`Op`]) and a partial version, separated
/// by optional whitespace. For example `>=1.0.0` or `>=1.0`.
///
/// - Build metadata is syntactically permitted on the partial versions, but is
/// completely ignored, as it's never relevant to whether any comparator
/// matches a particular version.
///
/// - Whitespace is permitted around commas and around operators. Whitespace is
/// not permitted within a partial version, i.e. anywhere between the major
/// version number and its minor, patch, pre-release, or build metadata.
#[derive(Default, Clone, Eq, PartialEq, Hash, Debug)]
pub struct VersionReq {
pub comparators: Vec<Comparator>,
}
/// A pair of comparison operator and partial version, such as `>=1.2`. Forms
/// one piece of a VersionReq.
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
pub struct Comparator {
pub op: Op,
pub major: u64,
pub minor: Option<u64>,
/// Patch is only allowed if minor is Some.
pub patch: Option<u64>,
/// Non-empty pre-release is only allowed if patch is Some.
pub pre: Prerelease,
}
/// SemVer comparison operator: `=`, `>`, `>=`, `<`, `<=`, `~`, `^`, `*`.
///
/// # Op::Exact
/// -  **`=I.J.K`** — exactly the version I.J.K
/// -  **`=I.J`** — equivalent to `>=I.J.0, <I.(J+1).0`
/// -  **`=I`** — equivalent to `>=I.0.0, <(I+1).0.0`
///
/// # Op::Greater
/// -  **`>I.J.K`**
/// -  **`>I.J`** — equivalent to `>=I.(J+1).0`
/// -  **`>I`** — equivalent to `>=(I+1).0.0`
///
/// # Op::GreaterEq
/// -  **`>=I.J.K`**
/// -  **`>=I.J`** — equivalent to `>=I.J.0`
/// -  **`>=I`** — equivalent to `>=I.0.0`
///
/// # Op::Less
/// -  **`<I.J.K`**
/// -  **`<I.J`** — equivalent to `<I.J.0`
/// -  **`<I`** — equivalent to `<I.0.0`
///
/// # Op::LessEq
/// -  **`<=I.J.K`**
/// -  **`<=I.J`** — equivalent to `<I.(J+1).0`
/// -  **`<=I`** — equivalent to `<(I+1).0.0`
///
/// # Op::Tilde ("patch" updates)
/// *Tilde requirements allow the **patch** part of the semver version (the third number) to increase.*
/// -  **`~I.J.K`** — equivalent to `>=I.J.K, <I.(J+1).0`
/// -  **`~I.J`** — equivalent to `=I.J`
/// -  **`~I`** — equivalent to `=I`
///
/// # Op::Caret ("compatible" updates)
/// *Caret requirements allow parts that are **right of the first nonzero** part of the semver version to increase.*
/// -  **`^I.J.K`** (for I\>0) — equivalent to `>=I.J.K, <(I+1).0.0`
/// -  **`^0.J.K`** (for J\>0) — equivalent to `>=0.J.K, <0.(J+1).0`
/// -  **`^0.0.K`** — equivalent to `=0.0.K`
/// -  **`^I.J`** (for I\>0 or J\>0) — equivalent to `^I.J.0`
/// -  **`^0.0`** — equivalent to `=0.0`
/// -  **`^I`** — equivalent to `=I`
///
/// # Op::Wildcard
/// -  **`I.J.*`** — equivalent to `=I.J`
/// -  **`I.*`** or **`I.*.*`** — equivalent to `=I`
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
#[cfg_attr(not(no_non_exhaustive), non_exhaustive)]
pub enum Op {
Exact,
Greater,
GreaterEq,
Less,
LessEq,
Tilde,
Caret,
Wildcard,
#[cfg(no_non_exhaustive)] // rustc <1.40
#[doc(hidden)]
__NonExhaustive,
}
/// Optional pre-release identifier on a version string. This comes after `-` in
/// a SemVer version, like `1.0.0-alpha.1`
///
/// # Examples
///
/// Some real world pre-release idioms drawn from crates.io:
///
/// - **[mio]** <code>0.7.0-<b>alpha.1</b></code> — the most common style
/// for numbering pre-releases.
///
/// - **[pest]** <code>1.0.0-<b>beta.8</b></code>, <code>1.0.0-<b>rc.0</b></code>
/// — this crate makes a distinction between betas and release
/// candidates.
///
/// - **[sassers]** <code>0.11.0-<b>shitshow</b></code> — ???.
///
/// - **[atomic-utils]** <code>0.0.0-<b>reserved</b></code> — a squatted
/// crate name.
///
/// [mio]: https://crates.io/crates/mio
/// [pest]: https://crates.io/crates/pest
/// [atomic-utils]: https://crates.io/crates/atomic-utils
/// [sassers]: https://crates.io/crates/sassers
///
/// *Tip:* Be aware that if you are planning to number your own pre-releases,
/// you should prefer to separate the numeric part from any non-numeric
/// identifiers by using a dot in between. That is, prefer pre-releases
/// `alpha.1`, `alpha.2`, etc rather than `alpha1`, `alpha2` etc. The SemVer
/// spec's rule for pre-release precedence has special treatment of numeric
/// components in the pre-release string, but only if there are no non-digit
/// characters in the same dot-separated component. So you'd have `alpha.2` <
/// `alpha.11` as intended, but `alpha11` < `alpha2`.
///
/// # Syntax
///
/// Pre-release strings are a series of dot separated identifiers immediately
/// following the patch version. Identifiers must comprise only ASCII
/// alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must not be
/// empty. Numeric identifiers must not include leading zeros.
///
/// # Total ordering
///
/// Pre-releases have a total order defined by the SemVer spec. It uses
/// lexicographic ordering of dot-separated components. Identifiers consisting
/// of only digits are compared numerically. Otherwise, identifiers are compared
/// in ASCII sort order. Any numeric identifier is always less than any
/// non-numeric identifier.
///
/// Example: `alpha` < `alpha.85` < `alpha.90` < `alpha.200` < `alpha.0a` < `alpha.1a0` < `alpha.a` < `beta`
#[derive(Default, Clone, Eq, PartialEq, Hash)]
pub struct Prerelease {
identifier: Identifier,
}
/// Optional build metadata identifier. This comes after `+` in a SemVer
/// version, as in `0.8.1+zstd.1.5.0`.
///
/// # Examples
///
/// Some real world build metadata idioms drawn from crates.io:
///
/// - **[libgit2-sys]** <code>0.12.20+<b>1.1.0</b></code> — for this
/// crate, the build metadata indicates the version of the C libgit2 library
/// that the Rust crate is built against.
///
/// - **[mashup]** <code>0.1.13+<b>deprecated</b></code> — just the word
/// "deprecated" for a crate that has been superseded by another. Eventually
/// people will take notice of this in Cargo's build output where it lists the
/// crates being compiled.
///
/// - **[google-bigquery2]** <code>2.0.4+<b>20210327</b></code> — this
/// library is automatically generated from an official API schema, and the
/// build metadata indicates the date on which that schema was last captured.
///
/// - **[fbthrift-git]** <code>0.0.6+<b>c7fcc0e</b></code> — this crate is
/// published from snapshots of a big company monorepo. In monorepo
/// development, there is no concept of versions, and all downstream code is
/// just updated atomically in the same commit that breaking changes to a
/// library are landed. Therefore for crates.io purposes, every published
/// version must be assumed to be incompatible with the previous. The build
/// metadata provides the source control hash of the snapshotted code.
///
/// [libgit2-sys]: https://crates.io/crates/libgit2-sys
/// [mashup]: https://crates.io/crates/mashup
/// [google-bigquery2]: https://crates.io/crates/google-bigquery2
/// [fbthrift-git]: https://crates.io/crates/fbthrift-git
///
/// # Syntax
///
/// Build metadata is a series of dot separated identifiers immediately
/// following the patch or pre-release version. Identifiers must comprise only
/// ASCII alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must
/// not be empty. Leading zeros *are* allowed, unlike any other place in the
/// SemVer grammar.
///
/// # Total ordering
///
/// Build metadata is ignored in evaluating `VersionReq`; it plays no role in
/// whether a `Version` matches any one of the comparison operators.
///
/// However for comparing build metadatas among one another, they do have a
/// total order which is determined by lexicographic ordering of dot-separated
/// components. Identifiers consisting of only digits are compared numerically.
/// Otherwise, identifiers are compared in ASCII sort order. Any numeric
/// identifier is always less than any non-numeric identifier.
///
/// Example: `demo` < `demo.85` < `demo.90` < `demo.090` < `demo.200` < `demo.1a0` < `demo.a` < `memo`
#[derive(Default, Clone, Eq, PartialEq, Hash)]
pub struct BuildMetadata {
identifier: Identifier,
}
impl Version {
/// Create `Version` with an empty pre-release and build metadata.
///
/// Equivalent to:
///
/// ```
/// # use semver::{BuildMetadata, Prerelease, Version};
/// #
/// # const fn new(major: u64, minor: u64, patch: u64) -> Version {
/// Version {
/// major,
/// minor,
/// patch,
/// pre: Prerelease::EMPTY,
/// build: BuildMetadata::EMPTY,
/// }
/// # }
/// ```
pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
Version {
major,
minor,
patch,
pre: Prerelease::EMPTY,
build: BuildMetadata::EMPTY,
}
}
/// Create `Version` by parsing from string representation.
///
/// # Errors
///
/// Possible reasons for the parse to fail include:
///
/// - `1.0` — too few numeric components. A SemVer version must have
/// exactly three. If you are looking at something that has fewer than
/// three numbers in it, it's possible it is a `VersionReq` instead (with
/// an implicit default `^` comparison operator).
///
/// - `1.0.01` — a numeric component has a leading zero.
///
/// - `1.0.unknown` — unexpected character in one of the components.
///
/// - `1.0.0-` or `1.0.0+` — the pre-release or build metadata are
/// indicated present but empty.
///
/// - `1.0.0-alpha_123` — pre-release or build metadata have something
/// outside the allowed characters, which are `0-9`, `A-Z`, `a-z`, `-`,
/// and `.` (dot).
///
/// - `23456789999999999999.0.0` — overflow of a u64.
pub fn parse(text: &str) -> Result<Self, Error> {
Version::from_str(text)
}
}
impl VersionReq {
/// A `VersionReq` with no constraint on the version numbers it matches.
/// Equivalent to `VersionReq::parse("*").unwrap()`.
///
/// In terms of comparators this is equivalent to `>=0.0.0`.
///
/// Counterintuitively a `*` VersionReq does not match every possible
/// version number. In particular, in order for *any* `VersionReq` to match
/// a pre-release version, the `VersionReq` must contain at least one
/// `Comparator` that has an explicit major, minor, and patch version
/// identical to the pre-release being matched, and that has a nonempty
/// pre-release component. Since `*` is not written with an explicit major,
/// minor, and patch version, and does not contain a nonempty pre-release
/// component, it does not match any pre-release versions.
#[cfg(not(no_const_vec_new))] // rustc <1.39
pub const STAR: Self = VersionReq {
comparators: Vec::new(),
};
/// Create `VersionReq` by parsing from string representation.
///
/// # Errors
///
/// Possible reasons for the parse to fail include:
///
/// - `>a.b` — unexpected characters in the partial version.
///
/// - `@1.0.0` — unrecognized comparison operator.
///
/// - `^1.0.0, ` — unexpected end of input.
///
/// - `>=1.0 <2.0` — missing comma between comparators.
///
/// - `*.*` — unsupported wildcard syntax.
pub fn parse(text: &str) -> Result<Self, Error> {
VersionReq::from_str(text)
}
/// Evaluate whether the given `Version` satisfies the version requirement
/// described by `self`.
pub fn matches(&self, version: &Version) -> bool {
eval::matches_req(self, version)
}
}
impl Comparator {
pub fn parse(text: &str) -> Result<Self, Error> {
Comparator::from_str(text)
}
pub fn matches(&self, version: &Version) -> bool {
eval::matches_comparator(self, version)
}
}
impl Prerelease {
pub const EMPTY: Self = Prerelease {
identifier: Identifier::empty(),
};
pub fn new(text: &str) -> Result<Self, Error> {
Prerelease::from_str(text)
}
pub fn as_str(&self) -> &str {
self.identifier.as_str()
}
pub fn is_empty(&self) -> bool {
self.identifier.is_empty()
}
}
impl BuildMetadata {
pub const EMPTY: Self = BuildMetadata {
identifier: Identifier::empty(),
};
pub fn new(text: &str) -> Result<Self, Error> {
BuildMetadata::from_str(text)
}
pub fn as_str(&self) -> &str {
self.identifier.as_str()
}
pub fn is_empty(&self) -> bool {
self.identifier.is_empty()
}
}
| true |
48d11db5ccb940b2f1803cbd9b0a3329d5c2043c
|
Rust
|
vikrem/linkerd2-proxy
|
/linkerd/proxy/detect/src/lib.rs
|
UTF-8
| 4,987 | 2.609375 | 3 |
[
"Apache-2.0"
] |
permissive
|
use linkerd2_error::Error;
use linkerd2_io::{BoxedIo, Peek};
use linkerd2_proxy_core as core;
use pin_project::{pin_project, project};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// A strategy for detecting values out of a client transport.
pub trait Detect<T>: Clone {
type Target;
/// If the target can be determined by the target alone (i.e. because it's
/// known to be a server-speaks-first target), Otherwise, the target is
/// returned as an error.
fn detect_before_peek(&self, target: T) -> Result<Self::Target, T>;
/// If the target could not be determined without peeking, then used the
/// peeked prefix to determine the protocol.
fn detect_peeked_prefix(&self, target: T, prefix: &[u8]) -> Self::Target;
}
#[derive(Debug, Clone)]
pub struct DetectProtocolLayer<D> {
detect: D,
peek_capacity: usize,
}
#[derive(Debug, Clone)]
pub struct DetectProtocol<D, A> {
detect: D,
accept: A,
peek_capacity: usize,
}
#[pin_project]
pub struct AcceptFuture<T, D, A>
where
D: Detect<T>,
A: core::listen::Accept<(D::Target, BoxedIo)>,
{
#[pin]
state: State<T, D, A>,
}
#[pin_project]
enum State<T, D, A>
where
D: Detect<T>,
A: core::listen::Accept<(D::Target, BoxedIo)>,
{
Accept(#[pin] A::Future),
Detect {
detect: D,
accept: A,
#[pin]
inner: PeekAndDetect<T, D>,
},
}
#[pin_project]
pub enum PeekAndDetect<T, D: Detect<T>> {
// Waiting for accept to become ready.
Detected(Option<(D::Target, BoxedIo)>),
// Waiting for the prefix to be read.
Peek(Option<T>, #[pin] Peek<BoxedIo>),
}
impl<D> DetectProtocolLayer<D> {
const DEFAULT_CAPACITY: usize = 8192;
pub fn new(detect: D) -> Self {
Self {
detect,
peek_capacity: Self::DEFAULT_CAPACITY,
}
}
}
impl<D: Clone, A> tower::layer::Layer<A> for DetectProtocolLayer<D> {
type Service = DetectProtocol<D, A>;
fn layer(&self, accept: A) -> Self::Service {
Self::Service {
detect: self.detect.clone(),
peek_capacity: self.peek_capacity,
accept,
}
}
}
impl<T, D, A> tower::Service<(T, BoxedIo)> for DetectProtocol<D, A>
where
D: Detect<T>,
A: core::listen::Accept<(D::Target, BoxedIo)> + Clone,
D::Target: std::fmt::Debug,
{
type Response = A::ConnectionFuture;
type Error = Error;
type Future = AcceptFuture<T, D, A>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.accept.poll_ready(cx).map_err(Into::into)
}
fn call(&mut self, (target, io): (T, BoxedIo)) -> Self::Future {
match self.detect.detect_before_peek(target) {
Ok(detected) => AcceptFuture {
state: State::Accept(self.accept.accept((detected, io))),
},
Err(target) => AcceptFuture {
state: State::Detect {
detect: self.detect.clone(),
accept: self.accept.clone(),
inner: PeekAndDetect::Peek(
Some(target),
Peek::with_capacity(self.peek_capacity, io),
),
},
},
}
}
}
impl<T, D, A> Future for AcceptFuture<T, D, A>
where
D: Detect<T>,
A: core::listen::Accept<(D::Target, BoxedIo)>,
A::Error: Into<Error>,
{
type Output = Result<A::ConnectionFuture, Error>;
#[project]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
loop {
#[project]
match this.state.as_mut().project() {
State::Accept(fut) => return fut.poll(cx).map_err(Into::into),
State::Detect {
detect,
accept,
mut inner,
} =>
{
#[project]
match inner.as_mut().project() {
PeekAndDetect::Peek(target, peek) => {
let io = futures::ready!(peek.poll(cx))?;
let target = detect.detect_peeked_prefix(
target.take().expect("polled after complete"),
io.prefix().as_ref(),
);
inner.set(PeekAndDetect::Detected(Some((target, BoxedIo::new(io)))));
}
PeekAndDetect::Detected(io) => {
futures::ready!(accept.poll_ready(cx)).map_err(Into::into)?;
let io = io.take().expect("polled after complete");
let accept = accept.accept(io);
this.state.set(State::Accept(accept));
}
}
}
}
}
}
}
| true |
b6d81c360fc9e5655ca5c00740708a5d33511de0
|
Rust
|
aldhsu/fuzzy-matcher
|
/src/skim.rs
|
UTF-8
| 10,511 | 2.8125 | 3 |
[
"MIT"
] |
permissive
|
///! The fuzzy matching algorithm used by skim
///! It focus more on path matching
///
///! # Example:
///! ```edition2018
///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};
///!
///! assert_eq!(None, fuzzy_match("abc", "abx"));
///! assert!(fuzzy_match("axbycz", "abc").is_some());
///! assert!(fuzzy_match("axbycz", "xyz").is_some());
///!
///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
///! assert_eq!(indices, [0, 2, 4]);
///!
///! ```
///!
///! It is modeled after <https://github.com/felipesere/icepick.git>
use std::cmp::max;
use crate::util::*;
const BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how many scores the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
assert_eq!(None, fuzzy_match("abc", "abx"));
assert!(fuzzy_match("axbycz", "abc").is_some());
assert!(fuzzy_match("axbycz", "xyz").is_some());
assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
assert_eq!(
"[H]ello, [世]界",
&wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
);
}
#[test]
fn test_match_quality() {
// case
// assert_order("monad", &["monad", "Monad", "mONAD"]);
// initials
assert_order("ab", &["ab", "aoo_boo", "acb"]);
assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
assert_order(
"cc",
&[
"camel case",
"camelCase",
"camelcase",
"CamelCase",
"camel ace",
],
);
assert_order(
"Da.Te",
&["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"],
);
// prefix
assert_order("is", &["isIEEE", "inSuf"]);
// shorter
assert_order("ma", &["map", "many", "maximum"]);
assert_order("print", &["printf", "sprintf"]);
// score(PRINT) = kMinScore
assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]);
// score(PRINT) > kMinScore
assert_order("Int", &["int", "INT", "PRINT"]);
}
}
| true |
c32c64587b0b9c1ef7b7f2f9c2a046575b6ebf8e
|
Rust
|
lukisko/rust_chap_10
|
/src/main.rs
|
UTF-8
| 1,857 | 3.65625 | 4 |
[] |
no_license
|
fn main() {
println!("Hello, world!");
let poin = Point {x:10.0,y:20.0};
println!("x part of point is {} and distance from origin is {}.",poin.x(),poin.distance_from_origin());
}
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &number in list {
if number > largest {
largest = number;
}
}
return largest;
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
impl Point<f32> {
fn distance_from_origin(&self) -> f32 {
(self.x.powi(2) + self.y.powi(2)).sqrt()
}
}
//trait is interface
pub trait Summary{
fn summarize(&self) -> String{
String::from("Read more...")
}
}
pub struct NewsArticle {
pub headline: String,
pub location: String,
pub author: String,
pub content: String,
}
//using default implementation
impl Summary for NewsArticle {}
//to return trait
pub fn returns_summarizable() -> impl Summary {
Tweet {
username: String::from("horse_ebooks"),
content: String::from(
"of course, as you probably already know, people",
),
reply: false,
retweet: false,
}
}
pub struct Tweet {
pub username: String,
pub content: String,
pub reply: bool,
pub retweet: bool,
}
impl Summary for Tweet {
fn summarize(&self) -> String {
format!("{}: {}", self.username, self.content)
}
}
pub trait Sum {}
// to play what should types implement just ad plus and they need to implement both
pub trait Test {
fn some_function<T, U>(t: &T, u: &U) -> i32
where T: Summary + Clone,
U: Clone + Sum;
}
//lifetime of input and output (shorter one is used)
fn longest<'a>(x: &'a str, y: &'a str) -> &'a str {
if x.len() > y.len() {
x
} else {
y
}
}
| true |
74c1c06e568a2010de3eb55578c441a9c2c94dc9
|
Rust
|
SnakeSolid/rust-gantt-diagram
|
/src/database/mod.rs
|
UTF-8
| 4,849 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
mod error;
pub use self::error::DatabaseError;
pub use self::error::DatabaseResult;
use fallible_iterator::FallibleIterator;
use postgres::params::ConnectParams;
use postgres::params::Host;
use postgres::Connection;
use postgres::TlsMode;
use time::strptime;
use time::Timespec;
#[derive(Debug)]
pub struct PostgreSQL {
server: String,
port: u16,
user: String,
password: String,
}
const DEFAULT_DATABASE: &str = "postgres";
const FETCH_LIMIT: i32 = 1_000;
impl PostgreSQL {
pub fn new(server: &str, port: u16, user: &str, password: &str) -> PostgreSQL {
PostgreSQL {
server: server.into(),
port: port,
user: user.into(),
password: password.into(),
}
}
pub fn database_names(&self) -> DatabaseResult<Vec<String>> {
let connection = self.connect(None)?;
let mut result = Vec::new();
for row in &connection
.query(include_str!("sql/databases.sql"), &[])
.map_err(DatabaseError::query_execution_error)?
{
let name = row
.get_opt(0)
.ok_or_else(DatabaseError::column_not_exists)?;
result.push(
name.map_err(|error| DatabaseError::conversion_error(error, "database name"))?,
);
}
Ok(result)
}
pub fn stage_names(&self, database: &str) -> DatabaseResult<Vec<String>> {
let connection = self.connect(Some(database))?;
let mut result = Vec::new();
for row in &connection
.query(include_str!("sql/stages.sql"), &[])
.map_err(DatabaseError::query_execution_error)?
{
let name = row
.get_opt(0)
.ok_or_else(DatabaseError::column_not_exists)?;
result
.push(name.map_err(|error| DatabaseError::conversion_error(error, "maker name"))?);
}
Ok(result)
}
pub fn data<F, E>(
&self,
database: &str,
stage: &str,
mut callback: F,
) -> DatabaseResult<Result<(), E>>
where
F: FnMut(&str, Timespec, Timespec, &str, &str) -> Result<(), E>,
{
let connection = self.connect(Some(database))?;
let statement = connection
.prepare(include_str!("sql/data.sql"))
.map_err(DatabaseError::prepare_query_error)?;
let transaction = connection
.transaction()
.map_err(DatabaseError::transaction_error)?;
let mut rows = statement
.lazy_query(&transaction, &[&stage], FETCH_LIMIT)
.map_err(DatabaseError::query_execution_error)?;
while let Some(row) = rows.next().map_err(DatabaseError::query_execution_error)? {
let name: String = row
.get_opt(0)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "name"))?;
let start_time_str: String = row
.get_opt(1)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "start tame"))?;
let end_time_str: String = row
.get_opt(2)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "end time"))?;
let group: String = row
.get_opt(3)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "group name"))?;
let thread: String = row
.get_opt(4)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "thread name"))?;
let start_time = strptime(&start_time_str, "%Y-%m-%d %H:%M:%S,%f")
.map_err(DatabaseError::time_parse_error)?
.to_timespec();
let end_time = strptime(&end_time_str, "%Y-%m-%d %H:%M:%S,%f")
.map_err(DatabaseError::time_parse_error)?
.to_timespec();
if let Err(err) = callback(&name, start_time, end_time, &group, &thread) {
return Ok(Err(err));
}
}
Ok(Ok(()))
}
fn connect(&self, database: Option<&str>) -> DatabaseResult<Connection> {
let password = Some(self.password.as_str()).filter(|w| !w.is_empty());
let params = ConnectParams::builder()
.port(self.port)
.user(&self.user, password)
.database(database.unwrap_or(DEFAULT_DATABASE))
.build(Host::Tcp(self.server.clone()));
Connection::connect(params, TlsMode::None).map_err(DatabaseError::connection_error)
}
}
| true |
d7165d8f059af78c4249d911b03d72631a456a78
|
Rust
|
adcopeland/peuler
|
/rust/peuler/src/bin/p10.rs
|
UTF-8
| 147 | 2.890625 | 3 |
[] |
no_license
|
fn main() {
let mut sum: u64 = 0;
for i in 1..2000000 {
if peuler::is_prime(i) {
sum += i as u64
}
}
println!("{}", sum);
}
| true |
a863b5d07a247b004ad140a36da00e116924662e
|
Rust
|
rhysd/Shiba
|
/v2/src/markdown/parser.rs
|
UTF-8
| 36,182 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
use super::sanitizer::{should_rebase_url, Sanitizer, SlashPath};
use crate::renderer::RawMessageWriter;
use aho_corasick::AhoCorasick;
use emojis::Emoji;
use memchr::{memchr_iter, Memchr};
use pulldown_cmark::{
Alignment, CodeBlockKind, CowStr, Event, HeadingLevel, LinkType, MathDisplay, Options, Parser,
Tag,
};
use std::cmp;
use std::collections::HashMap;
use std::io::{Read, Result, Write};
use std::iter::Peekable;
use std::marker::PhantomData;
use std::path::Path;
pub type Range = std::ops::Range<usize>;
pub trait TextVisitor: Default {
fn visit(&mut self, text: &str, range: &Range);
}
impl TextVisitor for () {
fn visit(&mut self, _text: &str, _range: &Range) {}
}
#[derive(Clone, Copy, Debug)]
pub enum TokenKind {
Normal,
MatchOther,
MatchCurrent,
MatchOtherStart,
MatchCurrentStart,
}
impl TokenKind {
fn tag(self) -> &'static str {
match self {
Self::MatchOther => "match",
Self::MatchCurrent => "match-current",
Self::MatchOtherStart => "match-start",
Self::MatchCurrentStart => "match-current-start",
Self::Normal => unreachable!(),
}
}
}
pub trait TextTokenizer {
fn tokenize<'t>(&mut self, text: &'t str, range: &Range) -> (TokenKind, &'t str);
}
impl TextTokenizer for () {
fn tokenize<'t>(&mut self, text: &'t str, _range: &Range) -> (TokenKind, &'t str) {
(TokenKind::Normal, text)
}
}
#[derive(Default)]
pub struct MarkdownContent {
source: String,
base_dir: SlashPath,
}
impl MarkdownContent {
pub fn new(source: String, base_dir: Option<&Path>) -> Self {
let base_dir =
if let Some(path) = base_dir { SlashPath::from(path) } else { SlashPath::default() };
Self { source, base_dir }
}
pub fn modified_offset(&self, new: &Self) -> Option<usize> {
let (prev_source, new_source) = (&self.source, &new.source);
prev_source
.as_bytes()
.iter()
.zip(new_source.as_bytes().iter())
.position(|(a, b)| a != b)
.or_else(|| {
let (prev_len, new_len) = (prev_source.len(), new_source.len());
(prev_len != new_len).then_some(cmp::min(prev_len, new_len))
})
}
pub fn is_empty(&self) -> bool {
self.source.is_empty() && self.base_dir.is_empty()
}
}
pub struct MarkdownParser<'a, V: TextVisitor, T: TextTokenizer> {
parser: Parser<'a, 'a>,
base_dir: &'a SlashPath,
offset: Option<usize>,
text_tokenizer: T,
_phantom: PhantomData<V>,
}
impl<'a, V: TextVisitor, T: TextTokenizer> MarkdownParser<'a, V, T> {
pub fn new(content: &'a MarkdownContent, offset: Option<usize>, text_tokenizer: T) -> Self {
let mut options = Options::empty();
options.insert(
Options::ENABLE_STRIKETHROUGH
| Options::ENABLE_FOOTNOTES
| Options::ENABLE_TABLES
| Options::ENABLE_TASKLISTS
| Options::ENABLE_MATH,
);
let parser = Parser::new_ext(&content.source, options);
let base_dir = &content.base_dir;
Self { parser, base_dir, offset, text_tokenizer, _phantom: PhantomData }
}
}
// Note: Build raw JavaScript expression which is evaluated to the render tree encoded as JSON value.
// This expression will be evaluated via `receive(JSON.parse('{"kind":"render_tree",...}'))` by renderer.
impl<'a, V: TextVisitor, T: TextTokenizer> RawMessageWriter for MarkdownParser<'a, V, T> {
type Output = V;
fn write_to(self, writer: impl Write) -> Result<Self::Output> {
let mut enc =
RenderTreeEncoder::new(writer, self.base_dir, self.offset, self.text_tokenizer);
enc.out.write_all(br#"JSON.parse('{"kind":"render_tree","tree":"#)?;
enc.push(self.parser)?;
enc.out.write_all(b"}')")?;
Ok(enc.text_visitor)
}
}
// To know the format of JSON value, see type definitions in web/ipc.ts
enum TableState {
Head,
Row,
}
// Note: Be careful, this function is called in the hot loop on encoding texts
#[inline]
#[allow(clippy::just_underscores_and_digits)]
fn encode_string_byte(mut out: impl Write, b: u8) -> Result<()> {
const BB: u8 = b'b'; // \x08
const TT: u8 = b't'; // \x09
const NN: u8 = b'n'; // \x0a
const FF: u8 = b'f'; // \x0c
const RR: u8 = b'r'; // \x0d
const DQ: u8 = b'"'; // \x22
const SQ: u8 = b'\''; // \x27
const BS: u8 = b'\\'; // \x5c
const XX: u8 = 1; // \x00...\x1f non-printable
const __: u8 = 0;
#[rustfmt::skip]
const ESCAPE_TABLE: [u8; 256] = [
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
XX, XX, XX, XX, XX, XX, XX, XX, BB, TT, NN, XX, FF, RR, XX, XX, // 0
XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 1
__, __, DQ, __, __, __, __, SQ, __, __, __, __, __, __, __, __, // 2
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4
__, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, XX, // 7
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // E
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F
];
match ESCAPE_TABLE[b as usize] {
__ => out.write_all(&[b]),
BS => out.write_all(br#"\\\\"#), // Escape twice for JS and JSON (\\\\ → \\ → \)
SQ => out.write_all(br#"\'"#), // JSON string will be put in '...' JS string. ' needs to be escaped
XX => write!(out, r#"\\u{:04x}"#, b),
b => out.write_all(&[b'\\', b'\\', b]), // Escape \ itself: JSON.parse('\\n')
}
}
struct StringContentEncoder<W: Write>(W);
impl<W: Write> Write for StringContentEncoder<W> {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
for b in buf.iter().copied() {
encode_string_byte(&mut self.0, b)?;
}
Ok(buf.len())
}
fn flush(&mut self) -> Result<()> {
self.0.flush()
}
}
struct RawHtmlReader<'a, I: Iterator<Item = (Event<'a>, Range)>> {
current: CowStr<'a>,
index: usize,
events: Peekable<I>,
stack: usize,
}
impl<'a, I: Iterator<Item = (Event<'a>, Range)>> RawHtmlReader<'a, I> {
fn new(current: CowStr<'a>, events: Peekable<I>) -> Self {
Self { current, index: 0, events, stack: 1 }
}
fn read_byte(&mut self) -> Option<u8> {
// Current event was consumed. Fetch next event otherwise return `None`.
while self.current.len() <= self.index {
if !matches!(self.events.peek(), Some((Event::Html(_) | Event::Text(_), _)))
|| self.stack == 0
{
return None;
}
self.current = match self.events.next().unwrap().0 {
Event::Html(html) => {
if html.starts_with("</") {
self.stack -= 1;
} else {
self.stack += 1;
}
html
}
Event::Text(text) => text,
_ => unreachable!(),
};
self.index = 0;
}
let b = self.current.as_bytes()[self.index];
self.index += 1;
Some(b)
}
}
impl<'a, I: Iterator<Item = (Event<'a>, Range)>> Read for RawHtmlReader<'a, I> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
for (i, dest) in buf.iter_mut().enumerate() {
if let Some(b) = self.read_byte() {
*dest = b;
} else {
return Ok(i);
}
}
Ok(buf.len())
}
}
struct RenderTreeEncoder<'a, W: Write, V: TextVisitor, T: TextTokenizer> {
out: W,
base_dir: &'a SlashPath,
table: TableState,
is_start: bool,
ids: HashMap<CowStr<'a>, usize>,
modified: Option<usize>,
text_visitor: V,
text_tokenizer: T,
autolinker: Autolinker,
sanitizer: Sanitizer<'a>,
in_code_block: bool,
}
impl<'a, W: Write, V: TextVisitor, T: TextTokenizer> RenderTreeEncoder<'a, W, V, T> {
fn new(w: W, base_dir: &'a SlashPath, modified: Option<usize>, text_tokenizer: T) -> Self {
Self {
out: w,
base_dir,
table: TableState::Head,
is_start: true,
ids: HashMap::new(),
modified,
text_visitor: V::default(),
text_tokenizer,
autolinker: Autolinker::default(),
sanitizer: Sanitizer::new(base_dir),
in_code_block: false,
}
}
fn push(&mut self, parser: Parser<'a, 'a>) -> Result<()> {
self.out.write_all(b"[")?;
self.events(parser)?;
// Modified offset was not consumed by any text, it would mean that some non-text parts after any text were
// modified. As a fallback, set 'modified' marker after the last text.
if self.modified.is_some() {
self.tag("modified")?;
self.out.write_all(b"}")?;
}
self.out.write_all(b"]")
}
fn string_content(&mut self, s: &str) -> Result<()> {
for b in s.as_bytes().iter().copied() {
encode_string_byte(&mut self.out, b)?;
}
Ok(())
}
fn string(&mut self, s: &str) -> Result<()> {
self.out.write_all(b"\"")?;
self.string_content(s)?;
self.out.write_all(b"\"")
}
fn alignment(&mut self, a: Alignment) -> Result<()> {
self.out.write_all(match a {
Alignment::None => b"null",
Alignment::Left => br#""left""#,
Alignment::Center => br#""center""#,
Alignment::Right => br#""right""#,
})
}
fn id(&mut self, name: CowStr<'a>) -> usize {
let new = self.ids.len() + 1;
*self.ids.entry(name).or_insert(new)
}
fn comma(&mut self) -> Result<()> {
if !self.is_start {
self.out.write_all(b",")?;
} else {
self.is_start = false;
}
Ok(())
}
fn tag(&mut self, name: &str) -> Result<()> {
self.comma()?;
write!(self.out, r#"{{"t":"{}""#, name)
}
fn text_tokens(&mut self, mut input: &str, mut range: Range) -> Result<()> {
use TokenKind::*;
while !input.is_empty() {
let (token, text) = self.text_tokenizer.tokenize(input, &range);
match token {
Normal => {
self.comma()?;
self.string(text)?;
}
MatchOther | MatchCurrent | MatchOtherStart | MatchCurrentStart => {
self.tag(token.tag())?;
self.children_begin()?;
self.string(text)?;
self.tag_end()?;
}
}
input = &input[text.len()..];
range.start += text.len();
}
Ok(())
}
fn text(&mut self, text: &str, range: Range) -> Result<()> {
self.text_visitor.visit(text, &range);
let Some(offset) = self.modified else {
return self.text_tokens(text, range);
};
let Range { start, end } = range;
if end < offset {
return self.text_tokens(text, range);
}
// Handle the last modified offset with this text token
self.modified = None;
log::debug!("Handling last modified offset: {:?}", offset);
if offset <= start {
self.tag("modified")?;
self.out.write_all(b"}")?;
self.text_tokens(text, range)
} else if end == offset {
self.text_tokens(text, range)?;
self.tag("modified")?;
self.out.write_all(b"}")
} else {
let i = offset - start;
self.text_tokens(&text[..i], range.start..offset)?;
self.tag("modified")?;
self.out.write_all(b"}")?;
self.text_tokens(&text[i..], offset..range.end)
}
}
fn emoji_text(&mut self, text: &str, range: Range) -> Result<()> {
let mut start = range.start;
for token in EmojiTokenizer::new(text) {
match token {
EmojiToken::Text(text) => {
if !text.is_empty() {
self.text(text, start..start + text.len())?;
start += text.len();
}
}
EmojiToken::Emoji(emoji, len) => {
self.tag("emoji")?;
self.out.write_all(br#","name":"#)?;
self.string(emoji.name())?;
self.children_begin()?;
self.string(emoji.as_str())?;
self.tag_end()?;
start += len;
}
}
}
// Note: When some escaped text is included in input like "&", `start == range.end` invariant is violated here.
// That's OK because pulldown-cmark tokenizes any escaped text as small as possible to reduce extra heap allocation.
// For instance "foo & bar" is tokenized into three events Text("foo "), Text("&"), Test(" bar"). It means that
// any escaped charactor is followed by no text within the token.
Ok(())
}
fn autolink_text(&mut self, mut text: &str, range: Range) -> Result<()> {
let Range { mut start, end } = range;
while let Some((s, e)) = self.autolinker.find_autolink(text) {
if s > 0 {
self.emoji_text(&text[..s], start..start + s)?;
}
let url = &text[s..e];
log::debug!("Auto-linking URL: {}", url);
self.tag("a")?;
self.out.write_all(br#","auto":true,"href":"#)?;
self.string(url)?;
self.children_begin()?;
self.text(url, start + s..start + e)?;
self.tag_end()?;
text = &text[e..];
start += e;
}
if !text.is_empty() {
self.emoji_text(text, start..end)?;
}
Ok(())
}
fn events(&mut self, parser: Parser<'a, 'a>) -> Result<()> {
use Event::*;
let mut events = parser.into_offset_iter().peekable();
while let Some((event, range)) = events.next() {
match event {
Start(tag) => {
let next_event = events.peek().map(|(e, _)| e);
self.start_tag(tag, next_event)?;
}
End(tag) => self.end_tag(tag)?,
Text(text) if self.in_code_block => self.text(&text, range)?,
Text(text) => self.autolink_text(&text, range)?,
Code(text) => {
let pad = (range.len() - text.len()) / 2;
let inner_range = (range.start + pad)..(range.end - pad);
self.tag("code")?;
self.children_begin()?;
self.text(&text, inner_range)?;
self.tag_end()?;
}
Html(html) => {
self.tag("html")?;
self.out.write_all(br#","raw":""#)?;
let mut dst = StringContentEncoder(&mut self.out);
let mut src = RawHtmlReader::new(html, events);
self.sanitizer.clean(&mut dst, &mut src)?;
events = src.events;
self.out.write_all(br#""}"#)?;
}
SoftBreak => self.text("\n", range)?,
HardBreak => {
self.tag("br")?;
self.out.write_all(b"}")?;
}
Rule => {
self.tag("hr")?;
self.out.write_all(b"}")?;
}
FootnoteReference(name) => {
self.tag("fn-ref")?;
let id = self.id(name);
write!(self.out, r#","id":{}}}"#, id)?;
}
TaskListMarker(checked) => {
self.tag("checkbox")?;
write!(self.out, r#","checked":{}}}"#, checked)?;
}
Math(display, text) => {
self.tag("math")?;
write!(self.out, r#","inline":{},"expr":"#, display == MathDisplay::Inline)?;
self.string(&text)?;
self.out.write_all(b"}")?;
}
}
}
Ok(())
}
fn rebase_link(&mut self, dest: &str) -> Result<()> {
if !should_rebase_url(dest) {
return self.string(dest);
}
// Rebase 'foo/bar/' with '/path/to/base' as '/path/to/base/foo/bar'
self.out.write_all(b"\"")?;
self.string_content(self.base_dir)?;
if !dest.starts_with('/') {
self.out.write_all(b"/")?;
}
self.string_content(dest)?;
self.out.write_all(b"\"")
}
fn children_begin(&mut self) -> Result<()> {
self.is_start = true;
self.out.write_all(br#","c":["#)
}
fn tag_end(&mut self) -> Result<()> {
self.is_start = false;
self.out.write_all(b"]}")
}
fn start_tag(&mut self, tag: Tag<'a>, next: Option<&Event>) -> Result<()> {
use Tag::*;
match tag {
Paragraph => {
self.tag("p")?;
}
Heading(level, id, _) => {
self.tag("h")?;
let level: u8 = match level {
HeadingLevel::H1 => 1,
HeadingLevel::H2 => 2,
HeadingLevel::H3 => 3,
HeadingLevel::H4 => 4,
HeadingLevel::H5 => 5,
HeadingLevel::H6 => 6,
};
write!(self.out, r#","level":{}"#, level)?;
if let Some(id) = id {
self.out.write_all(br#","id":"#)?;
self.string(id)?;
}
}
Table(alignments) => {
self.tag("table")?;
self.out.write_all(br#","align":["#)?;
let mut alignments = alignments.into_iter();
if let Some(a) = alignments.next() {
self.alignment(a)?;
}
for a in alignments {
self.out.write_all(b",")?;
self.alignment(a)?;
}
self.out.write_all(b"]")?;
}
TableHead => {
self.table = TableState::Head;
self.tag("thead")?;
self.children_begin()?;
self.tag("tr")?;
}
TableRow => {
self.table = TableState::Row;
self.tag("tr")?;
}
TableCell => {
let tag = match self.table {
TableState::Head => "th",
TableState::Row => "td",
};
self.tag(tag)?;
}
BlockQuote => {
self.tag("blockquote")?;
}
CodeBlock(info) => {
self.tag("pre")?;
self.children_begin()?;
self.tag("code")?;
if let CodeBlockKind::Fenced(info) = info {
if let Some(lang) = info.split(' ').next() {
if !lang.is_empty() {
self.out.write_all(br#","lang":"#)?;
self.string(lang)?;
}
}
}
self.in_code_block = true;
}
List(Some(1)) => self.tag("ol")?,
List(Some(start)) => {
self.tag("ol")?;
write!(self.out, r#","start":{}"#, start)?;
}
List(None) => self.tag("ul")?,
Item => {
if let Some(Event::TaskListMarker(_)) = next {
self.tag("task-list")?;
} else {
self.tag("li")?;
}
}
Emphasis => self.tag("em")?,
Strong => self.tag("strong")?,
Strikethrough => self.tag("del")?,
Link(LinkType::Autolink, _, _) => return Ok(()), // Ignore autolink since it is linked by `Autolinker`
Link(link_type, dest, title) => {
self.tag("a")?;
self.out.write_all(br#","href":"#)?;
match link_type {
LinkType::Email => {
self.out.write_all(b"\"mailto:")?;
self.string_content(&dest)?;
self.out.write_all(b"\"")?;
}
_ => self.rebase_link(&dest)?,
}
if !title.is_empty() {
self.out.write_all(br#","title":"#)?;
self.string(&title)?;
}
}
Image(_link_type, dest, title) => {
self.tag("img")?;
if !title.is_empty() {
self.out.write_all(br#","title":"#)?;
self.string(&title)?;
}
self.out.write_all(br#","src":"#)?;
self.rebase_link(&dest)?;
}
FootnoteDefinition(name) => {
self.tag("fn-def")?;
if !name.is_empty() {
self.out.write_all(br#","name":"#)?;
self.string(&name)?;
}
let id = self.id(name);
write!(self.out, r#","id":{}"#, id)?;
}
}
// Tag element must have its children (maybe empty)
self.children_begin()
}
fn end_tag(&mut self, tag: Tag<'a>) -> Result<()> {
use Tag::*;
match tag {
Link(LinkType::Autolink, _, _) => Ok(()), // Ignore autolink since it is linked by `Autolinker`
Paragraph
| Heading(_, _, _)
| TableRow
| TableCell
| BlockQuote
| List(_)
| Item
| Emphasis
| Strong
| Strikethrough
| Link(_, _, _)
| Image(_, _, _)
| FootnoteDefinition(_) => self.tag_end(),
CodeBlock(_) => {
self.in_code_block = false;
self.tag_end()?;
self.tag_end()
}
Table(_) => {
self.tag_end()?;
self.tag_end()
}
TableHead => {
self.tag_end()?;
self.tag_end()?;
self.tag("tbody")?;
self.children_begin()
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UrlCharKind {
Invalid,
Term,
NonTerm,
}
impl UrlCharKind {
fn of(c: char) -> Self {
// https://www.rfc-editor.org/rfc/rfc3987
match c {
'\u{00}'..='\u{1F}'
| ' '
| '|'
| '"'
| '<'
| '>'
| '`'
| '('
| ')'
| '['
| ']'
| '\u{7F}'..='\u{9F}' => Self::Invalid,
'?' | '!' | '.' | ',' | ':' | ';' | '*' | '&' | '\\' | '{' | '}' | '\'' => {
Self::NonTerm
}
_ => Self::Term,
}
}
}
struct Autolinker(AhoCorasick);
impl Default for Autolinker {
fn default() -> Self {
Self(AhoCorasick::new(["https://", "http://"]).unwrap())
}
}
impl Autolinker {
fn find_autolink(&self, text: &str) -> Option<(usize, usize)> {
for mat in self.0.find_iter(text) {
let (start, scheme_end) = (mat.start(), mat.end());
if let Some(c) = text[..start].chars().next_back() {
if c.is_ascii_alphabetic() {
// Note: "foohttp://example.com" is not URL but "123http://example.com" contains URL
continue;
}
}
let mut len = 0;
for (i, c) in text[scheme_end..].char_indices() {
match UrlCharKind::of(c) {
UrlCharKind::Invalid => break,
UrlCharKind::Term => {
len = i + c.len_utf8();
}
UrlCharKind::NonTerm => {}
}
}
if len > 0 {
return Some((start, scheme_end + len));
}
}
None
}
}
#[derive(Debug)]
enum EmojiToken<'a> {
Text(&'a str),
Emoji(&'static Emoji, usize),
}
struct EmojiTokenizer<'a> {
text: &'a str,
iter: Memchr<'a>,
start: usize,
}
impl<'a> EmojiTokenizer<'a> {
fn new(text: &'a str) -> Self {
Self { iter: memchr_iter(b':', text.as_bytes()), text, start: 0 }
}
fn eat(&mut self, end: usize) -> &'a str {
let text = &self.text[self.start..end];
self.start = end;
text
}
}
impl<'a> Iterator for EmojiTokenizer<'a> {
type Item = EmojiToken<'a>;
// Tokenizing example:
// "foo :dog: bar :piyo: wow"
// -> ":dog: bar :piyo: wow" (text "foo ")
// -> " bar :piyo: wow" (emoji "dog")
// -> ":piyo: wow" (text " bar ")
// -> ": wow" (text ":piyo")
// -> "" (text ": wow")
fn next(&mut self) -> Option<Self::Item> {
if self.start == self.text.len() {
return None;
}
let Some(end) = self.iter.next() else {
return Some(EmojiToken::Text(self.eat(self.text.len()))); // Eat all of rest
};
if self.start == end {
// Edge case: The initial input text starts with ':'
return self.next();
}
if !self.text[self.start..].starts_with(':') {
return Some(EmojiToken::Text(self.eat(end)));
}
// Note:
// text[start..end+1] == ":dog:"
// text[start+1..end] == "dog"
// text[start..end] == ":dog"
let short = &self.text[self.start + 1..end];
if let Some(emoji) = emojis::get_by_shortcode(short) {
self.start = end + 1;
Some(EmojiToken::Emoji(emoji, short.len() + 2))
} else {
Some(EmojiToken::Text(self.eat(end)))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::path::PathBuf;
fn load_data(name: &str) -> String {
let mut path = PathBuf::from("src");
path.push("markdown");
path.push("testdata");
path.push(format!("{}.md", name));
match fs::read_to_string(&path) {
Ok(text) => text,
Err(err) => panic!("Could not find Markdown test data at {:?}: {}", path, err),
}
}
macro_rules! snapshot_test {
($name:ident, $offset:expr, $basedir:expr) => {
#[test]
fn $name() {
let source = load_data(stringify!($name));
let target = MarkdownContent::new(source, $basedir);
let parser = MarkdownParser::new(&target, $offset, ());
let mut buf = Vec::new();
let () = parser.write_to(&mut buf).unwrap();
let buf = String::from_utf8(buf).unwrap();
// Revert extra escape for '...' JavaScript string
let buf = buf.replace("\\\\", "\\");
// Remove the `JSON.parse` call to restore JSON value passed to the function
let buf = buf.strip_prefix("JSON.parse('").unwrap();
let buf = buf.strip_suffix("')").unwrap();
// Check if the written output is in the valid JSON format
let json: serde_json::Value = match serde_json::from_str(buf) {
Ok(value) => value,
Err(err) => {
panic!("Invalid JSON input with error \"{}\": {}", err, buf);
}
};
insta::assert_json_snapshot!(json);
}
};
($name:ident) => {
snapshot_test!($name, None, None);
};
($name:ident, $offset:expr) => {
snapshot_test!($name, $offset, None);
};
}
snapshot_test!(paragraph);
snapshot_test!(blockquote);
snapshot_test!(list);
snapshot_test!(headings);
snapshot_test!(codeblock);
snapshot_test!(link);
snapshot_test!(html);
snapshot_test!(sanitized);
snapshot_test!(inline_code);
snapshot_test!(emphasis);
snapshot_test!(image);
snapshot_test!(autolink);
snapshot_test!(emoji);
snapshot_test!(table);
snapshot_test!(math);
snapshot_test!(strikethrough);
snapshot_test!(tasklist);
snapshot_test!(footnotes);
snapshot_test!(highlight);
snapshot_test!(not_link);
// Offset
snapshot_test!(offset_block, Some(30));
snapshot_test!(offset_begin, Some(0));
snapshot_test!(offset_after_end, Some(10000000));
snapshot_test!(offset_in_emphasis, Some(10));
// Relative link resolutions
#[cfg(target_os = "windows")]
const BASE_DIR: &str = r#"\a\b\c\d\e"#;
#[cfg(not(target_os = "windows"))]
const BASE_DIR: &str = "/a/b/c/d/e";
snapshot_test!(relative_links, None, Some(Path::new(BASE_DIR)));
mod visitor {
use super::*;
use crate::markdown::DisplayText;
macro_rules! snapshot_test {
($name:ident) => {
#[test]
fn $name() {
let source = load_data(stringify!($name));
let content = MarkdownContent::new(source, None);
let parser = MarkdownParser::new(&content, None, ());
let mut buf = Vec::new();
let visitor: DisplayText = parser.write_to(&mut buf).unwrap();
let text = &visitor.raw_text();
let source = &content.source;
let mut mapped = vec![];
for map in visitor.sourcemap() {
let slice = &source[map.clone()];
assert!(
source.contains(slice),
"{:?} does not contain {:?}",
source,
text,
);
mapped.push((slice, map.clone()));
}
insta::assert_debug_snapshot!((text, mapped));
}
};
}
snapshot_test!(paragraph);
snapshot_test!(blockquote);
snapshot_test!(list);
snapshot_test!(headings);
snapshot_test!(codeblock);
snapshot_test!(link);
snapshot_test!(html);
snapshot_test!(sanitized);
snapshot_test!(inline_code);
snapshot_test!(emphasis);
snapshot_test!(image);
snapshot_test!(autolink);
snapshot_test!(emoji);
snapshot_test!(table);
snapshot_test!(math);
snapshot_test!(strikethrough);
snapshot_test!(tasklist);
snapshot_test!(footnotes);
snapshot_test!(highlight);
snapshot_test!(not_link);
}
#[test]
fn emoji_tokenizer() {
#[derive(PartialEq, Eq, Debug)]
enum Tok {
T(&'static str),
E(&'static str, usize),
}
for (input, expected) in [
(":dog:", &[Tok::E("dog face", 5)][..]),
(":nerd_face:", &[Tok::E("nerd face", 11)][..]),
(":+1:", &[Tok::E("thumbs up", 4)][..]),
(":-1:", &[Tok::E("thumbs down", 4)][..]),
(":dog::cat:", &[Tok::E("dog face", 5), Tok::E("cat face", 5)][..]),
(":dog: :cat:", &[Tok::E("dog face", 5), Tok::T(" "), Tok::E("cat face", 5)][..]),
(
" :dog: :cat: ",
&[
Tok::T(" "),
Tok::E("dog face", 5),
Tok::T(" "),
Tok::E("cat face", 5),
Tok::T(" "),
][..],
),
(
"hello :dog: world :cat: nyan",
&[
Tok::T("hello "),
Tok::E("dog face", 5),
Tok::T(" world "),
Tok::E("cat face", 5),
Tok::T(" nyan"),
][..],
),
("hello, world", &[Tok::T("hello, world")][..]),
("", &[][..]),
("dog:", &[Tok::T("dog"), Tok::T(":")][..]),
(":dog", &[Tok::T(":dog")][..]),
(":this-is-not-an-emoji:", &[Tok::T(":this-is-not-an-emoji"), Tok::T(":")][..]),
(
":not-emoji:not-emoji:dog:",
&[Tok::T(":not-emoji"), Tok::T(":not-emoji"), Tok::E("dog face", 5)][..],
),
(
":not-emoji:not-emoji:dog:",
&[Tok::T(":not-emoji"), Tok::T(":not-emoji"), Tok::E("dog face", 5)][..],
),
("::::", &[Tok::T(":"), Tok::T(":"), Tok::T(":"), Tok::T(":")][..]),
] {
let actual = EmojiTokenizer::new(input)
.map(|tok| match tok {
EmojiToken::Text(text) => Tok::T(text),
EmojiToken::Emoji(emoji, len) => Tok::E(emoji.name(), len),
})
.collect::<Vec<_>>();
assert_eq!(expected, actual, "input={:?}", input);
}
}
#[test]
fn auto_linker() {
for (input, url) in [
("http://example.com", Some("http://example.com")),
("https://example.com", Some("https://example.com")),
("http://example.com/foo", Some("http://example.com/foo")),
("http://example.com/foo/", Some("http://example.com/foo/")),
("http://example.com&foo=bar", Some("http://example.com&foo=bar")),
("hello http://example.com world", Some("http://example.com")),
("[foo](http://example.com)", Some("http://example.com")),
("[http://example.com]", Some("http://example.com")),
("Nice URL https://example.com!", Some("https://example.com")),
("This is URL https://example.com.", Some("https://example.com")),
("Is this URL https://example.com?", Some("https://example.com")),
("He said 'https://example.com'", Some("https://example.com")),
("Open https://example.com, and click button", Some("https://example.com")),
("https://example.com&", Some("https://example.com")),
("123http://aaa.com", Some("http://aaa.com")),
("file:///foo/bar", None),
("", None),
("hello, world", None),
("http:", None),
("http://", None),
("foohttp://aaa.com", None),
] {
let found = Autolinker::default().find_autolink(input);
assert_eq!(
url.is_some(),
found.is_some(),
"input={input:?}, found={found:?}, expected={url:?}",
);
if let Some(url) = url {
let (s, e) = found.unwrap();
assert_eq!(url, &input[s..e]);
}
}
}
}
| true |
ea6f2d2b290899bebb21159217cfdf59f8c8008c
|
Rust
|
fnune/exercises
|
/linked_list/linked_list.rs
|
UTF-8
| 3,121 | 3.734375 | 4 |
[
"MIT"
] |
permissive
|
#![feature(alloc)]
#![feature(shared)]
extern crate alloc;
extern crate core;
use alloc::boxed::{Box};
use core::ptr::{Shared};
struct Node<T> {
content: T,
next: Option<Shared<Node<T>>>,
}
impl<T> Node<T> {
fn new(content: T) -> Self {
Node {
next: None,
content,
}
}
fn pluck_content(node: Box<Self>) -> T {
node.content
}
}
struct FLinkedList<T> {
head: Option<Shared<Node<T>>>,
len: usize,
}
impl<T> FLinkedList<T> {
pub fn new() -> FLinkedList<T> {
FLinkedList {
head: None,
len: 0,
}
}
pub fn prepend(&mut self, element: T) {
let node = Box::new(Node::new(element));
self.prepend_node(node);
}
pub fn pop_head(&mut self) -> Option<T> {
self.pop_head_node().map(Node::pluck_content)
}
pub fn at(&self, index: usize) -> Option<T> {
self.node_at(index).map(Node::pluck_content)
}
fn prepend_node(&mut self, mut node: Box<Node<T>>) {
unsafe {
node.next = self.head;
self.head = Some(Shared::new(Box::into_raw(node)));
self.len += 1;
}
}
fn pop_head_node(&mut self) -> Option<Box<Node<T>>> {
self.head.map(|node| unsafe {
let node = Box::from_raw(node.as_ptr());
self.head = node.next;
self.len -= 1;
node
})
}
fn node_at(&self, mut index: usize) -> Option<Box<Node<T>>> {
if index >= self.len { None } else {
let mut current = self.head;
while index > 0 {
unsafe {
current = match current {
Some(element) => Box::from_raw(element.as_ptr()).next,
_ => None,
}
}
index -= 1;
}
unsafe {
match current {
Some(element) => Some(Box::from_raw(element.as_ptr())),
_ => None,
}
}
}
}
}
fn main() {
println!("Singly linked list exercise.");
}
#[test]
fn prepend_extends_list_length() {
let mut my_linked_list: FLinkedList<i32> = FLinkedList::new();
my_linked_list.prepend(4);
my_linked_list.prepend(2);
assert_eq!(my_linked_list.len, 2);
}
#[test]
fn prepend_and_pop_head_work() {
let mut my_linked_list: FLinkedList<&str> = FLinkedList::new();
my_linked_list.prepend("there");
my_linked_list.prepend("hello");
assert_eq!(my_linked_list.pop_head(), Some("hello"));
assert_eq!(my_linked_list.pop_head(), Some("there"));
assert_eq!(my_linked_list.pop_head(), None);
}
#[test]
fn node_at_works() {
let mut my_linked_list: FLinkedList<&str> = FLinkedList::new();
my_linked_list.prepend("Hello");
my_linked_list.prepend("World");
assert_eq!(my_linked_list.at(0), Some("World"));
// Segfault here - Does not happen if run in the main program (?)
assert_eq!(my_linked_list.at(1), Some("Hello"));
assert_eq!(my_linked_list.at(2), None);
}
| true |
daf52c17056783913857ced97bf466c4c89609f4
|
Rust
|
lain-dono/klein-rs
|
/glsl_shim.rs
|
UTF-8
| 4,759 | 3.296875 | 3 |
[] |
no_license
|
pub fn swizzle_index(c: char) -> usize {
match c {
'x' => 0,
'y' => 1,
'z' => 2,
'w' => 3,
_ => unimplemented!(),
}
}
/*
#define SWIZZLE(a, b, c, d) \
swizzle<swizzle_index(#a[0]), \
swizzle_index(#b[0]), \
swizzle_index(#c[0]), \
swizzle_index(#d[0])> \
a##b##c##d
#define SWIZZLE_3(a, b, c) \
SWIZZLE(a, b, c, x); \
SWIZZLE(a, b, c, y); \
SWIZZLE(a, b, c, z); \
SWIZZLE(a, b, c, w);
#define SWIZZLE_2(a, b) \
SWIZZLE_3(a, b, x); \
SWIZZLE_3(a, b, y); \
SWIZZLE_3(a, b, z); \
SWIZZLE_3(a, b, w);
#define SWIZZLE_1(a) \
SWIZZLE_2(a, x); \
SWIZZLE_2(a, y); \
SWIZZLE_2(a, z); \
SWIZZLE_2(a, w);
#define SWIZZLES \
SWIZZLE_1(x); \
SWIZZLE_1(y); \
SWIZZLE_1(z); \
SWIZZLE_1(w)
// Redefine various glsl types and keywords
#define in
#define out
*/
pub union vec4 {
}
/*
struct vec4
{
template <uint8_t a, uint8_t b, uint8_t c, uint8_t d>
struct swizzle
{
constexpr operator vec4() const noexcept
{
float const* data = reinterpret_cast<float const*>(this);
return {data[a], data[b], data[c], data[d]};
}
template <uint8_t e, uint8_t f, uint8_t g, uint8_t h>
vec4 operator*(swizzle<e, f, g, h> const& other) const noexcept
{
return static_cast<vec4>(*this) * static_cast<vec4>(other);
}
vec4 operator*(vec4 const& other) const noexcept
{
return static_cast<vec4>(*this) * other;
}
};
template <uint8_t i>
struct component
{
operator float() const noexcept
{
return reinterpret_cast<float const*>(this)[i];
}
vec4 operator*(vec4 const& other) const noexcept
{
return other * static_cast<float>(*this);
}
float operator-() const noexcept
{
return -reinterpret_cast<float const*>(this)[i];
}
component& operator=(float other) noexcept
{
reinterpret_cast<float*>(this)[i] = other;
return *this;
}
};
union
{
float data[4];
component<0> x;
component<1> y;
component<2> z;
component<3> w;
SWIZZLES;
};
vec4() = default;
vec4(float a, float b, float c, float d) noexcept
: data{a, b, c, d}
{}
vec4 operator*(float other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] * other;
}
return result;
}
vec4& operator*=(float other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] = data[i] * other;
}
return *this;
}
template <uint8_t j>
vec4 operator*(component<j> const& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] * static_cast<float>(other);
}
return result;
}
template <uint8_t j>
vec4& operator*=(component<j> const& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] = data[i] * static_cast<float>(other);
}
return *this;
}
vec4 operator+(const vec4& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] + other.data[i];
}
return result;
}
vec4 operator*(const vec4& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] * other.data[i];
}
return result;
}
vec4 operator-(const vec4& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] - other.data[i];
}
return result;
}
vec4& operator+=(const vec4& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] += other.data[i];
}
return *this;
}
vec4& operator*=(const vec4& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] *= other.data[i];
}
return *this;
}
vec4& operator-=(const vec4& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] -= other.data[i];
}
return *this;
}
};
float dot(vec4 const& a, vec4 const& b)
{
float result = 0;
for (size_t i = 0; i != 4; ++i)
{
result += a.data[i] * b.data[i];
}
return result;
}
*/
| true |
b6b508f2a95329f89a910c7d75cf8c779b10e643
|
Rust
|
nimiq/core-rs
|
/beserial/src/types.rs
|
UTF-8
| 6,198 | 2.96875 | 3 |
[
"Apache-2.0"
] |
permissive
|
use crate::{Deserialize, ReadBytesExt, Serialize, SerializingError, WriteBytesExt};
use num;
#[allow(non_camel_case_types)]
#[derive(Ord, PartialOrd, Eq, PartialEq, Debug, Copy, Clone)]
pub struct uvar(u64);
impl From<uvar> for u64 {
fn from(u: uvar) -> Self { u.0 }
}
impl From<u64> for uvar {
fn from(u: u64) -> Self { uvar(u) }
}
impl num::FromPrimitive for uvar {
fn from_i64(n: i64) -> Option<Self> { if n < 0 { None } else { Some(uvar(n as u64)) } }
fn from_u64(n: u64) -> Option<Self> { Some(uvar(n)) }
}
impl num::ToPrimitive for uvar {
fn to_i64(&self) -> Option<i64> { if self.0 > i64::max_value() as u64 { None } else { Some(self.0 as i64) } }
fn to_u64(&self) -> Option<u64> { Some(self.0) }
}
impl Serialize for uvar {
fn serialize<W: WriteBytesExt>(&self, writer: &mut W) -> Result<usize, SerializingError> {
let mut size = 0;
if self.0 < 0x80 {
// Just that byte
size += Serialize::serialize(&(self.0 as u8), writer)?;
} else if self.0 < 0x4080 {
// +1 bytes
let x = self.0 - 0x80;
size += Serialize::serialize(&((x | 0x8000) as u16), writer)?;
} else if self.0 < 0x0020_4080 {
// +2 bytes
let x = self.0 - 0x4080;
size += Serialize::serialize(&(((x >> 8) | 0xC000) as u16), writer)?;
size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
} else if self.0 < 0x1020_4080 {
// +3 bytes
let x = self.0 - 0x0020_4080;
size += Serialize::serialize(&((x | 0xE000_0000) as u32), writer)?;
} else if self.0 < 0x0008_1020_4080 {
// +4 bytes
let x = self.0 - 0x1020_4080;
size += Serialize::serialize(&(((x >> 8) | 0xF000_0000) as u32), writer)?;
size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
} else if self.0 < 0x0408_1020_4080 {
// +5 bytes
let x = self.0 - 0x0008_1020_4080;
size += Serialize::serialize(&(((x >> 16) | 0xF800_0000) as u32), writer)?;
size += Serialize::serialize(&((x & 0xFFFF) as u16), writer)?;
} else if self.0 < 0x0002_0408_1020_4080 {
// +6 bytes
let x = self.0 - 0x0408_1020_4080;
size += Serialize::serialize(&(((x >> 24) | 0xFC00_0000) as u32), writer)?;
size += Serialize::serialize(&(((x >> 8) & 0xFFFF) as u16), writer)?;
size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
} else if self.0 < 0x0102_0408_1020_4080 {
// +7 bytes
let x = self.0 - 0x0002_0408_1020_4080;
size += Serialize::serialize(&((x | 0xFE00_0000_0000_0000) as u64), writer)?;
} else {
// +8 bytes
let x = self.0 - 0x0102_0408_1020_4080;
size += Serialize::serialize(&(((x >> 8) | 0xFF00_0000_0000_0000) as u64), writer)?;
size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
}
Ok(size)
}
fn serialized_size(&self) -> usize {
if self.0 < 0x80 {
1
} else if self.0 < 0x4080 {
2
} else if self.0 < 0x0020_4080 {
3
} else if self.0 < 0x1020_4080 {
4
} else if self.0 < 0x0008_1020_4080 {
5
} else if self.0 < 0x0408_1020_4080 {
6
} else if self.0 < 0x0002_0408_1020_4080 {
7
} else if self.0 < 0x0102_0408_1020_4080 {
8
} else { 9 }
}
}
impl Deserialize for uvar {
fn deserialize<R: ReadBytesExt>(reader: &mut R) -> Result<Self, SerializingError> {
fn read<T: num::ToPrimitive + Deserialize, R: ReadBytesExt>(reader: &mut R) -> Result<u64, SerializingError> {
let n: T = Deserialize::deserialize(reader)?;
Ok(n.to_u64().unwrap())
}
let first_byte: u8 = Deserialize::deserialize(reader)?;
if first_byte == 0xFF {
// 8 bytes follow
let byte_1_8 = read::<u64, R>(reader)?;
if byte_1_8 > u64::max_value() - 0x0102_0408_1020_4080 {
return Err(SerializingError::Overflow);
}
Ok(uvar(byte_1_8 + 0x0102_0408_1020_4080))
} else if first_byte == 0xFE {
// 7 bytes follow
let byte_1 = read::<u8, R>(reader)?;
let byte_2_3 = read::<u16, R>(reader)?;
let byte_4_7 = read::<u32, R>(reader)?;
Ok(uvar((byte_1 << 48) + (byte_2_3 << 32) + byte_4_7 + 0x0002_0408_1020_4080))
} else if first_byte & 0xFC == 0xFC {
// 6 bytes follow
let byte_1_2 = read::<u16, R>(reader)?;
let byte_3_6 = read::<u32, R>(reader)?;
Ok(uvar(((u64::from(first_byte) & 0x01) << 48) + (byte_1_2 << 32) + byte_3_6 + 0x0408_1020_4080))
} else if first_byte & 0xF8 == 0xF8 {
// 5 bytes to follow
let byte_1 = read::<u8, R>(reader)?;
let byte_2_5 = read::<u32, R>(reader)?;
Ok(uvar(((u64::from(first_byte) & 0x03) << 40) + (byte_1 << 32) + byte_2_5 + 0x0008_1020_4080))
} else if first_byte & 0xF0 == 0xF0 {
// 4 bytes to follow
let byte_1_4 = read::<u32, R>(reader)?;
Ok(uvar(((u64::from(first_byte) & 0x07) << 32) + byte_1_4 + 0x1020_4080))
} else if first_byte & 0xE0 == 0xE0 {
// 3 bytes to follow
let byte_1 = read::<u8, R>(reader)?;
let byte_2_3 = read::<u16, R>(reader)?;
Ok(uvar(((u64::from(first_byte) & 0x0f) << 24) + (byte_1 << 16) + byte_2_3 + 0x0020_4080))
} else if first_byte & 0xC0 == 0xC0 {
// 2 bytes to follow
let byte_1_2 = read::<u16, R>(reader)?;
Ok(uvar(((u64::from(first_byte) & 0x1f) << 16) + byte_1_2 + 0x4080))
} else if first_byte & 0x80 == 0x80 {
// 1 byte follows
let byte_1 = read::<u8, R>(reader)?;
Ok(uvar(((u64::from(first_byte) & 0x3f) << 8) + byte_1 + 0x80))
} else {
// Just that byte
Ok(uvar(u64::from(first_byte)))
}
}
}
| true |
4ae52db70e06e474eaa4596b8c39bbea4af6524a
|
Rust
|
rust-lang/rust
|
/library/portable-simd/crates/core_simd/src/alias.rs
|
UTF-8
| 4,097 | 2.859375 | 3 |
[
"Apache-2.0",
"MIT",
"LLVM-exception",
"NCSA",
"BSD-2-Clause",
"LicenseRef-scancode-unicode",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
macro_rules! number {
{ 1 } => { "one" };
{ 2 } => { "two" };
{ 4 } => { "four" };
{ 8 } => { "eight" };
{ $x:literal } => { stringify!($x) };
}
macro_rules! plural {
{ 1 } => { "" };
{ $x:literal } => { "s" };
}
macro_rules! alias {
{
$(
$element_ty:ty = {
$($alias:ident $num_elements:tt)*
}
)*
} => {
$(
$(
#[doc = concat!("A SIMD vector with ", number!($num_elements), " element", plural!($num_elements), " of type [`", stringify!($element_ty), "`].")]
#[allow(non_camel_case_types)]
pub type $alias = $crate::simd::Simd<$element_ty, $num_elements>;
)*
)*
}
}
macro_rules! mask_alias {
{
$(
$element_ty:ty : $size:literal = {
$($alias:ident $num_elements:tt)*
}
)*
} => {
$(
$(
#[doc = concat!("A SIMD mask with ", number!($num_elements), " element", plural!($num_elements), " for vectors with ", $size, " element types.")]
///
#[doc = concat!(
"The layout of this type is unspecified, and may change between platforms and/or Rust versions, and code should not assume that it is equivalent to `[",
stringify!($element_ty), "; ", $num_elements, "]`."
)]
#[allow(non_camel_case_types)]
pub type $alias = $crate::simd::Mask<$element_ty, $num_elements>;
)*
)*
}
}
alias! {
i8 = {
i8x1 1
i8x2 2
i8x4 4
i8x8 8
i8x16 16
i8x32 32
i8x64 64
}
i16 = {
i16x1 1
i16x2 2
i16x4 4
i16x8 8
i16x16 16
i16x32 32
i16x64 64
}
i32 = {
i32x1 1
i32x2 2
i32x4 4
i32x8 8
i32x16 16
i32x32 32
i32x64 64
}
i64 = {
i64x1 1
i64x2 2
i64x4 4
i64x8 8
i64x16 16
i64x32 32
i64x64 64
}
isize = {
isizex1 1
isizex2 2
isizex4 4
isizex8 8
isizex16 16
isizex32 32
isizex64 64
}
u8 = {
u8x1 1
u8x2 2
u8x4 4
u8x8 8
u8x16 16
u8x32 32
u8x64 64
}
u16 = {
u16x1 1
u16x2 2
u16x4 4
u16x8 8
u16x16 16
u16x32 32
u16x64 64
}
u32 = {
u32x1 1
u32x2 2
u32x4 4
u32x8 8
u32x16 16
u32x32 32
u32x64 64
}
u64 = {
u64x1 1
u64x2 2
u64x4 4
u64x8 8
u64x16 16
u64x32 32
u64x64 64
}
usize = {
usizex1 1
usizex2 2
usizex4 4
usizex8 8
usizex16 16
usizex32 32
usizex64 64
}
f32 = {
f32x1 1
f32x2 2
f32x4 4
f32x8 8
f32x16 16
f32x32 32
f32x64 64
}
f64 = {
f64x1 1
f64x2 2
f64x4 4
f64x8 8
f64x16 16
f64x32 32
f64x64 64
}
}
mask_alias! {
i8 : "8-bit" = {
mask8x1 1
mask8x2 2
mask8x4 4
mask8x8 8
mask8x16 16
mask8x32 32
mask8x64 64
}
i16 : "16-bit" = {
mask16x1 1
mask16x2 2
mask16x4 4
mask16x8 8
mask16x16 16
mask16x32 32
mask16x64 64
}
i32 : "32-bit" = {
mask32x1 1
mask32x2 2
mask32x4 4
mask32x8 8
mask32x16 16
mask32x32 32
mask32x64 64
}
i64 : "64-bit" = {
mask64x1 1
mask64x2 2
mask64x4 4
mask64x8 8
mask64x16 16
mask64x32 32
mask64x64 64
}
isize : "pointer-sized" = {
masksizex1 1
masksizex2 2
masksizex4 4
masksizex8 8
masksizex16 16
masksizex32 32
masksizex64 64
}
}
| true |
e776a4de65e77bd610a53df3de60d70b76f4baed
|
Rust
|
cGuille/adventofcode
|
/src/bin/2020-day4-part1.rs
|
UTF-8
| 759 | 3.015625 | 3 |
[] |
no_license
|
use std::collections::HashSet;
fn main() {
let batch = include_str!("../../input/2020-day4.txt");
let valid_passport_count = batch
.split("\n\n")
.filter(|passport_str| has_required_attributes(passport_str))
.count();
println!("{}", valid_passport_count);
}
fn has_required_attributes(passport_str: &str) -> bool {
let attr_set: HashSet<&str> = passport_str
.split_whitespace()
.map(|attr_str| attr_str.split(":").next().unwrap())
.collect();
attr_set.contains("byr")
&& attr_set.contains("iyr")
&& attr_set.contains("eyr")
&& attr_set.contains("hgt")
&& attr_set.contains("hcl")
&& attr_set.contains("ecl")
&& attr_set.contains("pid")
}
| true |
6df5798a8f6e64debc86bb99cb243cd84ebb9111
|
Rust
|
guillaumebreton/ruin
|
/src/main.rs
|
UTF-8
| 3,674 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
use chrono::NaiveDate;
use clap::{Parser, Subcommand};
use diesel::prelude::*;
use crossterm::{
event::{DisableMouseCapture, EnableMouseCapture},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use std::io;
use std::io::Error;
use tui::{backend::CrosstermBackend, Terminal};
use model::Service;
pub mod model;
pub mod ofx;
pub mod schema;
pub mod ui;
#[derive(Parser, Debug)]
#[clap(author = "Author Name", version, about)]
struct Arguments {
#[clap(short, long, default_value_t = String::from("ruin.db"),forbid_empty_values = true)]
/// the file to explore
db_path: String,
#[clap(subcommand)]
cmd: SubCommand,
}
#[derive(Subcommand, Debug)]
enum SubCommand {
/// Count how many times the package is used
Import {
#[clap(short, long, default_value_t = String::from("data.ofx"),forbid_empty_values = true)]
/// the file to explore
file_path: String,
},
View {},
}
// This macro from `diesel_migrations` defines an `embedded_migrations` module
// containing a function named `run`. This allows the example to be run and
// tested without any outside setup of the database.
embed_migrations!();
fn main() {
let args = Arguments::parse();
let connection = SqliteConnection::establish(&args.db_path)
.unwrap_or_else(|_| panic!("Error connecting to db"));
embedded_migrations::run_with_output(&connection, &mut std::io::stdout()).unwrap();
match args.cmd {
SubCommand::Import { file_path } => import(&connection, &file_path),
SubCommand::View {} => view(&connection).unwrap(),
}
}
fn view(connection: &SqliteConnection) -> Result<(), Error> {
let service = Service {
connection: connection,
};
// setup terminal
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
// create app and run it
let res = ui::run_app(&mut terminal, service);
// restore terminal
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture
)?;
terminal.show_cursor()?;
if let Err(err) = res {
println!("{:?}", err)
}
Ok(())
}
fn import(connection: &SqliteConnection, file_path: &str) {
let service = Service {
connection: connection,
};
let data = ofx::load(&file_path).unwrap();
let account_data = data.message.response.aggregate.account;
let balance = data
.message
.response
.aggregate
.available_balance
.amount
.parse::<f32>()
.unwrap();
let account = service
.upsert_account(
"",
&account_data.account_type,
&account_data.account_number,
(balance * 100.0) as i32,
)
.unwrap();
for tx in data
.message
.response
.aggregate
.transaction_list
.transactions
{
let date_posted = NaiveDate::parse_from_str(&tx.date_posted, "%Y%m%d").unwrap();
// TODO change the parse to use international format.
let amount = tx.amount.replace(",", ".").parse::<f32>().unwrap();
service
.upsert_transaction(
&tx.description,
date_posted,
&tx.id,
(amount * 100.0) as i32,
account.id,
)
.unwrap();
}
}
| true |
7cd4f120aa60dc79f2e38c804d383a2851c17fb5
|
Rust
|
juneym/rust-lang-tutorial
|
/src/cli.rs
|
UTF-8
| 344 | 3.34375 | 3 |
[] |
no_license
|
// sample cli's use of arguments
use std::env;
pub fn run() {
let args: Vec<String> = env::args().collect();
let command = args[1].clone(); //why do you have to use .clone()??
println!("\nargs: {:?}", args);
println!("\ncommand: {}", command);
if command == "hello" {
println!("Hey there. Hello!");
}
}
| true |
1a8de1b6017c58016b2e6bb96b4df43836dc20c0
|
Rust
|
akshayknarayan/simulator
|
/src/node/switch/drop_tail_queue.rs
|
UTF-8
| 3,712 | 3.015625 | 3 |
[] |
no_license
|
use std::collections::VecDeque;
use node::Link;
use node::switch::Queue;
use packet::Packet;
#[derive(Debug)]
pub struct DropTailQueue{
limit_bytes: u32,
link: Link,
pkts: VecDeque<Packet>,
forced_next: Option<Packet>,
active: bool,
paused: bool,
}
impl DropTailQueue {
pub fn new(limit_bytes: u32, link: Link) -> Self {
DropTailQueue{
limit_bytes,
link,
pkts: VecDeque::new(),
forced_next: None,
active: false,
paused: false,
}
}
fn occupancy_bytes(&self) -> u32 {
self.pkts.iter().map(|p| p.get_size_bytes()).sum()
}
}
impl Queue for DropTailQueue {
fn link(&self) -> Link {
self.link
}
fn headroom(&self) -> u32 {
self.limit_bytes - self.occupancy_bytes()
}
fn enqueue(&mut self, p: Packet) -> Option<()> {
let occupancy_bytes = self.occupancy_bytes();
if occupancy_bytes + p.get_size_bytes() > self.limit_bytes {
// we have to drop this packet
return None;
}
self.pkts.push_back(p);
self.set_active(true);
Some(())
}
fn force_tx_next(&mut self, p: Packet) -> Option<()> {
self.forced_next = Some(p);
self.set_active(true);
Some(())
}
fn dequeue(&mut self) -> Option<Packet> {
if let None = self.forced_next {
if self.pkts.len() == 1 {
self.set_active(false);
}
self.pkts.pop_front()
} else {
self.forced_next.take()
}
}
fn discard_matching(&mut self, mut should_discard: Box<FnMut(Packet) -> bool>) -> usize {
let pkts = &mut self.pkts;
let after_pkts = pkts.iter().filter(|&&p| !should_discard(p)).map(|p| p.clone()).collect::<VecDeque<Packet>>();
let dropped = pkts.len() - after_pkts.len();
*pkts = after_pkts;
dropped
}
fn count_matching(&self, mut counter: Box<FnMut(Packet) -> bool>) -> usize {
self.pkts.iter().filter(|&&p| counter(p)).count()
}
fn is_active(&self) -> bool {
self.active && !self.paused
}
fn set_active(&mut self, a: bool) {
self.active = a;
}
fn is_paused(&self) -> bool {
self.paused
}
fn set_paused(&mut self, a: bool) {
self.paused = a;
}
}
#[cfg(test)]
mod tests {
use node::{Link, switch::Queue};
use packet::{Packet, PacketHeader};
use super::DropTailQueue;
#[test]
fn check_discard_matching() {
let mut q = DropTailQueue::new(15_000, Link{propagation_delay: 0, bandwidth_bps: 0, pfc_enabled: false, from: 0, to: 1});
let mut pkts = (0..).map(|seq| {
Packet::Data{
hdr: PacketHeader{
flow: 0,
from: 0,
to: 1,
},
seq,
length: 1460,
}
});
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
assert_eq!(q.headroom(), 1500 * 2);
let dropped = q.discard_matching(Box::new(|p| match p {
Packet::Data{seq, ..} => {
seq > 5
}
_ => unreachable!(),
}));
assert_eq!(dropped, 2);
assert_eq!(q.headroom(), 1500 * 4);
}
}
| true |
132709ffc8d95dd0ef2df42d5535185e7b8d1939
|
Rust
|
nigelgray/rust-audio-analyser
|
/src/wav_helpers.rs
|
UTF-8
| 1,689 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
use std::sync::atomic::{Ordering};
// To find the RMS gain
// - Calculate the RMS value of the generated audio
// - Calculate the RMS value of the recorded audio
// - Calculate the power between the signals, using the generated audio as the reference
// (positive value means amplification, negative means attenuation)
// - We are interested in the voltage gain, not the power gain hence:
// L = 20 × log (voltage ratio V2 / V1) in dB (V1 = Vin is the reference)
// See http://www.sengpielaudio.com/calculator-amplification.htm
pub fn calculate_rms() {
if let Some(generated_rms) = find_rms_value(crate::GENERATE_PATH) {
if let Some(recorded_rms) = find_rms_value(crate::RECORD_PATH) {
let ratio = recorded_rms/generated_rms;
let gain = 20.0 * ratio.log10();
crate::RMS_GAIN.store(f64::to_bits(gain), Ordering::SeqCst);
}
}
}
// RMS = Root-Mean-Squared
// - Sqaure each sample
// - Sum them together
// - Work out the mean of the final sum
// - Take the square root
fn find_rms_value(filename: &str) -> Option<f64> {
let mut reader = hound::WavReader::open(filename).unwrap();
let sqr_sum = match reader.spec().sample_format {
hound::SampleFormat::Int => reader.samples::<i16>().fold(0.0, |sqr_sum, s| {
let sample = s.unwrap() as f64;
sqr_sum + sample * sample
}),
hound::SampleFormat::Float => reader.samples::<f32>().fold(0.0, |sqr_sum, s| {
let sample = s.unwrap() as f64;
sqr_sum + sample * sample
}),
};
let rms_value = (sqr_sum / reader.len() as f64).sqrt();
Some(rms_value)
}
| true |
88a2a4ed3d920a6b50652f055c8c530333ae91e5
|
Rust
|
sria91-rlox/cat-lox
|
/src/lexer/core.rs
|
UTF-8
| 5,111 | 3.6875 | 4 |
[
"MIT"
] |
permissive
|
use super::token::*;
pub struct Lexer {
input: Vec<char>,
index: usize,
}
impl Iterator for Lexer {
type Item = Token;
fn next(&mut self) -> Option<Token> {
match self.advance() {
None => None,
// Operators
Some('+') => Some(Token::Plus),
Some('-') => Some(Token::Minus),
Some('*') => Some(Token::Asterisk),
Some('(') => Some(Token::LeftParentheses),
Some(')') => Some(Token::RightParentheses),
Some(',') => Some(Token::Comma),
Some(';') => Some(Token::Semicolon),
Some('{') => Some(Token::LeftBrace),
Some('}') => Some(Token::RightBrace),
Some('.') => Some(Token::Dot),
Some('<') => match self.peek() {
Some('=') => {
self.advance();
Some(Token::LessEqual)
}
_ => Some(Token::LessThan),
},
Some('>') => match self.peek() {
Some('=') => {
self.advance();
Some(Token::GreaterEqual)
}
_ => Some(Token::GreaterThan),
},
Some('=') => match self.peek() {
Some('=') => {
self.advance();
Some(Token::Equal)
}
_ => Some(Token::Assign),
},
Some('!') => match self.peek() {
Some('=') => {
self.advance();
Some(Token::NotEqual)
}
_ => Some(Token::Bang),
},
Some('/') => {
match self.peek() {
// comments
Some('/') => {
self.advance();
while let Some(current_char) = self.advance() {
if current_char == '\n' {
break;
}
}
self.next()
}
_ => Some(Token::Slash),
}
}
Some('"') => {
let mut literal = String::new();
while let Some(current_char) = self.advance() {
if current_char == '"' {
break;
}
literal.push(current_char);
}
Some(Token::LoxString(literal))
}
// Whitespace (must be checked after comments)
Some(' ') => self.next(),
Some('\t') => self.next(),
Some('\r') => self.next(),
Some('\n') => self.next(),
// literal, keyword, or number
Some(current_char) => {
// Todo: maybe it would be preferable to store a reference to a
// slice rather than storing a new heap allocated string.
let mut literal = String::new();
literal.push(current_char);
loop {
match self.peek() {
Some(next) => {
if is_blacklisted(&next) {
break;
}
if !is_part_of_number(current_char) && next == '.' {
break;
}
}
None => break,
}
if let Some(current_char) = self.advance() {
literal.push(current_char);
}
}
if keyword(&literal).is_some() {
keyword(&literal)
} else if literal.chars().all(is_part_of_number) {
Some(Token::Number(literal.parse::<f64>().unwrap()))
} else {
Some(Token::Ident(literal))
}
}
}
}
}
impl Lexer {
pub fn new(input: &str) -> Lexer {
Lexer {
input: input.chars().collect(),
index: 0,
}
}
fn advance(&mut self) -> Option<char> {
if self.index >= self.input.len() {
None
} else {
self.index += 1;
Some(self.input[self.index - 1])
}
}
fn peek(&self) -> Option<char> {
if self.index >= self.input.len() {
None
} else {
Some(self.input[self.index])
}
}
}
/// Is this char allowed to be in a literal?
///
/// TODO: if we ever need to add a new state, both this and the next
/// function above need to be changed. That violates the open closed
/// principle, investigate refactoring.
fn is_blacklisted(c: &char) -> bool {
let blacklist = vec![
'+', '-', '*', '<', '>', '(', ')', ',', ';', '{', '}', '=', '!', '/', ' ', '\t', '\r', '\n'
];
blacklist.contains(c)
}
fn is_part_of_number(c: char) -> bool {
c.is_digit(10) || c == '.'
}
| true |
1e121ec8be3577093268d22579675c828345cb74
|
Rust
|
cloew/KaoBoy
|
/src/cpu/instructions/jump/conditions.rs
|
UTF-8
| 3,242 | 2.828125 | 3 |
[] |
no_license
|
use super::super::utils::{check_half_carry};
use super::super::super::instruction_context::InstructionContext;
pub fn always(context: &InstructionContext) -> bool {
return true;
}
pub fn is_carry_flag_off(context: &InstructionContext) -> bool {
return context.registers().carry_flag.get() == false;
}
pub fn is_carry_flag_on(context: &InstructionContext) -> bool {
return context.registers().carry_flag.get();
}
pub fn is_zero_flag_off(context: &InstructionContext) -> bool {
return context.registers().zero_flag.get() == false;
}
pub fn is_zero_flag_on(context: &InstructionContext) -> bool {
return context.registers().zero_flag.get();
}
#[cfg(test)]
mod tests {
use super::*;
use crate::as_hex;
use crate::cpu::testing::build_test_instruction_context;
#[test]
fn test_is_carry_flag_off_flag_off_returns_true() {
let mut context = build_test_instruction_context();
context.registers_mut().carry_flag.reset();
let result = is_carry_flag_off(&context);
assert_eq!(result, true);
}
#[test]
fn test_is_carry_flag_off_flag_on_returns_false() {
let mut context = build_test_instruction_context();
context.registers_mut().carry_flag.activate();
let result = is_carry_flag_off(&context);
assert_eq!(result, false);
}
#[test]
fn test_is_carry_flag_on_flag_off_returns_false() {
let mut context = build_test_instruction_context();
context.registers_mut().carry_flag.reset();
let result = is_carry_flag_on(&context);
assert_eq!(result, false);
}
#[test]
fn test_is_carry_flag_on_flag_on_returns_true() {
let mut context = build_test_instruction_context();
context.registers_mut().carry_flag.activate();
let result = is_carry_flag_on(&context);
assert_eq!(result, true);
}
#[test]
fn test_is_zero_flag_off_flag_off_returns_true() {
let mut context = build_test_instruction_context();
context.registers_mut().zero_flag.reset();
let result = is_zero_flag_off(&context);
assert_eq!(result, true);
}
#[test]
fn test_is_zero_flag_off_flag_on_returns_false() {
let mut context = build_test_instruction_context();
context.registers_mut().zero_flag.activate();
let result = is_zero_flag_off(&context);
assert_eq!(result, false);
}
#[test]
fn test_is_zero_flag_on_flag_off_returns_false() {
let mut context = build_test_instruction_context();
context.registers_mut().zero_flag.reset();
let result = is_zero_flag_on(&context);
assert_eq!(result, false);
}
#[test]
fn test_is_zero_flag_on_flag_on_returns_true() {
let mut context = build_test_instruction_context();
context.registers_mut().zero_flag.activate();
let result = is_zero_flag_on(&context);
assert_eq!(result, true);
}
}
| true |
01efae5778d259345ebd37771f1ae6a59908cae6
|
Rust
|
aicacia/rs-lexer
|
/src/token.rs
|
UTF-8
| 714 | 2.9375 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use super::TokenMeta;
#[derive(Serialize, Deserialize, Clone, PartialEq, Debug, Eq, PartialOrd, Ord, Hash)]
pub struct Token<T> {
meta: TokenMeta,
value: T,
}
unsafe impl<T> Send for Token<T> where T: Send {}
unsafe impl<T> Sync for Token<T> where T: Sync {}
impl<T> Token<T> {
#[inline(always)]
pub fn new(meta: TokenMeta, value: T) -> Self {
Token {
meta: meta,
value: value,
}
}
#[inline(always)]
pub fn meta(&self) -> &TokenMeta {
&self.meta
}
#[inline(always)]
pub fn into_meta(self) -> TokenMeta {
self.meta
}
#[inline(always)]
pub fn value(&self) -> &T {
&self.value
}
#[inline(always)]
pub fn into_value(self) -> T {
self.value
}
}
| true |
73dafda5b5dd20fc668be9760d08a4cdbd4a9861
|
Rust
|
chromium/chromium
|
/third_party/rust/getrandom/v0_2/crate/src/error.rs
|
UTF-8
| 8,110 | 2.640625 | 3 |
[
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later"
] |
permissive
|
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::{fmt, num::NonZeroU32};
/// A small and `no_std` compatible error type
///
/// The [`Error::raw_os_error()`] will indicate if the error is from the OS, and
/// if so, which error code the OS gave the application. If such an error is
/// encountered, please consult with your system documentation.
///
/// Internally this type is a NonZeroU32, with certain values reserved for
/// certain purposes, see [`Error::INTERNAL_START`] and [`Error::CUSTOM_START`].
///
/// *If this crate's `"std"` Cargo feature is enabled*, then:
/// - [`getrandom::Error`][Error] implements
/// [`std::error::Error`](https://doc.rust-lang.org/std/error/trait.Error.html)
/// - [`std::io::Error`](https://doc.rust-lang.org/std/io/struct.Error.html) implements
/// [`From<getrandom::Error>`](https://doc.rust-lang.org/std/convert/trait.From.html).
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Error(NonZeroU32);
const fn internal_error(n: u16) -> Error {
// SAFETY: code > 0 as INTERNAL_START > 0 and adding n won't overflow a u32.
let code = Error::INTERNAL_START + (n as u32);
Error(unsafe { NonZeroU32::new_unchecked(code) })
}
impl Error {
/// This target/platform is not supported by `getrandom`.
pub const UNSUPPORTED: Error = internal_error(0);
/// The platform-specific `errno` returned a non-positive value.
pub const ERRNO_NOT_POSITIVE: Error = internal_error(1);
/// Call to iOS [`SecRandomCopyBytes`](https://developer.apple.com/documentation/security/1399291-secrandomcopybytes) failed.
pub const IOS_SEC_RANDOM: Error = internal_error(3);
/// Call to Windows [`RtlGenRandom`](https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom) failed.
pub const WINDOWS_RTL_GEN_RANDOM: Error = internal_error(4);
/// RDRAND instruction failed due to a hardware issue.
pub const FAILED_RDRAND: Error = internal_error(5);
/// RDRAND instruction unsupported on this target.
pub const NO_RDRAND: Error = internal_error(6);
/// The environment does not support the Web Crypto API.
pub const WEB_CRYPTO: Error = internal_error(7);
/// Calling Web Crypto API `crypto.getRandomValues` failed.
pub const WEB_GET_RANDOM_VALUES: Error = internal_error(8);
/// On VxWorks, call to `randSecure` failed (random number generator is not yet initialized).
pub const VXWORKS_RAND_SECURE: Error = internal_error(11);
/// Node.js does not have the `crypto` CommonJS module.
pub const NODE_CRYPTO: Error = internal_error(12);
/// Calling Node.js function `crypto.randomFillSync` failed.
pub const NODE_RANDOM_FILL_SYNC: Error = internal_error(13);
/// Called from an ES module on Node.js. This is unsupported, see:
/// <https://docs.rs/getrandom#nodejs-es-module-support>.
pub const NODE_ES_MODULE: Error = internal_error(14);
/// Codes below this point represent OS Errors (i.e. positive i32 values).
/// Codes at or above this point, but below [`Error::CUSTOM_START`] are
/// reserved for use by the `rand` and `getrandom` crates.
pub const INTERNAL_START: u32 = 1 << 31;
/// Codes at or above this point can be used by users to define their own
/// custom errors.
pub const CUSTOM_START: u32 = (1 << 31) + (1 << 30);
/// Extract the raw OS error code (if this error came from the OS)
///
/// This method is identical to [`std::io::Error::raw_os_error()`][1], except
/// that it works in `no_std` contexts. If this method returns `None`, the
/// error value can still be formatted via the `Display` implementation.
///
/// [1]: https://doc.rust-lang.org/std/io/struct.Error.html#method.raw_os_error
#[inline]
pub fn raw_os_error(self) -> Option<i32> {
if self.0.get() < Self::INTERNAL_START {
match () {
#[cfg(target_os = "solid_asp3")]
// On SOLID, negate the error code again to obtain the original
// error code.
() => Some(-(self.0.get() as i32)),
#[cfg(not(target_os = "solid_asp3"))]
() => Some(self.0.get() as i32),
}
} else {
None
}
}
/// Extract the bare error code.
///
/// This code can either come from the underlying OS, or be a custom error.
/// Use [`Error::raw_os_error()`] to disambiguate.
#[inline]
pub const fn code(self) -> NonZeroU32 {
self.0
}
}
cfg_if! {
if #[cfg(unix)] {
fn os_err(errno: i32, buf: &mut [u8]) -> Option<&str> {
let buf_ptr = buf.as_mut_ptr() as *mut libc::c_char;
if unsafe { libc::strerror_r(errno, buf_ptr, buf.len()) } != 0 {
return None;
}
// Take up to trailing null byte
let n = buf.len();
let idx = buf.iter().position(|&b| b == 0).unwrap_or(n);
core::str::from_utf8(&buf[..idx]).ok()
}
} else {
fn os_err(_errno: i32, _buf: &mut [u8]) -> Option<&str> {
None
}
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dbg = f.debug_struct("Error");
if let Some(errno) = self.raw_os_error() {
dbg.field("os_error", &errno);
let mut buf = [0u8; 128];
if let Some(err) = os_err(errno, &mut buf) {
dbg.field("description", &err);
}
} else if let Some(desc) = internal_desc(*self) {
dbg.field("internal_code", &self.0.get());
dbg.field("description", &desc);
} else {
dbg.field("unknown_code", &self.0.get());
}
dbg.finish()
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(errno) = self.raw_os_error() {
let mut buf = [0u8; 128];
match os_err(errno, &mut buf) {
Some(err) => err.fmt(f),
None => write!(f, "OS Error: {}", errno),
}
} else if let Some(desc) = internal_desc(*self) {
f.write_str(desc)
} else {
write!(f, "Unknown Error: {}", self.0.get())
}
}
}
impl From<NonZeroU32> for Error {
fn from(code: NonZeroU32) -> Self {
Self(code)
}
}
fn internal_desc(error: Error) -> Option<&'static str> {
match error {
Error::UNSUPPORTED => Some("getrandom: this target is not supported"),
Error::ERRNO_NOT_POSITIVE => Some("errno: did not return a positive value"),
Error::IOS_SEC_RANDOM => Some("SecRandomCopyBytes: iOS Security framework failure"),
Error::WINDOWS_RTL_GEN_RANDOM => Some("RtlGenRandom: Windows system function failure"),
Error::FAILED_RDRAND => Some("RDRAND: failed multiple times: CPU issue likely"),
Error::NO_RDRAND => Some("RDRAND: instruction not supported"),
Error::WEB_CRYPTO => Some("Web Crypto API is unavailable"),
Error::WEB_GET_RANDOM_VALUES => Some("Calling Web API crypto.getRandomValues failed"),
Error::VXWORKS_RAND_SECURE => Some("randSecure: VxWorks RNG module is not initialized"),
Error::NODE_CRYPTO => Some("Node.js crypto CommonJS module is unavailable"),
Error::NODE_RANDOM_FILL_SYNC => Some("Calling Node.js API crypto.randomFillSync failed"),
Error::NODE_ES_MODULE => Some("Node.js ES modules are not directly supported, see https://docs.rs/getrandom#nodejs-es-module-support"),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::Error;
use core::mem::size_of;
#[test]
fn test_size() {
assert_eq!(size_of::<Error>(), 4);
assert_eq!(size_of::<Result<(), Error>>(), 4);
}
}
| true |
454b656776ef9ea552c3c6d8a0b47eb502367581
|
Rust
|
vanhtuan0409/aoc
|
/2022/src/bin/day6/main.rs
|
UTF-8
| 595 | 2.5625 | 3 |
[] |
no_license
|
use aoc_2022::get_input_file;
use itertools::Itertools;
use std::fs::File;
use std::io::{self, BufRead};
fn main() {
let f: io::Result<File> = get_input_file!("input1.txt");
let r = io::BufReader::new(f.unwrap());
r.lines().map(|line| line.unwrap()).for_each(|line| {
println!("======");
let chars = line.chars().collect_vec();
let (idx, _) = chars
.windows(14)
.enumerate()
.find(|(_, window)| window.iter().all_unique())
.unwrap();
println!("original signal: {}. Idx {}", line, idx + 14);
});
}
| true |
01839d034cd249a1e9dc8685da3c8a47731c3bf0
|
Rust
|
ErisMik/minecator
|
/src/minecraft/chunk.rs
|
UTF-8
| 786 | 2.75 | 3 |
[] |
no_license
|
use byteorder::{BigEndian, ByteOrder};
use nbt;
use std::io::Cursor;
#[derive(Debug)]
pub struct Chunk {
timestamp: u32,
blob: nbt::Blob,
}
impl Chunk {
pub fn new(timestamp: u32, data: Vec<u8>) -> std::io::Result<Chunk> {
let chunk_length = BigEndian::read_u32(&data[0..4]) as usize;
let compression_type = u8::from_be(data[4]);
let mut data_reader = Cursor::new(&data[5..chunk_length]);
let nbt_data = match compression_type {
1 => nbt::Blob::from_gzip_reader(&mut data_reader)?,
2 => nbt::Blob::from_zlib_reader(&mut data_reader)?,
_ => nbt::Blob::from_reader(&mut data_reader)?,
};
return Ok(Chunk {
timestamp: timestamp,
blob: nbt_data,
});
}
}
| true |
d1827d3f85c8e79749a0f2c183e4134b86d74ae0
|
Rust
|
femnad/leth
|
/src/main.rs
|
UTF-8
| 2,851 | 2.5625 | 3 |
[] |
no_license
|
extern crate regex;
extern crate skim;
extern crate structopt;
use std::collections::HashMap;
use std::io::Cursor;
use std::io::{self, Read};
use std::process::{Command, Stdio};
use regex::Regex;
use skim::prelude::*;
use structopt::StructOpt;
const LINE_SPLITTER: char = '=';
const URL_REGEX: &str = r"(http(s)?://[a-zA-Z0-9_/?+&.=@%#;~:-]+)";
#[derive(Debug, StructOpt)]
#[structopt(name = "leth", about = "URL extractor intended to be used within mutt")]
struct Opt {}
pub fn main() {
Opt::from_args();
let options = SkimOptionsBuilder::default()
.multi(true)
.bind(vec!["ctrl-k:kill-line"])
.build()
.unwrap();
let re = Regex::new(URL_REGEX).unwrap();
let mut buffer = String::new();
io::stdin().read_to_string(&mut buffer).unwrap();
let lines = buffer.split("\n");
let mut split_lines = false;
let mut split_line_buffer: Vec<&str> = Vec::new();
let mut merged_lines: Vec<String> = Vec::new();
for line in lines {
if line.len() == 0 {
continue
}
if line.ends_with(LINE_SPLITTER) {
let mergable = line.get(0..line.len() - 1).unwrap_or("");
split_line_buffer.push(mergable);
split_lines = true;
continue;
}
if split_lines {
split_lines = false;
split_line_buffer.push(line);
let merged_line = &split_line_buffer.join("");
merged_lines.push(merged_line.to_string());
split_line_buffer = Vec::new();
} else {
merged_lines.push(line.to_string());
}
}
let mut matches: HashMap<String, u8> = HashMap::new();
let mut match_index = 1;
for line in merged_lines {
let sanitized = line.replace("=3D", "=");
for capture in re.captures_iter(&sanitized) {
let url_match = capture.get(1).unwrap().as_str();
if matches.contains_key(url_match) {
continue;
}
matches.insert(url_match.to_string(), match_index);
match_index += 1;
}
}
let mut ordered_items: Vec<_> = matches.into_iter().collect();
ordered_items.sort_by(|a, b| a.1.cmp(&b.1));
let item_list: Vec<_> = ordered_items.iter().map(|item| item.0.as_str()).collect();
let items = item_list.join("\n");
let item_reader = SkimItemReader::default();
let items = item_reader.of_bufread(Cursor::new(items));
let output = Skim::run_with(&options, Some(items)).unwrap();
if output.is_abort {
return;
}
for item in output.selected_items.iter() {
let url = item.clone();
Command::new("firefox")
.arg(url.output().as_ref())
.stdout(Stdio::null())
.stderr(Stdio::null())
.spawn()
.unwrap();
}
}
| true |
45ab8b0a952d1ebd042a38d187027ee3fb551320
|
Rust
|
DanMaycock/fortunes_algorithm_rs
|
/src/boundingbox.rs
|
UTF-8
| 17,186 | 3.359375 | 3 |
[] |
no_license
|
use super::*;
use std::f64;
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Side {
Left,
Right,
Top,
Bottom,
None,
}
impl Side {
// Iterates round the sides in an anti clockwise direction
fn next(self) -> Side {
match self {
Side::Left => Side::Bottom,
Side::Top => Side::Left,
Side::Right => Side::Top,
Side::Bottom => Side::Right,
Side::None => Side::None,
}
}
}
#[derive(Debug)]
pub struct BoundingBox {
left: f64,
right: f64,
top: f64,
bottom: f64,
}
impl BoundingBox {
pub fn new(left: f64, right: f64, top: f64, bottom: f64) -> Self {
BoundingBox {
left,
right,
top,
bottom,
}
}
pub fn contains(&self, point: &cgmath::Point2<f64>) -> bool {
(point.x >= self.left)
&& (point.x <= self.right)
&& (point.y >= self.top)
&& (point.y <= self.bottom)
}
pub fn get_intersection(&self, origin: &cgmath::Point2<f64>, direction: &cgmath::Vector2<f64>) -> (cgmath::Point2<f64>, Side) {
assert!(self.contains(origin));
let (t1, side1) = if direction.x < 0.0 {
((self.right - origin.x) / direction.x, Side::Right)
} else if direction.x > 0.0 {
((self.left - origin.x) / direction.x, Side::Left)
} else {
(f64::MIN, Side::None)
};
let (t2, side2) = if direction.y > 0.0 {
((self.top - origin.y) / direction.y, Side::Top)
} else if direction.y < 0.0 {
((self.bottom - origin.y) / direction.y, Side::Bottom)
} else {
(f64::MAX, Side::None)
};
let (t, side) = if t2.abs() < t1.abs() {
(t2, side2)
} else {
(t1, side1)
};
(*origin + (*direction * t), side)
}
pub fn get_corner(&self, side_1: Side, side_2: Side) -> cgmath::Point2<f64> {
match (side_1, side_2) {
(Side::Top, Side::Left) | (Side::Left, Side::Top) => self.get_top_left(),
(Side::Top, Side::Right) | (Side::Right, Side::Top) => self.get_top_right(),
(Side::Bottom, Side::Left) | (Side::Left, Side::Bottom) => self.get_bottom_left(),
(Side::Bottom, Side::Right) | (Side::Right, Side::Bottom) => self.get_bottom_right(),
_ => panic!("Invalid corner sides"),
}
}
pub fn get_top_left(&self) -> cgmath::Point2<f64> {
cgmath::Point2::new(self.left, self.top)
}
pub fn get_top_right(&self) -> cgmath::Point2<f64> {
cgmath::Point2::new(self.right, self.top)
}
pub fn get_bottom_left(&self) -> cgmath::Point2<f64> {
cgmath::Point2::new(self.left, self.bottom)
}
pub fn get_bottom_right(&self) -> cgmath::Point2<f64> {
cgmath::Point2::new(self.right, self.bottom)
}
pub fn get_intersections(
&self,
origin: &cgmath::Point2<f64>,
destination: &cgmath::Point2<f64>,
) -> Vec<(cgmath::Point2<f64>, Side)> {
let mut intersections = vec![];
let direction = *destination - *origin;
// Left
if origin.x < self.left || destination.x < self.left {
let t = (self.left - origin.x) / direction.x;
if t > 0.0 && t < 1.0 {
let intersection_pt = *origin + (direction * t);
if intersection_pt.y >= self.top && intersection_pt.y <= self.bottom {
intersections.push((intersection_pt, Side::Left));
}
}
}
// Right
if origin.x > self.right || destination.x > self.right {
let t = (self.right - origin.x) / direction.x;
if t > 0.0 && t < 1.0 {
let intersection_pt = *origin + (direction * t);
if intersection_pt.y >= self.top && intersection_pt.y <= self.bottom {
intersections.push((intersection_pt, Side::Right));
}
}
}
// Top
if origin.y < self.top || destination.y < self.top {
let t = (self.top - origin.y) / direction.y;
if t > 0.0 && t < 1.0 {
let intersection_pt = *origin + (direction * t);
if intersection_pt.x <= self.right && intersection_pt.x >= self.left {
intersections.push((intersection_pt, Side::Top));
}
}
}
// Bottom
if origin.y > self.bottom || destination.y > self.bottom {
let t = (self.bottom - origin.y) / direction.y;
if t > 0.0 && t < 1.0 {
let intersection_pt = *origin + (direction * t);
if intersection_pt.x <= self.right && intersection_pt.x >= self.left {
intersections.push((intersection_pt, Side::Bottom));
}
}
}
intersections
}
pub fn intersect_diagram(&self, voronoi: &mut Diagram) {
let mut vertices_to_remove = vec![];
let mut half_edges_to_remove = vec![];
let mut processed_half_edges = vec![];
for face in voronoi.get_face_indices() {
let start_half_edge = voronoi.get_face_outer_component(face).unwrap();
let mut outgoing_half_edge: Option<HalfEdgeKey> = None;
let mut outgoing_side = Side::None;
let mut incoming_half_edge: Option<HalfEdgeKey> = None;
let mut incoming_side = Side::None;
let mut half_edge = start_half_edge;
loop {
let origin = voronoi.get_half_edge_origin(half_edge).unwrap();
let destination = voronoi.get_half_edge_destination(half_edge).unwrap();
let inside = self.contains(&voronoi.get_vertex_point(origin));
let next_inside = self.contains(&voronoi.get_vertex_point(destination));
let next_half_edge = voronoi.get_half_edge_next(half_edge).unwrap();
if !inside || !next_inside {
let intersections = self.get_intersections(
&voronoi.get_vertex_point(origin),
&voronoi.get_vertex_point(destination),
);
if !inside && !next_inside {
// Both points are outside the box
if intersections.is_empty() {
// The edge is outside the box
vertices_to_remove.push(origin);
if Some(half_edge) == voronoi.get_face_outer_component(face) {
// Update the outer component before we delete the half_edge
voronoi.set_face_outer_component(
face,
voronoi.get_half_edge_next(half_edge),
);
}
half_edges_to_remove.push(half_edge);
} else if intersections.len() == 2 {
// The edge crosses the bounds of the box twice
vertices_to_remove.push(origin);
let half_edge_twin = voronoi.get_half_edge_twin(half_edge);
if half_edge_twin.is_some()
&& processed_half_edges.contains(&half_edge_twin.unwrap())
{
voronoi.set_half_edge_origin(
half_edge,
voronoi.get_half_edge_destination(half_edge_twin.unwrap()),
);
voronoi.set_half_edge_destination(
half_edge,
voronoi.get_half_edge_origin(half_edge_twin.unwrap()),
);
} else {
let origin = voronoi.add_vertex(intersections[0].0);
let destination = voronoi.add_vertex(intersections[1].0);
voronoi.set_half_edge_origin(half_edge, Some(origin));
voronoi.set_half_edge_destination(half_edge, Some(destination));
}
if outgoing_half_edge.is_some() {
self.link_vertices(
voronoi,
outgoing_half_edge.unwrap(),
outgoing_side,
half_edge,
intersections[0].1,
)
}
outgoing_half_edge = Some(half_edge);
outgoing_side = intersections[1].1;
processed_half_edges.push(half_edge);
} else {
panic!(
"An edge that begins inside the box but ends outside can only have a single intersection, origin {:?}, destination {:?}",
&voronoi.get_vertex_point(origin),
&voronoi.get_vertex_point(destination)
);
}
} else if inside && !next_inside {
// Edge is going outside the box
if intersections.len() == 1 {
let half_edge_twin = voronoi.get_half_edge_twin(half_edge);
if half_edge_twin.is_some()
&& processed_half_edges.contains(&half_edge_twin.unwrap())
{
voronoi.set_half_edge_destination(
half_edge,
voronoi.get_half_edge_origin(half_edge_twin.unwrap()),
);
} else {
let destination = voronoi.add_vertex(intersections[0].0);
voronoi.set_half_edge_destination(half_edge, Some(destination));
}
if incoming_half_edge.is_some() {
self.link_vertices(
voronoi,
half_edge,
intersections[0].1,
incoming_half_edge.unwrap(),
incoming_side,
)
}
outgoing_half_edge = Some(half_edge);
outgoing_side = intersections[0].1;
processed_half_edges.push(half_edge);
} else {
panic!(
"An edge that begins inside the box but ends outside can only have a single intersection, origin {:?}, destination {:?}",
&voronoi.get_vertex_point(origin),
&voronoi.get_vertex_point(destination)
);
}
} else if !inside && next_inside {
// Edge is coming into the box
if intersections.len() == 1 {
vertices_to_remove.push(origin);
let half_edge_twin = voronoi.get_half_edge_twin(half_edge);
if half_edge_twin.is_some()
&& processed_half_edges.contains(&half_edge_twin.unwrap())
{
voronoi.set_half_edge_origin(
half_edge,
voronoi.get_half_edge_destination(half_edge_twin.unwrap()),
);
} else {
let origin = voronoi.add_vertex(intersections[0].0);
voronoi.set_half_edge_origin(half_edge, Some(origin));
}
if outgoing_half_edge.is_some() {
self.link_vertices(
voronoi,
outgoing_half_edge.unwrap(),
outgoing_side,
half_edge,
intersections[0].1,
)
}
incoming_half_edge = Some(half_edge);
incoming_side = intersections[0].1;
processed_half_edges.push(half_edge);
} else {
panic!(
"An edge that begins inside the box but ends outside can only have a single intersection, origin {:?}, destination {:?}",
&voronoi.get_vertex_point(origin),
&voronoi.get_vertex_point(destination)
);
}
}
}
if next_half_edge == start_half_edge {
// Back where we started so break out of the loop
break;
}
half_edge = next_half_edge;
}
}
for half_edge in half_edges_to_remove {
voronoi.remove_half_edge(half_edge);
}
for vertex in vertices_to_remove {
voronoi.remove_vertex(vertex);
}
}
pub fn link_vertices(
&self,
voronoi: &mut Diagram,
start_edge: HalfEdgeKey,
start_side: Side,
end_edge: HalfEdgeKey,
end_side: Side,
) {
let mut edge = start_edge;
let mut side = start_side;
let incident_face = voronoi.get_half_edge_incident_face(edge).unwrap();
while side != end_side {
let new_edge = voronoi.add_half_edge(incident_face);
voronoi.link_half_edges(edge, new_edge);
voronoi.set_half_edge_origin(new_edge, voronoi.get_half_edge_destination(edge));
let destination = voronoi.add_vertex(self.get_corner(side, side.next()));
voronoi.set_half_edge_destination(new_edge, Some(destination));
side = side.next();
edge = new_edge;
}
let new_edge = voronoi.add_half_edge(incident_face);
voronoi.link_half_edges(edge, new_edge);
voronoi.link_half_edges(new_edge, end_edge);
voronoi.set_half_edge_origin(new_edge, voronoi.get_half_edge_destination(edge));
voronoi.set_half_edge_destination(new_edge, voronoi.get_half_edge_origin(end_edge));
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn contains_test() {
let bbox = BoundingBox::new(0.0, 1.0, 0.0, 1.0);
assert_eq!(bbox.contains(&cgmath::Point2::new(0.5, 0.5)), true);
assert_eq!(bbox.contains(&cgmath::Point2::new(1.5, 0.5)), false);
assert_eq!(bbox.contains(&cgmath::Point2::new(-0.5, 0.5)), false);
assert_eq!(bbox.contains(&cgmath::Point2::new(0.5, 1.5)), false);
assert_eq!(bbox.contains(&cgmath::Point2::new(0.5, -0.5)), false);
}
#[test]
fn intersections_test() {
let bbox = BoundingBox::new(0.0, 1.0, 0.0, 1.0);
let origin = cgmath::Point2::new(1.5, 0.5);
let destination = cgmath::Point2::new(0.5, 0.5);
let intersections = bbox.get_intersections(&origin, &destination);
assert_eq!(intersections.len(), 1);
let origin = cgmath::Point2::new(0.5, 1.5);
let destination = cgmath::Point2::new(0.5, 0.5);
let intersections = bbox.get_intersections(&origin, &destination);
assert_eq!(intersections.len(), 1);
let origin = cgmath::Point2::new(0.5, -0.5);
let destination = cgmath::Point2::new(0.5, 0.5);
let intersections = bbox.get_intersections(&origin, &destination);
assert_eq!(intersections.len(), 1);
let origin = cgmath::Point2::new(-0.5, 0.5);
let destination = cgmath::Point2::new(0.5, 0.5);
let intersections = bbox.get_intersections(&origin, &destination);
assert_eq!(intersections.len(), 1);
let origin = cgmath::Point2::new(-0.5, 0.5);
let destination = cgmath::Point2::new(1.5, 0.5);
let intersections = bbox.get_intersections(&origin, &destination);
assert_eq!(intersections.len(), 2);
let origin = cgmath::Point2::new(0.5, -0.5);
let destination = cgmath::Point2::new(0.5, 1.5);
let intersections = bbox.get_intersections(&origin, &destination);
assert_eq!(intersections.len(), 2);
}
}
| true |
13aae36a5299459fc447b9505026e6a796e511ad
|
Rust
|
Xudong-Huang/radiotap
|
/src/lib.rs
|
UTF-8
| 12,446 | 3.234375 | 3 |
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! A parser for the [Radiotap](http://www.radiotap.org/) capture format.
//!
//! # Usage
//!
//! The `Radiotap::from_bytes(&capture)` constructor will parse all present
//! fields into a [Radiotap](struct.Radiotap.html) struct:
//!
//! ```
//! use radiotap::Radiotap;
//!
//! fn main() {
//! let capture = [
//! 0, 0, 56, 0, 107, 8, 52, 0, 185, 31, 155, 154, 0, 0, 0, 0, 20, 0, 124, 21, 64, 1, 213,
//! 166, 1, 0, 0, 0, 64, 1, 1, 0, 124, 21, 100, 34, 249, 1, 0, 0, 0, 0, 0, 0, 255, 1, 80,
//! 4, 115, 0, 0, 0, 1, 63, 0, 0,
//! ];
//!
//! let radiotap = Radiotap::from_bytes(&capture).unwrap();
//! println!("{:?}", radiotap.vht);
//! }
//! ```
//!
//! If you just want to parse a few specific fields from the Radiotap capture
//! you can create an iterator using `RadiotapIterator::from_bytes(&capture)`:
//!
//! ```
//! use radiotap::{field, RadiotapIterator};
//!
//! fn main() {
//! let capture = [
//! 0, 0, 56, 0, 107, 8, 52, 0, 185, 31, 155, 154, 0, 0, 0, 0, 20, 0, 124, 21, 64, 1, 213,
//! 166, 1, 0, 0, 0, 64, 1, 1, 0, 124, 21, 100, 34, 249, 1, 0, 0, 0, 0, 0, 0, 255, 1, 80,
//! 4, 115, 0, 0, 0, 1, 63, 0, 0,
//! ];
//!
//! for element in RadiotapIterator::from_bytes(&capture).unwrap() {
//! match element {
//! Ok((field::Kind::VHT, data)) => {
//! let vht: field::VHT = field::from_bytes(data).unwrap();
//! println!("{:?}", vht);
//! }
//! _ => {}
//! }
//! }
//! }
//! ```
pub mod field;
use std::{io::Cursor, result};
use quick_error::quick_error;
use crate::field::*;
quick_error! {
/// All errors returned and used by the radiotap module.
#[derive(Debug)]
pub enum Error {
/// The internal cursor on the data returned an IO error.
ParseError(err: std::io::Error) {
from()
source(err)
description(err.description())
}
/// The given data is not a complete Radiotap capture.
IncompleteError {
display("The given data is not a complete Radiotap capture")
}
/// The given data is shorter than the amount specified in the Radiotap header.
InvalidLength {
display("The given data is shorter than the amount specified in the Radiotap header")
}
/// The given data is not a valid Radiotap capture.
InvalidFormat {
display("The given data is not a valid Radiotap capture")
}
/// Unsupported Radiotap header version.
UnsupportedVersion {
display("Unsupported Radiotap header version")
}
/// Unsupported Radiotap field.
UnsupportedField {
display("Unsupported Radiotap field")
}
}
}
type Result<T> = result::Result<T, Error>;
/// A trait to align an offset to particular word size, usually 1, 2, 4, or 8.
trait Align {
/// Aligns the offset to `align` size.
fn align(&mut self, align: u64);
}
impl<T> Align for Cursor<T> {
/// Aligns the Cursor position to `align` size.
fn align(&mut self, align: u64) {
let p = self.position();
self.set_position((p + align - 1) & !(align - 1));
}
}
/// Represents an unparsed Radiotap capture format, only the header field is
/// parsed.
#[derive(Debug, Clone)]
pub struct RadiotapIterator<'a> {
header: Header,
data: &'a [u8],
}
impl<'a> RadiotapIterator<'a> {
pub fn from_bytes(input: &'a [u8]) -> Result<RadiotapIterator<'a>> {
Ok(RadiotapIterator::parse(input)?.0)
}
pub fn parse(input: &'a [u8]) -> Result<(RadiotapIterator<'a>, &'a [u8])> {
let header: Header = from_bytes(input)?;
let (data, rest) = input.split_at(header.length);
Ok((RadiotapIterator { header, data }, rest))
}
}
/// An iterator over Radiotap fields.
#[doc(hidden)]
#[derive(Debug, Clone)]
pub struct RadiotapIteratorIntoIter<'a> {
present: Vec<Kind>,
cursor: Cursor<&'a [u8]>,
}
impl<'a> IntoIterator for &'a RadiotapIterator<'a> {
type IntoIter = RadiotapIteratorIntoIter<'a>;
type Item = Result<(Kind, &'a [u8])>;
fn into_iter(self) -> Self::IntoIter {
let present = self.header.present.iter().rev().cloned().collect();
let mut cursor = Cursor::new(self.data);
cursor.set_position(self.header.size as u64);
RadiotapIteratorIntoIter { present, cursor }
}
}
impl<'a> IntoIterator for RadiotapIterator<'a> {
type IntoIter = RadiotapIteratorIntoIter<'a>;
type Item = Result<(Kind, &'a [u8])>;
fn into_iter(self) -> Self::IntoIter {
let present = self.header.present.iter().rev().cloned().collect();
let mut cursor = Cursor::new(self.data);
cursor.set_position(self.header.size as u64);
RadiotapIteratorIntoIter { present, cursor }
}
}
impl<'a> Iterator for RadiotapIteratorIntoIter<'a> {
type Item = Result<(Kind, &'a [u8])>;
fn next(&mut self) -> Option<Self::Item> {
match self.present.pop() {
Some(mut kind) => {
// Align the cursor to the current field's needed alignment.
self.cursor.align(kind.align());
let mut start = self.cursor.position() as usize;
let mut end = start + kind.size();
// The header lied about how long the body was
if end > self.cursor.get_ref().len() {
Some(Err(Error::IncompleteError))
} else {
// Switching to a vendor namespace, and we don't know how to handle
// so we just return the entire vendor namespace section
if kind == Kind::VendorNamespace(None) {
match VendorNamespace::from_bytes(&self.cursor.get_ref()[start..end]) {
Ok(vns) => {
start += kind.size();
end += vns.skip_length as usize;
kind = Kind::VendorNamespace(Some(vns));
}
Err(e) => return Some(Err(e)),
}
}
let data = &self.cursor.get_ref()[start..end];
self.cursor.set_position(end as u64);
Some(Ok((kind, data)))
}
}
None => None,
}
}
}
impl Default for Header {
fn default() -> Header {
Header {
version: 0,
length: 8,
present: Vec::new(),
size: 8,
}
}
}
/// Represents a parsed Radiotap capture, including the parsed header and all
/// fields as Option members.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct Radiotap {
pub header: Header,
pub tsft: Option<TSFT>,
pub flags: Option<Flags>,
pub rate: Option<Rate>,
pub channel: Option<Channel>,
pub fhss: Option<FHSS>,
pub antenna_signal: Option<AntennaSignal>,
pub antenna_noise: Option<AntennaNoise>,
pub lock_quality: Option<LockQuality>,
pub tx_attenuation: Option<TxAttenuation>,
pub tx_attenuation_db: Option<TxAttenuationDb>,
pub tx_power: Option<TxPower>,
pub antenna: Option<Antenna>,
pub antenna_signal_db: Option<AntennaSignalDb>,
pub antenna_noise_db: Option<AntennaNoiseDb>,
pub rx_flags: Option<RxFlags>,
pub tx_flags: Option<TxFlags>,
pub rts_retries: Option<RTSRetries>,
pub data_retries: Option<DataRetries>,
pub xchannel: Option<XChannel>,
pub mcs: Option<MCS>,
pub ampdu_status: Option<AMPDUStatus>,
pub vht: Option<VHT>,
pub timestamp: Option<Timestamp>,
}
impl Radiotap {
/// Returns the parsed [Radiotap](struct.Radiotap.html) from an input byte
/// array.
pub fn from_bytes(input: &[u8]) -> Result<Radiotap> {
Ok(Radiotap::parse(input)?.0)
}
/// Returns the parsed [Radiotap](struct.Radiotap.html) and remaining data
/// from an input byte array.
pub fn parse(input: &[u8]) -> Result<(Radiotap, &[u8])> {
let (iterator, rest) = RadiotapIterator::parse(input)?;
let mut radiotap = Radiotap {
header: iterator.header.clone(),
..Default::default()
};
for result in &iterator {
let (field_kind, data) = result?;
match field_kind {
Kind::TSFT => radiotap.tsft = from_bytes_some(data)?,
Kind::Flags => radiotap.flags = from_bytes_some(data)?,
Kind::Rate => radiotap.rate = from_bytes_some(data)?,
Kind::Channel => radiotap.channel = from_bytes_some(data)?,
Kind::FHSS => radiotap.fhss = from_bytes_some(data)?,
Kind::AntennaSignal => radiotap.antenna_signal = from_bytes_some(data)?,
Kind::AntennaNoise => radiotap.antenna_noise = from_bytes_some(data)?,
Kind::LockQuality => radiotap.lock_quality = from_bytes_some(data)?,
Kind::TxAttenuation => radiotap.tx_attenuation = from_bytes_some(data)?,
Kind::TxAttenuationDb => radiotap.tx_attenuation_db = from_bytes_some(data)?,
Kind::TxPower => radiotap.tx_power = from_bytes_some(data)?,
Kind::Antenna => radiotap.antenna = from_bytes_some(data)?,
Kind::AntennaSignalDb => radiotap.antenna_signal_db = from_bytes_some(data)?,
Kind::AntennaNoiseDb => radiotap.antenna_noise_db = from_bytes_some(data)?,
Kind::RxFlags => radiotap.rx_flags = from_bytes_some(data)?,
Kind::TxFlags => radiotap.tx_flags = from_bytes_some(data)?,
Kind::RTSRetries => radiotap.rts_retries = from_bytes_some(data)?,
Kind::DataRetries => radiotap.data_retries = from_bytes_some(data)?,
Kind::XChannel => radiotap.xchannel = from_bytes_some(data)?,
Kind::MCS => radiotap.mcs = from_bytes_some(data)?,
Kind::AMPDUStatus => radiotap.ampdu_status = from_bytes_some(data)?,
Kind::VHT => radiotap.vht = from_bytes_some(data)?,
Kind::Timestamp => radiotap.timestamp = from_bytes_some(data)?,
_ => {}
}
}
Ok((radiotap, rest))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn good_vendor() {
let frame = [
0, 0, 39, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
];
assert_eq!(
Radiotap::from_bytes(&frame).unwrap().rate.unwrap(),
Rate { value: 2.0 }
);
}
#[test]
fn bad_version() {
let frame = [
1, 0, 39, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
];
match Radiotap::from_bytes(&frame).unwrap_err() {
Error::UnsupportedVersion => {}
e => panic!("Error not UnsupportedVersion: {:?}", e),
};
}
#[test]
fn bad_header_length() {
let frame = [
0, 0, 40, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
];
match Radiotap::from_bytes(&frame).unwrap_err() {
Error::InvalidLength => {}
e => panic!("Error not InvalidLength: {:?}", e),
};
}
#[test]
fn bad_actual_length() {
let frame = [
0, 0, 39, 0, 47, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
];
match Radiotap::from_bytes(&frame).unwrap_err() {
Error::IncompleteError => {}
e => panic!("Error not IncompleteError: {:?}", e),
};
}
#[test]
fn bad_vendor() {
let frame = [
0, 0, 34, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
160, 0, 227, 5, 0, 0, 255, 255, 255, 255,
];
match Radiotap::from_bytes(&frame).unwrap_err() {
Error::IncompleteError => {}
e => panic!("Error not IncompleteError: {:?}", e),
};
}
}
| true |
f44f5a594c915e78aba56d4941978cd9c394ea29
|
Rust
|
BurntSushi/rust-analyzer
|
/xtask/src/codegen.rs
|
UTF-8
| 2,791 | 2.71875 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
//! We use code generation heavily in rust-analyzer.
//!
//! Rather then doing it via proc-macros, we use old-school way of just dumping
//! the source code.
//!
//! This module's submodules define specific bits that we generate.
mod gen_syntax;
mod gen_parser_tests;
mod gen_assists_docs;
use std::{mem, path::Path};
use crate::{not_bash::fs2, Result};
pub use self::{
gen_assists_docs::generate_assists_docs, gen_parser_tests::generate_parser_tests,
gen_syntax::generate_syntax,
};
const GRAMMAR_DIR: &str = "crates/ra_parser/src/grammar";
const OK_INLINE_TESTS_DIR: &str = "crates/ra_syntax/test_data/parser/inline/ok";
const ERR_INLINE_TESTS_DIR: &str = "crates/ra_syntax/test_data/parser/inline/err";
const SYNTAX_KINDS: &str = "crates/ra_parser/src/syntax_kind/generated.rs";
const AST_NODES: &str = "crates/ra_syntax/src/ast/generated/nodes.rs";
const AST_TOKENS: &str = "crates/ra_syntax/src/ast/generated/tokens.rs";
const ASSISTS_DIR: &str = "crates/ra_assists/src/handlers";
const ASSISTS_TESTS: &str = "crates/ra_assists/src/doc_tests/generated.rs";
const ASSISTS_DOCS: &str = "docs/user/assists.md";
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Mode {
Overwrite,
Verify,
}
/// A helper to update file on disk if it has changed.
/// With verify = false,
fn update(path: &Path, contents: &str, mode: Mode) -> Result<()> {
match fs2::read_to_string(path) {
Ok(ref old_contents) if normalize(old_contents) == normalize(contents) => {
return Ok(());
}
_ => (),
}
if mode == Mode::Verify {
anyhow::bail!("`{}` is not up-to-date", path.display());
}
eprintln!("updating {}", path.display());
fs2::write(path, contents)?;
return Ok(());
fn normalize(s: &str) -> String {
s.replace("\r\n", "\n")
}
}
fn extract_comment_blocks(text: &str) -> Vec<Vec<String>> {
do_extract_comment_blocks(text, false)
}
fn extract_comment_blocks_with_empty_lines(text: &str) -> Vec<Vec<String>> {
do_extract_comment_blocks(text, true)
}
fn do_extract_comment_blocks(text: &str, allow_blocks_with_empty_lines: bool) -> Vec<Vec<String>> {
let mut res = Vec::new();
let prefix = "// ";
let lines = text.lines().map(str::trim_start);
let mut block = vec![];
for line in lines {
if line == "//" && allow_blocks_with_empty_lines {
block.push(String::new());
continue;
}
let is_comment = line.starts_with(prefix);
if is_comment {
block.push(line[prefix.len()..].to_string());
} else if !block.is_empty() {
res.push(mem::replace(&mut block, Vec::new()));
}
}
if !block.is_empty() {
res.push(mem::replace(&mut block, Vec::new()))
}
res
}
| true |
7ceb85523e95e3f83a1ecf50f5093ac8ad6e20c6
|
Rust
|
saschagrunert/craft
|
/src/sources/git/source.rs
|
UTF-8
| 7,727 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
use std::fmt::{self, Debug, Formatter};
use url::Url;
use dependency::Dependency;
use package::Package;
use package_id::PackageId;
use registry::Registry;
use source::{Source, SourceId, GitReference};
use sources::PathSource;
use sources::git::utils::{GitRemote, GitRevision};
use summary::Summary;
use util::hex::short_hash;
use util::{CraftResult, Config};
pub struct GitSource<'cfg> {
remote: GitRemote,
reference: GitReference,
source_id: SourceId,
path_source: Option<PathSource<'cfg>>,
rev: Option<GitRevision>,
ident: String,
config: &'cfg Config,
}
impl<'cfg> GitSource<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config) -> GitSource<'cfg> {
assert!(source_id.is_git(), "id is not git, id={}", source_id);
let remote = GitRemote::new(source_id.url());
let ident = ident(source_id.url());
let reference = match source_id.precise() {
Some(s) => GitReference::Rev(s.to_string()),
None => source_id.git_reference().unwrap().clone(),
};
GitSource {
remote: remote,
reference: reference,
source_id: source_id.clone(),
path_source: None,
rev: None,
ident: ident,
config: config,
}
}
pub fn url(&self) -> &Url {
self.remote.url()
}
pub fn read_packages(&mut self) -> CraftResult<Vec<Package>> {
if self.path_source.is_none() {
self.update()?;
}
self.path_source.as_mut().unwrap().read_packages()
}
}
fn ident(url: &Url) -> String {
let url = canonicalize_url(url);
let ident = url.path_segments().and_then(|mut s| s.next_back()).unwrap_or("");
let ident = if ident == "" { "_empty" } else { ident };
format!("{}-{}", ident, short_hash(&url))
}
// Some hacks and heuristics for making equivalent URLs hash the same
pub fn canonicalize_url(url: &Url) -> Url {
let mut url = url.clone();
// Strip a trailing slash
if url.path().ends_with('/') {
url.path_segments_mut().unwrap().pop_if_empty();
}
// HACKHACK: For github URL's specifically just lowercase
// everything. GitHub treats both the same, but they hash
// differently, and we're gonna be hashing them. This wants a more
// general solution, and also we're almost certainly not using the
// same case conversion rules that GitHub does. (#84)
if url.host_str() == Some("github.com") {
url.set_scheme("https").unwrap();
let path = url.path().to_lowercase();
url.set_path(&path);
}
// Repos generally can be accessed with or w/o '.git'
let needs_chopping = url.path().ends_with(".git");
if needs_chopping {
let last = {
let last = url.path_segments().unwrap().next_back().unwrap();
last[..last.len() - 4].to_owned()
};
url.path_segments_mut().unwrap().pop().push(&last);
}
url
}
impl<'cfg> Debug for GitSource<'cfg> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "git repo at {}", self.remote.url())?;
match self.reference.to_ref_string() {
Some(s) => write!(f, " ({})", s),
None => Ok(()),
}
}
}
impl<'cfg> Registry for GitSource<'cfg> {
fn query(&mut self, dep: &Dependency) -> CraftResult<Vec<Summary>> {
let src = self.path_source
.as_mut()
.expect("BUG: update() must be called before query()");
src.query(dep)
}
}
impl<'cfg> Source for GitSource<'cfg> {
fn update(&mut self) -> CraftResult<()> {
let lock = self.config
.git_path()
.open_rw(".craft-lock-git", self.config, "the git checkouts")?;
let db_path = lock.parent().join("db").join(&self.ident);
// Resolve our reference to an actual revision, and check if the
// database already has that revision. If it does, we just load a
// database pinned at that revision, and if we don't we issue an update
// to try to find the revision.
let actual_rev = self.remote.rev_for(&db_path, &self.reference);
let should_update = actual_rev.is_err() || self.source_id.precise().is_none();
let (repo, actual_rev) = if should_update {
self.config
.shell()
.status("Updating",
format!("git repository `{}`", self.remote.url()))?;
trace!("updating git source `{:?}`", self.remote);
let repo = self.remote.checkout(&db_path, &self.config)?;
let rev = repo.rev_for(&self.reference)?;
(repo, rev)
} else {
(self.remote.db_at(&db_path)?, actual_rev.unwrap())
};
let checkout_path = lock.parent()
.join("checkouts")
.join(&self.ident)
.join(actual_rev.to_string());
// Copy the database to the checkout location. After this we could drop
// the lock on the database as we no longer needed it, but we leave it
// in scope so the destructors here won't tamper with too much.
// Checkout is immutable, so we don't need to protect it with a lock once
// it is created.
repo.copy_to(actual_rev.clone(), &checkout_path, &self.config)?;
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
let path_source = PathSource::new_recursive(&checkout_path, &source_id, self.config);
self.path_source = Some(path_source);
self.rev = Some(actual_rev);
self.path_source.as_mut().unwrap().update()
}
fn download(&mut self, id: &PackageId) -> CraftResult<Package> {
trace!("getting packages for package id `{}` from `{:?}`",
id,
self.remote);
self.path_source
.as_mut()
.expect("BUG: update() must be called before get()")
.download(id)
}
fn fingerprint(&self, _pkg: &Package) -> CraftResult<String> {
Ok(self.rev.as_ref().unwrap().to_string())
}
}
#[cfg(test)]
mod test {
use url::Url;
use super::ident;
use util::ToUrl;
#[test]
pub fn test_url_to_path_ident_with_path() {
let ident = ident(&url("https://github.com/carlhuda/craft"));
assert!(ident.starts_with("craft-"));
}
#[test]
pub fn test_url_to_path_ident_without_path() {
let ident = ident(&url("https://github.com"));
assert!(ident.starts_with("_empty-"));
}
#[test]
fn test_canonicalize_idents_by_stripping_trailing_url_slash() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/"));
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston"));
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_lowercasing_github_urls() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston"));
let ident2 = ident(&url("https://github.com/pistondevelopers/piston"));
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_stripping_dot_git() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston"));
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git"));
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_different_protocls() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston"));
let ident2 = ident(&url("git://github.com/PistonDevelopers/piston"));
assert_eq!(ident1, ident2);
}
fn url(s: &str) -> Url {
s.to_url().unwrap()
}
}
| true |
389532a8c5679f057d28fc3f24c1f7b1c992debc
|
Rust
|
JordanElButler/labyrinth
|
/src/render_object.rs
|
UTF-8
| 2,970 | 2.578125 | 3 |
[] |
no_license
|
use crate::transform::Transform;
use crate::shader::{Program};
use crate::mesh::Mesh;
use crate::camera::Camera;
use crate::vertex::{Vertex};
use crate::resources::{ResourceKey, Resources};
use crate::light::Light;
use crate::material::{MaterialPropertyType, Material};
pub struct RenderObject {
pub transform: Transform,
pub program_key: ResourceKey,
pub mesh_key: ResourceKey,
pub material: Material,
}
impl RenderObject {
pub fn new(res: &Resources, transform: Transform, program_name: &str, mesh_name: &str, material: Material) -> Self {
RenderObject {
transform,
program_key: res.get_program_id_by_name(program_name).unwrap(),
mesh_key: res.get_mesh_id_by_name(mesh_name).unwrap(),
material,
}
}
pub fn draw(&self, res: &Resources, camera: &Camera) {
let program = res.get_program(self.program_key).unwrap();
program.set_used();
program.setMat4fv("proj", camera.proj_mat().as_ptr()).unwrap();
program.setMat4fv("view", camera.view_mat().as_ptr()).unwrap();
program.setMat4fv("model", self.transform.model_mat().as_ptr()).unwrap();
program.setMat4fv("model_rot", self.transform.model_rot().as_ptr()).unwrap();
self.material.load_shader_data(res, &program);
let mesh = res.get_mesh(self.mesh_key).unwrap();
mesh.load();
mesh.bind();
mesh.draw();
crate::gl_util::gl_dump_errors();
}
}
// dumb experiment with instancing
pub struct TerrainChunkObject {
pub transform: Transform,
pub program_key: ResourceKey,
pub mesh_key: ResourceKey,
pub material: Material,
}
impl TerrainChunkObject {
pub fn new(res: &Resources, transform: Transform, program_name: &str, mesh_name: &str, material: Material) -> Self {
TerrainChunkObject {
transform,
program_key: res.get_program_id_by_name(program_name).unwrap(),
mesh_key: res.get_mesh_id_by_name(mesh_name).unwrap(),
material,
}
}
pub fn draw(&self, res: &Resources, camera: &Camera) {
let program = res.get_program(self.program_key).unwrap();
program.set_used();
program.setMat4fv("proj", camera.proj_mat().as_ptr()).unwrap();
program.setMat4fv("view", camera.view_mat().as_ptr()).unwrap();
program.setMat4fv("model", self.transform.model_mat().as_ptr()).unwrap();
program.setMat4fv("model_rot", self.transform.model_rot().as_ptr()).unwrap();
program.setMat4fv("view_rot", camera.view_rot().as_ptr()).unwrap();
let numX = 100;
let numZ = 500;
program.set1i("numX", numX);
program.set1i("numZ", numZ);
self.material.load_shader_data(res, &program);
let mesh = res.get_mesh(self.mesh_key).unwrap();
mesh.load();
mesh.bind();
mesh.instanced_draw(numX * numZ);
crate::gl_util::gl_dump_errors();
}
}
| true |
2b44b3c71892ab682c637c5588708a41a2b7c515
|
Rust
|
bevyengine/bevy
|
/crates/bevy_time/src/stopwatch.rs
|
UTF-8
| 6,086 | 3.65625 | 4 |
[
"Apache-2.0",
"MIT",
"Zlib",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
use bevy_reflect::prelude::*;
use bevy_reflect::Reflect;
use bevy_utils::Duration;
/// A Stopwatch is a struct that track elapsed time when started.
///
/// # Examples
///
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// assert_eq!(stopwatch.elapsed_secs(), 0.0);
///
/// stopwatch.tick(Duration::from_secs_f32(1.0)); // tick one second
/// assert_eq!(stopwatch.elapsed_secs(), 1.0);
///
/// stopwatch.pause();
/// stopwatch.tick(Duration::from_secs_f32(1.0)); // paused stopwatches don't tick
/// assert_eq!(stopwatch.elapsed_secs(), 1.0);
///
/// stopwatch.reset(); // reset the stopwatch
/// assert!(stopwatch.paused());
/// assert_eq!(stopwatch.elapsed_secs(), 0.0);
/// ```
#[derive(Clone, Debug, Default, PartialEq, Eq, Reflect)]
#[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))]
#[reflect(Default)]
pub struct Stopwatch {
elapsed: Duration,
paused: bool,
}
impl Stopwatch {
/// Create a new unpaused `Stopwatch` with no elapsed time.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// let stopwatch = Stopwatch::new();
/// assert_eq!(stopwatch.elapsed_secs(), 0.0);
/// assert_eq!(stopwatch.paused(), false);
/// ```
pub fn new() -> Self {
Default::default()
}
/// Returns the elapsed time since the last [`reset`](Stopwatch::reset)
/// of the stopwatch.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.tick(Duration::from_secs(1));
/// assert_eq!(stopwatch.elapsed(), Duration::from_secs(1));
/// ```
///
/// # See Also
///
/// [`elapsed_secs`](Stopwatch::elapsed_secs) - if an `f32` value is desirable instead.
/// [`elapsed_secs_f64`](Stopwatch::elapsed_secs_f64) - if an `f64` is desirable instead.
#[inline]
pub fn elapsed(&self) -> Duration {
self.elapsed
}
/// Returns the elapsed time since the last [`reset`](Stopwatch::reset)
/// of the stopwatch, in seconds.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.tick(Duration::from_secs(1));
/// assert_eq!(stopwatch.elapsed_secs(), 1.0);
/// ```
///
/// # See Also
///
/// [`elapsed`](Stopwatch::elapsed) - if a `Duration` is desirable instead.
/// [`elapsed_secs_f64`](Stopwatch::elapsed_secs_f64) - if an `f64` is desirable instead.
#[inline]
pub fn elapsed_secs(&self) -> f32 {
self.elapsed().as_secs_f32()
}
/// Returns the elapsed time since the last [`reset`](Stopwatch::reset)
/// of the stopwatch, in seconds, as f64.
///
/// # See Also
///
/// [`elapsed`](Stopwatch::elapsed) - if a `Duration` is desirable instead.
/// [`elapsed_secs`](Stopwatch::elapsed_secs) - if an `f32` is desirable instead.
#[inline]
pub fn elapsed_secs_f64(&self) -> f64 {
self.elapsed().as_secs_f64()
}
/// Sets the elapsed time of the stopwatch.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.set_elapsed(Duration::from_secs_f32(1.0));
/// assert_eq!(stopwatch.elapsed_secs(), 1.0);
/// ```
#[inline]
pub fn set_elapsed(&mut self, time: Duration) {
self.elapsed = time;
}
/// Advance the stopwatch by `delta` seconds.
/// If the stopwatch is paused, ticking will not have any effect
/// on elapsed time.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.tick(Duration::from_secs_f32(1.5));
/// assert_eq!(stopwatch.elapsed_secs(), 1.5);
/// ```
pub fn tick(&mut self, delta: Duration) -> &Self {
if !self.paused() {
self.elapsed += delta;
}
self
}
/// Pauses the stopwatch. Any call to [`tick`](Stopwatch::tick) while
/// paused will not have any effect on the elapsed time.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.pause();
/// stopwatch.tick(Duration::from_secs_f32(1.5));
/// assert!(stopwatch.paused());
/// assert_eq!(stopwatch.elapsed_secs(), 0.0);
/// ```
#[inline]
pub fn pause(&mut self) {
self.paused = true;
}
/// Unpauses the stopwatch. Resume the effect of ticking on elapsed time.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.pause();
/// stopwatch.tick(Duration::from_secs_f32(1.0));
/// stopwatch.unpause();
/// stopwatch.tick(Duration::from_secs_f32(1.0));
/// assert!(!stopwatch.paused());
/// assert_eq!(stopwatch.elapsed_secs(), 1.0);
/// ```
#[inline]
pub fn unpause(&mut self) {
self.paused = false;
}
/// Returns `true` if the stopwatch is paused.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// let mut stopwatch = Stopwatch::new();
/// assert!(!stopwatch.paused());
/// stopwatch.pause();
/// assert!(stopwatch.paused());
/// stopwatch.unpause();
/// assert!(!stopwatch.paused());
/// ```
#[inline]
pub fn paused(&self) -> bool {
self.paused
}
/// Resets the stopwatch. The reset doesn't affect the paused state of the stopwatch.
///
/// # Examples
/// ```
/// # use bevy_time::*;
/// use std::time::Duration;
/// let mut stopwatch = Stopwatch::new();
/// stopwatch.tick(Duration::from_secs_f32(1.5));
/// stopwatch.reset();
/// assert_eq!(stopwatch.elapsed_secs(), 0.0);
/// ```
#[inline]
pub fn reset(&mut self) {
self.elapsed = Default::default();
}
}
| true |
0f31e8a6aabcc8a08dc02d42d205cd5ed2ac1a27
|
Rust
|
SnoozeTime/dicom-rs
|
/src/types.rs
|
UTF-8
| 18,317 | 3.125 | 3 |
[] |
no_license
|
//! Types specific to Dicom.
use crate::error::*;
use byteorder::{BigEndian, LittleEndian, ReadBytesExt};
use chrono::NaiveDate;
use std::fmt::{self, Display};
use std::io::Cursor;
use nom::number::Endianness;
use std::convert::TryFrom;
use crate::{Tag, ValueRepresentation};
use crate::parser::sq::Item;
use crate::img::DicomImage;
/// Represent a DICOM file
#[derive(Debug)]
pub struct DicomObject<'buf> {
/// All the tags that were parsed from .dcm file
pub elements: Vec<DataElement<'buf>>,
/// Transfer syntax extracted from x0002
pub transfer_syntax: TransferSyntax,
pub image: Option<DicomImage>,
}
impl<'buf> DicomObject<'buf> {
pub fn new(elements: Vec<DataElement<'buf>>, transfer_syntax: TransferSyntax) -> Self {
Self {
elements,
transfer_syntax,
image: None,
}
}
pub fn append(&mut self, mut elements: Vec<DataElement<'buf>>) {
self.elements.append(&mut elements);
}
pub fn elements(&self) -> &Vec<DataElement> {
&self.elements
}
pub fn get_element(&self, tag: Tag) -> Option<&DataElement> {
self.elements.iter().find(|el| el.tag == tag)
}
pub fn get<T: FromDicomValue + 'static>(&self, tag: Tag) -> T {
match self.try_get(tag) {
Ok(v) => v,
Err(e) => panic!(
"Cannot get value {:?} for tag {:?} = {}",
std::any::TypeId::of::<T>(),
tag,
e
),
}
}
pub fn try_get<T: FromDicomValue>(&self, tag: Tag) -> DicomResult<T> {
match self.get_element(tag) {
Some(ref el) => FromDicomValue::from_element(el, &self.transfer_syntax),
None => Err(DicomError::NoSuchTag(tag)),
}
}
}
/// Data elements are the basic unit of a DICOM object.
///
/// They are made of:
/// - a Tag that indicates what the element is referring to
/// - an optional ValueRepresentation that gives information about the type of the data.
/// - a buffer that represents something. When value representation is known, the library will be
/// able to parse automatically the value to the correct type. Otherwise, it has to be known by
/// the user.
#[derive(Debug)]
pub struct DataElement<'buf> {
pub tag: Tag,
pub vr: Option<ValueRepresentation>,
pub length: u32,
pub data: Value<'buf>,
}
#[derive(Debug)]
pub enum Value<'a> {
Buf(&'a [u8]),
Sequence(Vec<Item<'a>>)
}
/// Transfer syntax defines the endianness and the presence of value representation.
/// It is necessary during parsing. The transfer syntax is defined in the tag (0x0002,0x010) which
/// is at the beginning of the file
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct TransferSyntax {
endianness: Endianness,
is_vr_explicit: bool,
pub compression_scheme: Option<CompressionScheme>,
}
impl TransferSyntax {
pub fn with_compression_scheme(scheme: CompressionScheme) -> Self {
Self {
endianness: Endianness::Little,
is_vr_explicit: true,
compression_scheme: Some(scheme),
}
}
pub fn little_endian_explicit() -> Self {
Self {
endianness: Endianness::Little,
is_vr_explicit: true,
compression_scheme: None,
}
}
pub fn big_endian_explicit() -> Self {
Self {
endianness: Endianness::Big,
is_vr_explicit: true,
compression_scheme: None,
}
}
pub fn little_endian_implicit() -> Self {
Self {
endianness: Endianness::Little,
is_vr_explicit: false,
compression_scheme: None,
}
}
/// Return the endianness in which the dicom data was encoded.
pub fn endianness(&self) -> Endianness {
self.endianness
}
/// Return true if the value representation is explicit in data elements
pub fn is_vr_explicit(&self) -> bool {
self.is_vr_explicit
}
}
impl TryFrom<&Value<'_>> for TransferSyntax {
type Error = DicomError;
fn try_from(v: &Value) -> Result<Self, Self::Error> {
if let Value::Buf(bytes) = v {
let value = std::str::from_utf8(bytes)?;
// If a Value Field containing one or more UIDs is an odd number of bytes in length, the Value Field shall be padded with a single trailing NULL (00H) character to ensure that the Value Field is an even number of bytes in length. See Section 9 and Annex B for a complete specification and examples
// No comment
match value {
"1.2.840.10008.1.2.2\u{0}" => Ok(TransferSyntax::big_endian_explicit()),
"1.2.840.10008.1.2.1\u{0}" => Ok(TransferSyntax::little_endian_explicit()),
"1.2.840.10008.1.2\u{0}" => Ok(TransferSyntax::little_endian_implicit()),
"1.2.840.10008.1.2.4.90" => Ok(TransferSyntax::with_compression_scheme(
CompressionScheme::Jpeg2000Lossless,
)),
_ => Err(DicomError::TransferSyntaxNotSupported(String::from(value))),
}
} else {
Err(DicomError::ConvertTypeExpectBuf("TransferSyntax".to_string()))
}
}
}
/// Sometime DCM files contain the image as JPG...
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum CompressionScheme {
Jpeg2000Lossless,
}
/// Trait to convert a series of bytes to the correct type.
///
/// ```rust
/// use dicom::types::FromDicomValue;
/// use dicom::element::{Value, DataElement};
/// use dicom::{Tag, TransferSyntax};
/// let content = vec![0x00, 0x01];
/// let element = DataElement {
/// data: Value::Buf(&content),
/// vr: None,
/// length: 2,
/// tag: Tag::UNKNOWN(0,0)
/// };
/// let transfer_syntax = TransferSyntax::little_endian_implicit();
/// let value_u16: u16 = FromDicomValue::from_element(&element, &transfer_syntax).unwrap();
/// ```
pub trait FromDicomValue: Sized {
/// Parse the Dicom Type from the bytes
fn from_element(el: &DataElement, transfer_syntax: &TransferSyntax) -> DicomResult<Self>;
}
impl FromDicomValue for u16 {
fn from_element(
el: &DataElement,
transfer_syntax: &TransferSyntax,
) -> Result<Self, DicomError> {
if let Value::Buf(data) = el.data {
let mut rdr = Cursor::new(data);
let repr = if let Endianness::Little = transfer_syntax.endianness() {
rdr.read_u16::<LittleEndian>()?
} else {
rdr.read_u16::<BigEndian>()?
};
Ok(repr)
} else {
Err(DicomError::ConvertTypeExpectBuf("u16".to_string()))
}
}
}
/// Implementation of the trait for i32. It corresponds to the VR IS (integer string)
/// A string of characters representing an Integer in base-10 (decimal), shall contain only
/// the characters 0 - 9, with an optional leading "+" or "-".
/// It may be padded with leading and/or trailing spaces. Embedded spaces are not allowed.
///
/// The integer, n, represented shall be in the range:
///
/// -231<= n <= (231-1).
impl FromDicomValue for i32 {
fn from_element(el: &DataElement, _transfer_syntax: &TransferSyntax) -> Result<Self, DicomError> {
if let Value::Buf(data) = el.data {
let v = remove_whitespace(std::str::from_utf8(data)?);
let is: i32 = v.parse()?;
Ok(is)
} else {
Err(DicomError::ConvertTypeExpectBuf("i32".to_string()))
}
}
}
fn remove_whitespace(s: &str) -> String {
s.chars().filter(|c| !c.is_whitespace()).collect()
}
impl FromDicomValue for String {
fn from_element(
el: &DataElement,
_transfer_syntax: &TransferSyntax,
) -> Result<Self, DicomError> {
if let Value::Buf(data) = el.data {
let v = std::str::from_utf8(data)?;
Ok(v.to_string())
} else {
Err(DicomError::ConvertTypeExpectBuf("String".to_string()))
}
}
}
/// The same DICOM type :) When the VR is known, this will give the correct type.
#[derive(Debug)]
pub enum DicomType {
Str(Vec<String>),
UnsignedInt(Vec<u16>),
Date(Vec<NaiveDate>),
PersonName(Vec<String>),
Age(Vec<Age>),
SignedLong(Vec<i32>),
}
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum AgeFormat {
Day,
Week,
Month,
Year,
}
impl AgeFormat {
pub fn parse_from_str(repr: &str) -> DicomResult<Self> {
match repr {
"D" => Ok(AgeFormat::Day),
"W" => Ok(AgeFormat::Week),
"M" => Ok(AgeFormat::Month),
"Y" => Ok(AgeFormat::Year),
_ => Err(DicomError::ParseAS(format!(
"Unknown age format = {}",
repr
))),
}
}
}
impl Display for AgeFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
AgeFormat::Day => write!(f, "D"),
AgeFormat::Week => write!(f, "W"),
AgeFormat::Month => write!(f, "M"),
AgeFormat::Year => write!(f, "Y"),
}
}
}
/// Age formatted according to DCM protocol. It's always
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Age {
pub age: u8,
pub format: AgeFormat,
}
impl Display for Age {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:03}{}", self.age, self.format)
}
}
impl Age {
pub fn parse_from_str(repr: &str) -> DicomResult<Age> {
if repr.len() != 4 {
return Err(DicomError::ParseAS(format!(
"The length of the Age String should be 4 (got {})",
repr.len()
)));
}
let age: u8 = repr[0..3]
.parse()
.map_err(|e| DicomError::ParseAS(format!("Cannot get integer = {:?}", e)))?;
let format = AgeFormat::parse_from_str(&repr[3..])?;
Ok(Age { age, format })
}
}
impl FromDicomValue for Age {
fn from_element(
el: &DataElement,
_transfer_syntax: &TransferSyntax,
) -> Result<Self, DicomError> {
if let Value::Buf(data) = el.data {
let repr = std::str::from_utf8(data)?;
let v = Age::parse_from_str(repr)?;
Ok(v)
} else {
Err(DicomError::ConvertTypeExpectBuf("Age".to_string()))
}
}
}
impl FromDicomValue for NaiveDate {
fn from_element(
el: &DataElement,
_transfer_syntax: &TransferSyntax,
) -> Result<Self, DicomError> {
if let Value::Buf(data) = el.data {
let repr = std::str::from_utf8(data)?;
let dt = NaiveDate::parse_from_str(repr, "%Y%m%d")?;
Ok(dt)
} else {
Err(DicomError::ConvertTypeExpectBuf("NaiveDate".to_string()))
}
}
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct PersonName(pub Vec<String>);
impl FromDicomValue for PersonName {
fn from_element(
el: &DataElement,
_transfer_syntax: &TransferSyntax,
) -> Result<Self, DicomError> {
if let Value::Buf(data) = el.data {
let v = std::str::from_utf8(data)?
.to_string()
.split('^')
.map(|s| s.to_owned())
.collect::<Vec<_>>();
Ok(PersonName(v))
} else {
Err(DicomError::ConvertTypeExpectBuf("PersonName".to_string()))
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::tag::Tag;
#[test]
fn parse_years() {
let repr = "014Y";
let age = Age::parse_from_str(repr);
assert!(age.is_ok());
let age = age.unwrap();
assert_eq!(14, age.age);
assert_eq!(AgeFormat::Year, age.format);
}
#[test]
fn parse_months() {
let repr = "114M";
let age = Age::parse_from_str(repr);
assert!(age.is_ok());
let age = age.unwrap();
assert_eq!(114, age.age);
assert_eq!(AgeFormat::Month, age.format);
}
#[test]
fn parse_days() {
let repr = "010D";
let age = Age::parse_from_str(repr);
assert!(age.is_ok());
let age = age.unwrap();
assert_eq!(10, age.age);
assert_eq!(AgeFormat::Day, age.format);
}
#[test]
fn parse_weeks() {
let repr = "004W";
let age = Age::parse_from_str(repr);
assert!(age.is_ok());
let age = age.unwrap();
assert_eq!(4, age.age);
assert_eq!(AgeFormat::Week, age.format);
}
#[test]
fn parse_wrong_length() {
let repr = "004W11";
let age = Age::parse_from_str(repr);
assert!(age.is_err());
let err = age.err().unwrap();
assert_eq!(
"Cannot parse AS to Age = The length of the Age String should be 4 (got 6)",
format!("{}", err).as_str()
);
let repr = "4W";
let age = Age::parse_from_str(repr);
assert!(age.is_err());
let err = age.err().unwrap();
assert_eq!(
"Cannot parse AS to Age = The length of the Age String should be 4 (got 2)",
format!("{}", err).as_str()
);
}
#[test]
fn parse_wrong_uint() {
let repr = "0-4W";
let age = Age::parse_from_str(repr);
assert!(age.is_err());
let err = age.err().unwrap();
assert_eq!(
"Cannot parse AS to Age = Cannot get integer = ParseIntError { kind: InvalidDigit }",
format!("{}", err).as_str()
);
}
#[test]
fn parse_wrong_fmt() {
let repr = "000V";
let age = Age::parse_from_str(repr);
assert!(age.is_err());
let err = age.err().unwrap();
assert_eq!(
"Cannot parse AS to Age = Unknown age format = V",
format!("{}", err).as_str()
);
}
#[test]
fn format_age() {
assert_eq!(
"245W",
&format!(
"{}",
Age {
age: 245,
format: AgeFormat::Week
}
)
);
assert_eq!(
"025Y",
&format!(
"{}",
Age {
age: 25,
format: AgeFormat::Year
}
)
);
assert_eq!(
"001D",
&format!(
"{}",
Age {
age: 1,
format: AgeFormat::Day
}
)
);
assert_eq!(
"020M",
&format!(
"{}",
Age {
age: 20,
format: AgeFormat::Month
}
)
);
}
#[test]
fn from_el_u16() {
let bytes: Vec<u8> = vec![8,0];
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(&bytes),
vr: None,
};
let v: Result<u16, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(8, v.unwrap());
}
#[test]
fn from_el_age() {
let age = Age {
age: 5,
format: AgeFormat::Year,
};
let age_bytes = age.to_string();
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(age_bytes.as_bytes()),
vr: None,
};
let v: Result<Age, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(age, v.unwrap());
}
#[test]
fn from_el_date() {
let date = NaiveDate::from_ymd(2020, 2, 3);
let date_bytes = String::from("20200203");
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(date_bytes.as_bytes()),
vr: None,
};
let v: Result<NaiveDate, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(date, v.unwrap());
}
#[test]
fn from_el_name() {
let expected = PersonName(vec!["BENOIT".to_owned(), "EUDIER".to_owned()]);
let name_bytes = String::from("BENOIT^EUDIER");
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(name_bytes.as_bytes()),
vr: None,
};
let v: Result<PersonName, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(expected, v.unwrap());
}
#[test]
fn from_el_is_positivewithplus() {
let expected = 10i32;
let bytes = String::from(" +10 ");
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(bytes.as_bytes()),
vr: None,
};
let v: Result<i32, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(expected, v.unwrap());
}
#[test]
fn from_el_is_positivewithoutplus() {
let expected = 10i32;
let bytes = String::from(" 10 ");
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(bytes.as_bytes()),
vr: None,
};
let v: Result<i32, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(expected, v.unwrap());
}
#[test]
fn from_el_is_negative() {
let expected = -10i32;
let bytes = String::from(" -10 ");
let el = DataElement {
tag: Tag::x0002x0010,
length: 0,
data: Value::Buf(bytes.as_bytes()),
vr: None,
};
let v: Result<i32, _> =
FromDicomValue::from_element(&el, &TransferSyntax::little_endian_implicit());
assert!(v.is_ok());
assert_eq!(expected, v.unwrap());
}
}
| true |
81670592c984bdb162a573631e1285ecd30b8e60
|
Rust
|
xiuxiu62/rust-workshop
|
/hangman/src/main.rs
|
UTF-8
| 219 | 2.546875 | 3 |
[] |
no_license
|
mod game;
use game::Session;
fn main() {
let mut game = Session::new("Hello World", 5);
match game.guess('c') {
Err(e) => std::panic::panic_any(e),
Ok(_) => {}
};
game.test_display();
}
| true |
9e5a5ce8672057617036f89259af528fd07a881a
|
Rust
|
takatori/rust-sample
|
/2017/rust_by_example/src/casting/casting.rs
|
UTF-8
| 1,671 | 3.671875 | 4 |
[] |
no_license
|
// オーバーフローを起こすようなキャスティングによる警告を無視
#![allow(overflowing_literals)]
fn main() {
let decimal = 65.4321_f32;
// エラー! 暗黙的な型変換はできない。
let integer: u8 = decimal;
// 明示的な型変換
let integer = decimal as u8;
let character = integer as char;
println!("Casting: {} -> {} -> {}", decimal, integer, character);
// 何らかの値を符号なしの型(仮にTとする)へキャスティングすると
// 値がTに収まるまで、std::T::MAX + 1が加算あるいは減算される。
// 1000はすでにu16に収まっているため変化しない
println!("1000 as a u16 is : {}", 1000 as u16);
// 1000 - 256 - 256 - 256 = 232
// 水面下では最下位ビットから8bitが使用され、残りの上位ビットが圧縮される形になる。
println!("1000 as u8 is : {}", 1000 as u8);
// -1 + 256 = 255
println!(" -1 as a u8 is : {}", (-1i8) as u8);
println!("1000 mod 256 is : {}", 1000 % 256);
// 符号付きの型にキャストする場合、結果は以下の2つを行った場合に等しい
// 1. 対応する符号なしの型にキャストする。
// 2. 2の歩数(two's complement)をとる。
// 128をu8にキャストすると128となる。128の8ビットにおける補数は-128
println!(" 128 as a i8 is : {}", 128 as i8);
// 上で示した例から
// 1000 as u8 -> 232
// が成り立つ。232の8ビットにおける補数は-24
println!("1000 as a i8 is : {}", 1000 as i8);
println!(" 232 as a i8 is : {}", 232 as i8);
}
| true |
f2296bf2267f6766d5f5c50731b19674ade370d6
|
Rust
|
Urgau/rsix
|
/src/runtime.rs
|
UTF-8
| 2,823 | 2.921875 | 3 |
[
"MIT",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
//! Low-level implementation details for libc-like runtime libraries.
//!
//! These functions are for implementing thread-local storage (TLS),
//! managing threads, loaded libraries, and other process-wide resources.
//! Most of rsix doesn't care about what other libraries are linked into
//! the program or what they're doing, but the features in this module
//! generally can only be used by one entity within a process.
//!
//! The API for these functions is not stable, and this module is
//! `doc(hidden)`.
//!
//! # Safety
//!
//! This module is intended to be used for implementing a runtime library
//! such as libc. Use of these features for any other purpose is likely
//! to create serious problems.
#![allow(unsafe_code)]
use crate::process::Pid;
use crate::{imp, io};
use std::ffi::{c_void, CStr};
#[cfg(target_arch = "x86")]
#[inline]
pub unsafe fn set_thread_area(u_info: &mut UserDesc) -> io::Result<()> {
imp::syscalls::tls::set_thread_area(u_info)
}
#[cfg(target_arch = "arm")]
#[inline]
pub unsafe fn arm_set_tls(data: *mut c_void) -> io::Result<()> {
imp::syscalls::tls::arm_set_tls(data)
}
#[cfg(target_arch = "x86_64")]
#[inline]
pub unsafe fn set_fs(data: *mut c_void) {
imp::syscalls::tls::set_fs(data)
}
#[inline]
pub unsafe fn set_tid_address(data: *mut c_void) -> Pid {
imp::syscalls::tls::set_tid_address(data)
}
/// `prctl(PR_SET_NAME, name)`
///
/// # References
/// - [Linux]: https://man7.org/linux/man-pages/man2/prctl.2.html
///
/// # Safety
///
/// This is a very low-level feature for implementing threading libraries.
/// See the references links above.
///
/// [Linux]: https://man7.org/linux/man-pages/man2/prctl.2.html
#[inline]
pub unsafe fn set_thread_name(name: &CStr) -> io::Result<()> {
imp::syscalls::tls::set_thread_name(name)
}
#[cfg(target_arch = "x86")]
pub use imp::thread::tls::UserDesc;
/// `syscall(SYS_exit, status)`—Exit the current thread.
///
/// # Safety
///
/// This is a very low-level feature for implementing threading libraries.
#[inline]
pub unsafe fn exit_thread(status: i32) -> ! {
imp::syscalls::tls::exit_thread(status)
}
/// Return fields from the main executable segment headers ("phdrs") relevant
/// to initializing TLS provided to the program at startup.
#[inline]
pub fn startup_tls_info() -> StartupTlsInfo {
imp::thread::tls::startup_tls_info()
}
/// `(getauxval(AT_PHDR), getauxval(AT_PHNUM))`—Returns the address and
/// number of ELF segment headers for the main executable.
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man3/getauxval.3.html
#[cfg(any(linux_raw, all(libc, any(target_os = "android", target_os = "linux"))))]
#[inline]
pub fn exe_phdrs() -> (*const c_void, usize) {
imp::process::exe_phdrs()
}
pub use imp::thread::tls::StartupTlsInfo;
| true |
5ea118343273255dcb4b063a5954ddf468f7296c
|
Rust
|
ar3s3ru/poke-rs
|
/poke-domain/src/pokemon.rs
|
UTF-8
| 1,319 | 2.84375 | 3 |
[] |
no_license
|
// Gotta use a Box<Pin<Future<Result>>> for returning an async result
// from a trait for now, until we have Higher-kinded Types in stable...
use futures::future::BoxFuture;
use serde::Serialize;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum Element {
Normal,
Fight,
Flying,
Poison,
Ground,
Rock,
Bug,
Ghost,
Steel,
Fire,
Water,
Grass,
Electric,
Psychic,
Ice,
Dragon,
Dark,
Fairy,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(untagged)]
pub enum Type {
Single(Element),
Double(Element, Element),
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct Stats {
pub speed: u16,
pub special_defense: u16,
pub special_attack: u16,
pub defense: u16,
pub attack: u16,
pub hit_points: u16,
}
#[derive(Clone, Debug, PartialEq, Serialize)]
pub struct Pokemon {
pub dex_id: u32,
pub name: String,
#[serde(rename = "type")]
pub typ: Type,
pub height: u32,
pub weight: u32,
pub base_experience: u32,
pub stats: Stats,
}
pub trait Repository {
type Error: std::error::Error;
fn get<'a>(&'a self, num: u32) -> BoxFuture<'a, Result<Option<Pokemon>, Self::Error>>
where
Self: Sync + 'a;
}
| true |
0d1736c07d024f35a2f7ce78ba11cbd321e9b4fd
|
Rust
|
energister/rusty_engine
|
/src/physics.rs
|
UTF-8
| 7,814 | 2.984375 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"MIT",
"Apache-2.0"
] |
permissive
|
use crate::actor::Actor;
use bevy::prelude::*;
use std::{collections::HashSet, hash::Hash};
pub struct PhysicsPlugin;
impl Plugin for PhysicsPlugin {
fn build(&self, app: &mut AppBuilder) {
app.add_event::<CollisionEvent>()
.add_system(collision_detection.system());
}
}
#[derive(Debug, Clone)]
pub struct CollisionEvent {
pub state: CollisionState,
pub pair: CollisionPair,
}
#[derive(Debug, Clone, Copy)]
pub enum CollisionState {
Begin,
End,
}
impl CollisionState {
pub fn is_begin(&self) -> bool {
match self {
CollisionState::Begin => true,
CollisionState::End => false,
}
}
pub fn is_end(&self) -> bool {
match self {
CollisionState::Begin => false,
CollisionState::End => true,
}
}
}
#[derive(Debug, Default, Eq, Clone)]
pub struct CollisionPair(pub String, pub String);
impl CollisionPair {
pub fn either_contains<T: Into<String>>(&self, label: T) -> bool {
let label = label.into();
(self.0 == label) || (self.1 == label)
}
pub fn either_starts_with<T: Into<String>>(&self, label: T) -> bool {
let label = label.into();
self.0.starts_with(&label) || self.1.starts_with(&label)
}
pub fn one_starts_with<T: Into<String>>(&self, label: T) -> bool {
let label = label.into();
let a_matches = self.0.starts_with(&label);
let b_matches = self.1.starts_with(&label);
(a_matches && !b_matches) || (!a_matches && b_matches)
}
}
impl PartialEq for CollisionPair {
fn eq(&self, other: &Self) -> bool {
((self.0 == other.0) && (self.1 == other.1)) || ((self.0 == other.1) && (self.1 == other.0))
}
}
impl Hash for CollisionPair {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// Make sure we return the same hash no matter which position the same two strings might be
// in (so we match our PartialEq implementation)
if self.0 < self.1 {
self.0.hash(state);
self.1.hash(state);
} else {
self.1.hash(state);
self.0.hash(state);
}
}
}
fn collision_detection(
mut existing_collisions: Local<HashSet<CollisionPair>>,
mut collision_events: EventWriter<CollisionEvent>,
query: Query<&Actor>,
) {
let mut current_collisions = HashSet::<CollisionPair>::new();
'outer: for actor1 in query.iter().filter(|a| a.collision) {
for actor2 in query.iter().filter(|a| a.collision) {
if actor1.label == actor2.label {
// We only need to compare one half of the matrix triangle
continue 'outer;
}
if Collider::colliding(actor1, actor2) {
current_collisions
.insert(CollisionPair(actor1.label.clone(), actor2.label.clone()));
}
}
}
let beginning_collisions: Vec<_> = current_collisions
.difference(&existing_collisions)
.cloned()
.collect();
collision_events.send_batch(beginning_collisions.iter().map(|p| CollisionEvent {
state: CollisionState::Begin,
pair: p.clone(),
}));
for beginning_collision in beginning_collisions {
existing_collisions.insert(beginning_collision);
}
let ending_collisions: Vec<_> = existing_collisions
.difference(¤t_collisions)
.cloned()
.collect();
collision_events.send_batch(ending_collisions.iter().map(|p| CollisionEvent {
state: CollisionState::End,
pair: p.clone(),
}));
for ending_collision in ending_collisions {
let _ = existing_collisions.remove(&ending_collision);
}
}
#[derive(Clone, Debug)]
pub enum Collider {
NoCollider,
Poly(Vec<Vec2>),
}
impl Default for Collider {
fn default() -> Self {
Collider::NoCollider
}
}
impl Collider {
pub fn rect<T: Into<Vec2>>(topleft: T, bottomright: T) -> Self {
let topleft = topleft.into();
let bottomright = bottomright.into();
Self::Poly(vec![
topleft,
Vec2::new(bottomright.x, topleft.y),
bottomright,
Vec2::new(topleft.x, bottomright.y),
])
}
pub fn poly<T: Into<Vec2> + Copy>(points: &[T]) -> Self {
Self::Poly(points.iter().map(|&x| x.into()).collect())
}
pub fn circle_custom(radius: f32, vertices: usize) -> Self {
let mut points = vec![];
for x in 0..=vertices {
let inner = 2.0 * std::f64::consts::PI / vertices as f64 * x as f64;
points.push(Vec2::new(
inner.cos() as f32 * radius,
inner.sin() as f32 * radius,
));
}
Self::Poly(points)
}
pub fn circle(radius: f32) -> Self {
Self::circle_custom(radius, 16)
}
pub fn is_poly(&self) -> bool {
matches!(self, Self::Poly(_))
}
fn rotated(&self, rotation: f32) -> Vec<Vec2> {
let mut rotated_points = Vec::new();
if let Self::Poly(points) = self {
let sin = rotation.sin();
let cos = rotation.cos();
for point in points.iter() {
rotated_points.push(Vec2::new(
point.x * cos - point.y * sin,
point.x * sin + point.y * cos,
));
}
}
rotated_points
}
fn relative_to(&self, actor: &Actor) -> Vec<Vec2> {
self.rotated(actor.rotation)
.iter()
.map(|&v| v * actor.scale + actor.translation) // scale & translation
.collect()
}
pub fn colliding(actor1: &Actor, actor2: &Actor) -> bool {
use Collider::*;
if let NoCollider = actor1.collider {
return false;
}
if let NoCollider = actor2.collider {
return false;
}
if actor1.collider.is_poly() && actor2.collider.is_poly() {
let poly1 = actor1.collider.relative_to(actor1);
let poly2 = actor2.collider.relative_to(actor2);
// Polygon intersection algorithm adapted from
// https://stackoverflow.com/questions/10962379/how-to-check-intersection-between-2-rotated-rectangles
for poly in vec![poly1.clone(), poly2.clone()] {
for (idx, &p1) in poly.iter().enumerate() {
let p2 = poly[(idx + 1) % poly.len()];
let normal = Vec2::new(p2.y - p1.y, p1.x - p2.x);
let mut min_a = None;
let mut max_a = None;
for &p in poly1.iter() {
let projected = normal.x * p.x + normal.y * p.y;
if min_a.is_none() || projected < min_a.unwrap() {
min_a = Some(projected);
}
if max_a.is_none() || projected > max_a.unwrap() {
max_a = Some(projected);
}
}
let mut min_b = None;
let mut max_b = None;
for &p in poly2.iter() {
let projected = normal.x * p.x + normal.y * p.y;
if min_b.is_none() || projected < min_b.unwrap() {
min_b = Some(projected);
}
if max_b.is_none() || projected > max_b.unwrap() {
max_b = Some(projected);
}
}
if max_a < min_b || max_b < min_a {
return false;
}
}
}
return true;
}
false
}
}
| true |
8b89edd7575274a877694f1f5956d0ecc284114d
|
Rust
|
kissmikijr/caolo-backend
|
/sim/simulation/src/noise.rs
|
UTF-8
| 4,367 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#![allow(clippy::many_single_char_names)]
pub use perlin::PerlinNoise;
mod perlin {
use crate::{indices::WorldPosition, prelude::Axial};
use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng};
pub struct PerlinNoise {
seed: u64,
permutations: Box<[u32; 512]>,
}
impl PerlinNoise {
pub fn seed(&self) -> u64 {
self.seed
}
pub fn new(seed: impl Into<Option<u64>>) -> Self {
let seed = seed.into().unwrap_or(0xdeadbeef);
let mut res = Self {
seed,
permutations: Box::new([0; 512]),
};
res.reseed(seed);
res
}
pub fn reseed(&mut self, seed: u64) {
self.seed = seed;
for i in 0..256u32 {
self.permutations[i as usize] = i;
}
let mut rng = SmallRng::seed_from_u64(seed);
self.permutations[0..256].shuffle(&mut rng);
for i in 0..256 {
self.permutations[i + 256] = self.permutations[i];
}
}
pub fn axial_perlin(&self, pos: Axial, room_size: f32) -> f32 {
let [x, y] = pos.to_pixel_pointy(1.0);
let [x, y] = [x / room_size, y / room_size];
self.perlin(x, y, 0.0)
}
pub fn world_perlin(&self, pos: WorldPosition, room_size: f32) -> f32 {
let WorldPosition { room, pos } = pos;
let [_, _, z] = pos.hex_axial_to_cube();
let z = z as f32;
let [x, y] = pos.to_pixel_pointy(4.0);
let [rx, ry] = room.to_pixel_pointy(room_size * 8.0);
let [x, y] = [rx + x, ry + y];
self.perlin(x, y, z)
}
pub fn perlin(&self, x: f32, y: f32, z: f32) -> f32 {
let x0 = x as u32 & 255;
let y0 = y as u32 & 255;
let z0 = z as u32 & 255;
let x = x.fract();
let y = y.fract();
let z = z.fract();
let u = fade(x);
let v = fade(y);
let w = fade(z);
let a = self.permutations[x0 as usize] + y0;
let aa = self.permutations[a as usize] + z0;
let ab = self.permutations[a as usize + 1] + z0;
let b = self.permutations[x0 as usize + 1] + y0;
let ba = self.permutations[b as usize] + z0;
let bb = self.permutations[b as usize + 1] + z0;
interpolate(
interpolate(
interpolate(
grad(self.permutations[aa as usize], x, y, z),
grad(self.permutations[ba as usize], x - 1.0, y, z),
u,
),
interpolate(
grad(self.permutations[ab as usize], x, y - 1.0, z),
grad(self.permutations[bb as usize], x - 1.0, y - 1.0, z),
u,
),
v,
),
interpolate(
interpolate(
grad(self.permutations[aa as usize + 1], x, y, z - 1.0),
grad(self.permutations[ba as usize + 1], x - 1.0, y, z - 1.0),
u,
),
interpolate(
grad(self.permutations[ab as usize + 1], x, y - 1.0, z - 1.0),
grad(
self.permutations[bb as usize + 1],
x - 1.0,
y - 1.0,
z - 1.0,
),
u,
),
v,
),
w,
)
}
}
fn grad(hash: u32, x: f32, y: f32, z: f32) -> f32 {
let h = hash & 15;
let u = if h < 8 { x } else { y };
let v = if h < 4 {
y
} else if h == 12 || h == 14 {
x
} else {
z
};
let a = if h & 1 == 0 { u } else { -u };
let b = if h & 2 == 0 { v } else { -v };
a + b
}
fn interpolate(a0: f32, a1: f32, w: f32) -> f32 {
(a1 - a0) * w + a0
}
fn fade(t: f32) -> f32 {
t * t * t * (t * (t * 6.0 - 15.0) + 10.0)
}
}
| true |
5e6e7de71d83cd8d5d42e4053289be0a650154bd
|
Rust
|
willcrichton/rapier
|
/src/dynamics/joint/revolute_joint.rs
|
UTF-8
| 2,003 | 2.75 | 3 |
[
"Apache-2.0"
] |
permissive
|
use crate::math::{Point, Vector};
use crate::utils::WBasis;
use na::{Unit, Vector5};
#[derive(Copy, Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// A joint that removes all relative motion between two bodies, except for the rotations along one axis.
pub struct RevoluteJoint {
/// Where the revolute joint is attached on the first body, expressed in the local space of the first attached body.
pub local_anchor1: Point<f32>,
/// Where the revolute joint is attached on the second body, expressed in the local space of the second attached body.
pub local_anchor2: Point<f32>,
/// The rotation axis of this revolute joint expressed in the local space of the first attached body.
pub local_axis1: Unit<Vector<f32>>,
/// The rotation axis of this revolute joint expressed in the local space of the second attached body.
pub local_axis2: Unit<Vector<f32>>,
/// The basis orthonormal to `local_axis1`, expressed in the local space of the first attached body.
pub basis1: [Vector<f32>; 2],
/// The basis orthonormal to `local_axis2`, expressed in the local space of the second attached body.
pub basis2: [Vector<f32>; 2],
/// The impulse applied by this joint on the first body.
///
/// The impulse applied to the second body is given by `-impulse`.
pub impulse: Vector5<f32>,
}
impl RevoluteJoint {
/// Creates a new revolute joint with the given point of applications and axis, all expressed
/// in the local-space of the affected bodies.
pub fn new(
local_anchor1: Point<f32>,
local_axis1: Unit<Vector<f32>>,
local_anchor2: Point<f32>,
local_axis2: Unit<Vector<f32>>,
) -> Self {
Self {
local_anchor1,
local_anchor2,
local_axis1,
local_axis2,
basis1: local_axis1.orthonormal_basis(),
basis2: local_axis2.orthonormal_basis(),
impulse: na::zero(),
}
}
}
| true |
649bf95e4a46846f1101b4bcbb09c9177ba4dc6a
|
Rust
|
tanhao1410/learn_rust
|
/learn_actix/src/app/mod.rs
|
UTF-8
| 697 | 2.71875 | 3 |
[
"Apache-2.0"
] |
permissive
|
use actix_web::{HttpServer, App, web};
mod hello;
mod greet;
mod fibonacci;
pub fn start() {
let bind_address = "0.0.0.0:8080";
HttpServer::new(|| {
App::new().configure(routes)
})
.bind(&bind_address)
.unwrap_or_else(|_| panic!("Could not bind server to address {}", &bind_address))
.run();
println!("You can access the server at {}", &bind_address);
}
fn routes(app: &mut web::ServiceConfig) {
app
.route("/hello", web::get().to(hello::get))
.route("/greeting/{name}", web::get().to(greet::get))
.route("path",web::post().to(hello::get))
.route("/fibonacci/{input}", web::get().to(fibonacci::get));
}
| true |
190b476902ab84846157cd86eb198ac67d2aeeec
|
Rust
|
mehcode/diesel
|
/diesel/src/query_builder/aliasing.rs
|
UTF-8
| 6,612 | 2.890625 | 3 |
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use backend::Backend;
use expression::{AppearsOnTable, Expression, NonAggregate, SelectableExpression};
use query_builder::{AstPass, QueryFragment, SelectStatement};
use query_source::joins::{Inner, Join, JoinOn, LeftOuter};
use query_source::{AppearsInFromClause, Column, Never, Once, QuerySource};
use result::QueryResult;
#[derive(Debug, Clone, Copy, QueryId)]
/// Represents an aliased query.
///
/// This struct is constructed by calling [`.aliased`] on a query. The alias
/// should be generated by using [`diesel_define_alias!`].
///
/// [`.aliased`]: ../query_dsl/trait.QueryDsl.html#method.aliased
/// [`diesel_define_alias!`]: ../macro.diesel_define_alias.html
///
/// A struct of this type represents the SQL `table_name alias` for plain
/// tables, and `(SUBSELECT) alias` for aliased subselects.
pub struct Aliased<Query, Alias> {
query: Query,
alias: Alias,
}
impl<Query, Alias> Aliased<Query, Alias> {
pub(crate) fn new(query: Query, alias: Alias) -> Self {
Self { query, alias }
}
}
impl<Query, Alias> Aliased<Query, Alias>
where
Self: QuerySource,
{
/// Retrieve the select clause of this aliased query as a tuple.
///
/// The returned tuple will be suitable for use in any part of your query.
/// Any expressions that are not columns will be automatically given a
/// unique name.
/// This method is the only way to reference columns from an aliased table.
/// There is no way to retrieve them by name.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate diesel;
/// # include!("../doctest_setup.rs");
/// #
/// # fn main() {
/// diesel_define_alias!(users2);
///
/// let u2 = users::table.aliased(users2);
/// let (u2_id, ..) = u2.selection();
///
/// # #[cfg(feature = "postgres")]
/// # let expected_sql = r#""users2"."id""#;
/// # #[cfg(not(feature = "postgres"))]
/// let expected_sql = "`users2`.`id`";
/// let actual_sql = debug_query::<DB, _>(&u2_id).to_string();
/// assert_eq!(expected_sql, actual_sql);
/// # }
pub fn selection(&self) -> <Self as QuerySource>::DefaultSelection {
self.default_selection()
}
}
impl<Query, Alias> QuerySource for Aliased<Query, Alias>
where
Query: QuerySource,
Query::DefaultSelection: FromAliasedTable<Alias>,
<Query::DefaultSelection as FromAliasedTable<Alias>>::Output: SelectableExpression<Self>,
Alias: Copy,
{
type FromClause = Aliased<Query::FromClause, Alias>;
type DefaultSelection = <Query::DefaultSelection as FromAliasedTable<Alias>>::Output;
fn from_clause(&self) -> Self::FromClause {
Aliased::new(self.query.from_clause(), self.alias)
}
fn default_selection(&self) -> Self::DefaultSelection {
self.query
.default_selection()
.from_aliased_table(self.alias)
}
}
impl<Query, Alias, DB> QueryFragment<DB> for Aliased<Query, Alias>
where
DB: Backend,
Query: QueryFragment<DB>,
Alias: QueryFragment<DB>,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
self.query.walk_ast(out.reborrow())?;
out.push_sql(" ");
self.alias.walk_ast(out.reborrow())?;
Ok(())
}
}
pub trait FromAliasedTable<Alias> {
type Output;
fn from_aliased_table(self, alias: Alias) -> Self::Output;
}
impl<Col: Column, Alias> FromAliasedTable<Alias> for Col {
type Output = ColumnFromAliasedTable<Self, Alias>;
fn from_aliased_table(self, alias: Alias) -> Self::Output {
ColumnFromAliasedTable::new(self, alias)
}
}
macro_rules! tuple_impls {
($(
$Size:tt {$(
($idx:tt) -> $T:ident, $ST:ident, $TT:ident,
)+}
)+) => {$(
impl<$($T,)+ Alias> FromAliasedTable<Alias> for ($($T,)+)
where
$($T: FromAliasedTable<Alias>,)+
Alias: Copy,
{
type Output = ($($T::Output,)+);
fn from_aliased_table(self, alias: Alias) -> Self::Output {
($(self.$idx.from_aliased_table(alias),)+)
}
}
)*};
}
__diesel_for_each_tuple!(tuple_impls);
#[derive(Debug, Clone, Copy, QueryId)]
pub struct ColumnFromAliasedTable<Col, Alias> {
column: Col,
alias: Alias,
}
impl<Col, Alias> ColumnFromAliasedTable<Col, Alias> {
fn new(column: Col, alias: Alias) -> Self {
Self { column, alias }
}
}
impl<Col, Query, Alias> SelectableExpression<Aliased<Query, Alias>>
for ColumnFromAliasedTable<Col, Alias>
where
Self: AppearsOnTable<Aliased<Query, Alias>>,
Col: SelectableExpression<Query>,
{
}
impl<Col, Query, Alias> AppearsOnTable<Query> for ColumnFromAliasedTable<Col, Alias>
where
Self: Expression,
Query: AppearsInFromClause<Alias, Count = Once>,
{
}
impl<Left, Right, Col, Alias> SelectableExpression<Join<Left, Right, LeftOuter>>
for ColumnFromAliasedTable<Col, Alias>
where
Self: AppearsOnTable<Join<Left, Right, LeftOuter>>,
Left: AppearsInFromClause<Alias, Count = Once>,
Right: AppearsInFromClause<Alias, Count = Never>,
{
}
impl<Left, Right, Col, Alias> SelectableExpression<Join<Left, Right, Inner>>
for ColumnFromAliasedTable<Col, Alias>
where
Self: AppearsOnTable<Join<Left, Right, Inner>>,
Join<Left, Right, Inner>: AppearsInFromClause<Alias, Count = Once>,
{
}
// FIXME: Remove this when overlapping marker traits are stable
impl<Join, On, Col, Alias> SelectableExpression<JoinOn<Join, On>>
for ColumnFromAliasedTable<Col, Alias>
where
Self: SelectableExpression<Join> + AppearsOnTable<JoinOn<Join, On>>,
{
}
// FIXME: Remove this when overlapping marker traits are stable
impl<From, Col, Alias> SelectableExpression<SelectStatement<From>>
for ColumnFromAliasedTable<Col, Alias>
where
Self: SelectableExpression<From> + AppearsOnTable<SelectStatement<From>>,
{
}
impl<Col, Alias> Expression for ColumnFromAliasedTable<Col, Alias>
where
Col: Expression,
{
type SqlType = Col::SqlType;
}
impl<Col, Alias> NonAggregate for ColumnFromAliasedTable<Col, Alias>
where
Col: NonAggregate,
{
}
impl<Col, Alias, DB> QueryFragment<DB> for ColumnFromAliasedTable<Col, Alias>
where
DB: Backend,
Col: Column,
Alias: QueryFragment<DB>,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
self.alias.walk_ast(out.reborrow())?;
out.push_sql(".");
out.push_identifier(Col::NAME)?;
Ok(())
}
}
impl<Query, Alias, T> AppearsInFromClause<T> for Aliased<Query, Alias>
where
Alias: AppearsInFromClause<T>,
{
type Count = Alias::Count;
}
| true |
c0ebb0b9791be40fd8f9e08d35b7ec30431ee996
|
Rust
|
hbdgr/cybernetics
|
/tests/objects_api.rs
|
UTF-8
| 4,976 | 2.765625 | 3 |
[] |
no_license
|
extern crate cybernetics;
extern crate rocket;
extern crate serde_json;
mod common;
use common::object_helpers;
use common::rocket_helpers;
use cybernetics::primitives::header::ObjectType;
use rocket::http::{ContentType, Status};
use serde_json::json;
#[test]
fn create_object() {
let obj_json = object_helpers::test_content_json(ObjectType::PrimaryElement, "test_object");
let client = rocket_helpers::rocket_client();
let mut response = client
.post("/objects")
.body(obj_json.to_string())
.header(ContentType::JSON)
.dispatch();
assert_eq!(response.status(), Status::Created);
let json_response: serde_json::Value =
serde_json::from_str(&response.body_string().unwrap()).unwrap();
assert_eq!(
json_response.get("content").unwrap(),
&json!({"header": { "object_type": "PrimaryElement"},"body":"test_object"})
);
assert_eq!(
json_response["hash"].as_str(),
Some("f1dee37be385017d470584765ae9dd577a4a189b4f5c1320a912d71fd2ec92b5")
);
}
#[test]
fn create_duplication() {
let body_str = "duplicate";
rocket_helpers::create_test_element(body_str);
rocket_helpers::create_test_object_expect_status(
ObjectType::PrimaryElement,
body_str,
Status::Conflict,
);
}
#[test]
fn get_object() {
let created_obj_hash = rocket_helpers::create_test_element("obj_to_get");
let client = rocket_helpers::rocket_client();
let mut response = client
.get(format!("/objects/{}", created_obj_hash))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let json_response: serde_json::Value =
serde_json::from_str(&response.body_string().unwrap()).unwrap();
assert_eq!(
json_response.get("content").unwrap(),
&json!({"header": { "object_type": "PrimaryElement"},"body":"obj_to_get"})
);
assert_eq!(
json_response["hash"].as_str(),
Some("c2c3061b44f977c4cf1cf690806b0bb7cb3b1f0233a0f27343281a93c7486cb2")
);
}
#[test]
fn get_all() {
rocket_helpers::create_test_element("obj1");
rocket_helpers::create_test_element("obj2");
let client = rocket_helpers::rocket_client();
let mut response = client.get("/objects").dispatch();
assert_eq!(response.status(), Status::Ok);
let json_response: serde_json::Value =
serde_json::from_str(&response.body_string().unwrap()).unwrap();
assert_eq!(json_response.is_array(), true);
assert!(
json_response.as_array().unwrap().len() > 0,
"lenght of array should be greater than 0"
);
}
#[test]
fn put_object() {
let created_obj_hash = rocket_helpers::create_test_element("before_put");
let new_body_json =
object_helpers::test_content_json(ObjectType::PrimaryElement, "new_better..");
let client = rocket_helpers::rocket_client();
let mut response = client
.put(format!("/objects/{}", created_obj_hash))
.body(&new_body_json.to_string())
.header(ContentType::JSON)
.dispatch();
assert_eq!(response.status(), Status::Created);
// old object should be deleted
let old_obj_response = client
.get(format!("/objects/{}", created_obj_hash))
.dispatch();
assert_eq!(old_obj_response.status(), Status::NotFound);
let json_response: serde_json::Value =
serde_json::from_str(&response.body_string().unwrap()).unwrap();
assert!(
created_obj_hash != json_response["hash"].as_str().unwrap(),
"new hash should be different"
);
assert_eq!(
json_response.get("content").unwrap(),
&json!({"header": { "object_type": "PrimaryElement"},"body":"new_better.."})
);
assert_eq!(
json_response["hash"].as_str(),
Some("de4be1db48876db43a0127de88d1e6fab4dbe52689eff4ff450a1942a444595b"),
);
}
#[test]
fn put_duplicated() {
let body_str = "put_duplicated";
let created_obj_hash = rocket_helpers::create_test_element(body_str);
let new_same_body_json =
object_helpers::test_content_json(ObjectType::PrimaryElement, body_str);
let client = rocket_helpers::rocket_client();
// conflict for existing object
let response_conflict = client
.put(format!("/objects/{}", created_obj_hash))
.body(&new_same_body_json.to_string())
.header(ContentType::JSON)
.dispatch();
assert_eq!(response_conflict.status(), Status::Conflict);
}
#[test]
fn delete_object() {
let body_str = "obj_to_delete";
let created_obj_hash = rocket_helpers::create_test_element(body_str);
let client = rocket_helpers::rocket_client();
let response = client
.delete(format!("/objects/{}", created_obj_hash))
.dispatch();
assert_eq!(response.status(), Status::NoContent);
let response = client
.get(format!("/objects/{}", created_obj_hash))
.dispatch();
assert_eq!(response.status(), Status::NotFound);
}
| true |
f8f88b1dbde3a49d1caee4c5c3b6a2ea6a587cbd
|
Rust
|
growingspaghetti/project-euler
|
/rust/src/m23.rs
|
UTF-8
| 5,987 | 3.578125 | 4 |
[] |
no_license
|
//! See [m21](./m21.rs)
///
/// A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
///
/// A number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this sum exceeds n.
///
/// As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.
///
/// Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.
///
///
///
/// ```rust
/// use self::project_euler::m23::sum_of_integers_which_cannot_be_written_as_the_sum_of_two_abundant_numbers;
/// assert_eq!(sum_of_integers_which_cannot_be_written_as_the_sum_of_two_abundant_numbers(), 4179871);
/// ```
pub fn sum_of_integers_which_cannot_be_written_as_the_sum_of_two_abundant_numbers() -> u64 {
fn proper_divisors_sigma_one(num: u64) -> u64 {
if num < 2 {
panic!()
}
crate::m21::divisor_function_sigma_one_function(num) - num
}
let mut abundant_numbers: Vec<u64> = vec![];
for a in 2..28123u64 {
let d = proper_divisors_sigma_one(a);
if d > a {
abundant_numbers.push(a);
}
}
let check_array = &mut [false; 28123];
abundant_numbers
.iter()
.for_each(|&d| check_array[d as usize] = true);
let mut sum = 0u64;
fn is_sum_of_two_abundant_numbers(
n: u64,
abundant_numbers: &[u64],
check_array: &[bool],
) -> bool {
for &a in abundant_numbers {
// n = sum(a,b) || sum(b,a)
// when a reached the half point of n, the other half of n is already pair searched.
if n / 2 < a {
break;
}
let b = n - a;
if check_array[b as usize] {
return true;
}
}
false
}
for i in 1..28123u64 {
if !is_sum_of_two_abundant_numbers(i, &abundant_numbers, check_array) {
sum += i;
}
}
sum
}
struct Index {
i: usize,
_ite: Box<dyn Iterator<Item = usize>>,
}
impl Index {
fn increment(&mut self) {
self.i += self._ite.next().unwrap();
}
fn new() -> Self {
Index {
i: 5,
_ite: Box::new(vec![2usize, 4].into_iter().cycle()),
}
}
}
fn rule_out_square(sieve: &mut Vec<bool>, prime: usize) {
for i in (prime * prime..sieve.len()).step_by(prime) {
sieve[i] = false;
}
}
fn primes(under: u32) -> Vec<u32> {
let mut primes: Vec<u32> = vec![2u32, 3u32];
let mut sieve = vec![true; under as usize];
let sqrt = (sieve.len() as f64).sqrt() as usize;
let mut index = Index::new();
loop {
if index.i > sqrt {
break;
}
if sieve[index.i] {
primes.push(index.i as u32);
rule_out_square(&mut sieve, index.i);
}
index.increment();
}
loop {
if index.i >= sieve.len() {
break;
}
if sieve[index.i] {
primes.push(index.i as u32);
}
index.increment();
}
primes
}
struct AbundantNumberScanner {
under: u32,
_primes: Vec<u32>,
_pair_sieve: Vec<bool>,
}
impl AbundantNumberScanner {
fn new(under: u32) -> Self {
AbundantNumberScanner {
under: under,
_primes: primes(under),
_pair_sieve: vec![false; under as usize],
}
}
fn _divide_fully(&self, n: &mut u32, d: u32, side: &mut u32, sum: &mut u32) {
if *n % d == 0 {
let mut exp = 0u32;
while {
*n /= d;
exp += 1;
*n % d == 0
} {}
*side = (*n as f32).sqrt() as u32;
*sum *= (d.pow(exp + 1) - 1) / (d - 1);
}
}
fn _sum_of_divisors(&mut self, mut n: u32) -> u32 {
let mut side = (n as f32).sqrt() as u32;
let mut sum = 1u32;
for &p in self._primes.iter() {
if p > side || n == 1 {
break;
}
self._divide_fully(&mut n, p, &mut side, &mut sum);
}
if n != 1 {
sum *= (n * n - 1) / (n - 1);
}
sum
}
fn init_abundant_num_pair_sieve(&mut self) {
let mut abundant_numbers = vec![];
for n in 12..self.under {
let sum = self._sum_of_divisors(n) - n;
if sum > n {
abundant_numbers.push(n);
}
}
for (i, &a) in abundant_numbers.iter().enumerate() {
for &b in abundant_numbers[i..].iter() {
if let Some(n) = self._pair_sieve.get_mut((a + b) as usize) {
*n = true;
}
}
}
}
fn non_pair_sum(&mut self) -> u32 {
let mut non_pair_sum = 0u32;
for n in 1..self.under {
if !self._pair_sieve[n as usize] {
non_pair_sum += n;
}
}
non_pair_sum
}
}
/// ```rust
/// use self::project_euler::m23::sum_of_integers_which_cannot_be_written_as_the_sum_of_two_abundant_numbers_2;
/// assert_eq!(sum_of_integers_which_cannot_be_written_as_the_sum_of_two_abundant_numbers_2(), 4179871);
/// ```
pub fn sum_of_integers_which_cannot_be_written_as_the_sum_of_two_abundant_numbers_2() -> u32 {
let mut a = AbundantNumberScanner::new(28_124);
a.init_abundant_num_pair_sieve();
a.non_pair_sum()
}
| true |
e42cfa06c971b1988ac8efdd215e029f5341ecfd
|
Rust
|
Ben-Lichtman/advent_of_code_2020
|
/src/day11.rs
|
UTF-8
| 5,612 | 3.125 | 3 |
[] |
no_license
|
use std::{fs::read_to_string, mem::swap};
#[derive(Clone, Copy, Debug)]
enum Cell {
Floor,
Empty,
Occupied,
}
#[derive(Debug)]
struct Automata {
x: usize,
y: usize,
state: Box<[Cell]>,
next_state: Box<[Cell]>,
}
impl Automata {
fn new(input: Vec<Vec<Cell>>) -> Self {
let y = input.len();
let x = input[0].len();
let mut state = Vec::with_capacity(x * y);
input
.into_iter()
.flat_map(|v| v.into_iter())
.for_each(|s| state.push(s));
let state = state.into_boxed_slice();
let next_state = state.clone();
Self {
x,
y,
state,
next_state,
}
}
fn coord_to_index(&self, x: usize, y: usize) -> usize { y * self.x + x }
fn verify_coord(&self, x: usize, y: usize) -> bool { x < self.x && y < self.y }
fn kernel_index_1(&self, x: usize, y: usize, i: usize) -> Option<(usize, usize)> {
// 0 <= i < 8
let x = x + i % 3;
let y = y + i / 3;
match (x.checked_sub(1), y.checked_sub(1)) {
(Some(x), Some(y)) if self.verify_coord(x, y) => Some((x, y)),
_ => None,
}
}
fn get_kernel_1(&self, x: usize, y: usize) -> [Option<Cell>; 9] {
let mut table = [None; 9];
(0..9).for_each(|i| {
table[i] = self
.kernel_index_1(x, y, i)
.map(|(x, y)| self.coord_to_index(x, y))
.map(|i| self.state[i])
});
table
}
fn print(&self) {
for y in 0..self.y {
for x in 0..self.x {
let c = match self.state[self.coord_to_index(x, y)] {
Cell::Floor => '.',
Cell::Empty => 'L',
Cell::Occupied => '#',
};
print!("{}", c)
}
println!("");
}
}
fn next_1(&mut self) -> (u32, u32, u32, u32) {
let mut changed = 0;
for y in 0..self.y {
for x in 0..self.x {
let target_index = self.coord_to_index(x, y);
let current_cell = self.state[target_index];
let get_adjacent_occupied = || {
let mut table = self.get_kernel_1(x, y);
// Ignore center of kernel
table[4] = None;
table
.iter()
.filter(|x| match x {
Some(Cell::Occupied) => true,
_ => false,
})
.count()
};
self.next_state[target_index] = match current_cell {
Cell::Floor => Cell::Floor,
Cell::Empty => {
if get_adjacent_occupied() == 0 {
changed += 1;
Cell::Occupied
}
else {
Cell::Empty
}
}
Cell::Occupied => {
if get_adjacent_occupied() >= 4 {
changed += 1;
Cell::Empty
}
else {
Cell::Occupied
}
}
}
}
}
let (floor, empty, occupied) =
self.next_state
.iter()
.fold((0, 0, 0), |(floor, empty, occupied), new| match new {
Cell::Floor => (floor + 1, empty, occupied),
Cell::Empty => (floor, empty + 1, occupied),
Cell::Occupied => (floor, empty, occupied + 1),
});
swap(&mut self.state, &mut self.next_state);
(changed, floor, empty, occupied)
}
fn next_2(&mut self) -> (u32, u32, u32, u32) {
let mut changed = 0;
for y in 0..self.y {
for x in 0..self.x {
let target_index = self.coord_to_index(x, y);
let current_cell = self.state[target_index];
// println!("Checking {:?}", (x, y));
let get_view_occupied = || {
let directions = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
];
directions
.iter()
.filter(|(x_move, y_move)| {
let mut x = x as isize;
let mut y = y as isize;
loop {
x += x_move;
y += y_move;
// Check for occupied seats
if x < 0 || y < 0 {
break false;
}
let x = x as usize;
let y = y as usize;
if !self.verify_coord(x, y) {
break false;
}
match self.state[self.coord_to_index(x, y)] {
Cell::Floor => (),
Cell::Empty => break false,
Cell::Occupied => break true,
}
}
})
.count()
};
self.next_state[target_index] = match current_cell {
Cell::Floor => Cell::Floor,
Cell::Empty => {
if get_view_occupied() == 0 {
changed += 1;
Cell::Occupied
}
else {
Cell::Empty
}
}
Cell::Occupied => {
if get_view_occupied() >= 5 {
changed += 1;
Cell::Empty
}
else {
Cell::Occupied
}
}
}
}
}
let (floor, empty, occupied) =
self.next_state
.iter()
.fold((0, 0, 0), |(floor, empty, occupied), new| match new {
Cell::Floor => (floor + 1, empty, occupied),
Cell::Empty => (floor, empty + 1, occupied),
Cell::Occupied => (floor, empty, occupied + 1),
});
swap(&mut self.state, &mut self.next_state);
(changed, floor, empty, occupied)
}
}
fn parse_to_vec(i: &str) -> Vec<Vec<Cell>> {
i.lines()
.map(|l| {
l.chars()
.map(|c| match c {
'.' => Cell::Floor,
'L' => Cell::Empty,
'#' => Cell::Occupied,
_ => panic!("Invalid character"),
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
}
fn main() {
let input = read_to_string("input/day11/1.txt").unwrap();
let nums = parse_to_vec(&input);
let mut a = Automata::new(nums.clone());
let occupied = loop {
let (changed, _, _, occupied) = a.next_1();
if changed == 0 {
break occupied;
}
};
println!("Part 1: {}", occupied);
let mut a = Automata::new(nums.clone());
let occupied = loop {
println!("===============");
a.print();
let (changed, _, _, occupied) = a.next_2();
if changed == 0 {
break occupied;
}
};
println!("===============");
a.print();
println!("===============");
println!("Part 2: {}", occupied);
}
| true |
0590d62109e7efcc9961c6ac16ef8918408d9a06
|
Rust
|
incker2/luno-rust
|
/src/lightning.rs
|
UTF-8
| 2,970 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
use serde::Deserialize;
use std::collections::HashMap;
use crate::{client, Currency};
#[derive(Debug, Deserialize)]
pub struct LightningWithdrawal {
pub invoice_id: String,
pub payment_request: String,
}
#[derive(Debug, Deserialize)]
pub struct LightningReceiveRequest {
pub invoice_id: String,
pub payment_request: String,
}
pub struct LightningSendBuilder<'a> {
pub(crate) luno_client: &'a client::LunoClient,
pub(crate) url: reqwest::Url,
pub(crate) params: HashMap<&'a str, String>,
}
impl<'a> LightningSendBuilder<'a> {
pub fn with_currency(&mut self, currency: Currency) -> &mut LightningSendBuilder<'a> {
self.params.insert("currency", currency.to_string());
self
}
pub fn with_description(&mut self, description: &'a str) -> &mut LightningSendBuilder<'a> {
self.params.insert("description", description.to_string());
self
}
pub fn with_external_id(&mut self, external_id: &'a str) -> &mut LightningSendBuilder<'a> {
self.params.insert("external_id", external_id.to_string());
self
}
pub async fn send(&self) -> Result<LightningWithdrawal, reqwest::Error> {
let url = self.url.clone();
self.luno_client
.http
.post(url)
.basic_auth(
self.luno_client.credentials.key.to_owned(),
Some(self.luno_client.credentials.secret.to_owned()),
)
.form(&self.params)
.send()
.await?
.json()
.await
}
}
pub struct LightningReceiveBuilder<'a> {
pub(crate) luno_client: &'a client::LunoClient,
pub(crate) url: reqwest::Url,
pub(crate) params: HashMap<&'a str, String>,
}
impl<'a> LightningReceiveBuilder<'a> {
pub fn with_currency(&mut self, currency: Currency) -> &mut LightningReceiveBuilder<'a> {
self.params.insert("currency", currency.to_string());
self
}
pub fn with_description(&mut self, description: &'a str) -> &mut LightningReceiveBuilder<'a> {
self.params.insert("description", description.to_string());
self
}
pub fn with_expires_at(&mut self, expires_at: u64) -> &mut LightningReceiveBuilder<'a> {
self.params.insert("expires_at", expires_at.to_string());
self
}
pub async fn create(&self) -> Result<LightningReceiveRequest, reqwest::Error> {
let url = self.url.clone();
self.luno_client
.http
.post(url)
.basic_auth(
self.luno_client.credentials.key.to_owned(),
Some(self.luno_client.credentials.secret.to_owned()),
)
.form(&self.params)
.send()
.await?
.json()
.await
}
}
#[derive(Debug, Deserialize)]
pub struct LightningInvoiceLookupResponse {
pub payment_request: String,
pub settled_amount: String,
pub status: String,
}
| true |
2a40d2a6cfd12fc115144df2271fe3da4c28f21c
|
Rust
|
GTime/cqrs-eventsourcing
|
/src/cqrs.rs
|
UTF-8
| 1,405 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
use std::marker::PhantomData;
use crate::{Aggregate, Command, DomainEvent, Error, Handlers, MetaData, Store};
// #[derive()]
pub struct CQRS<A, E, ES>
where
A: Aggregate,
E: DomainEvent<A>,
ES: Store<A, E>,
{
handlers: Handlers<A, E>,
store: ES,
_a: PhantomData<A>,
_e: PhantomData<E>,
}
impl<A, E, ES> CQRS<A, E, ES>
where
A: Aggregate,
E: DomainEvent<A>,
ES: Store<A, E>,
{
pub fn new(store: ES, handlers: Handlers<A, E>) -> CQRS<A, E, ES> {
Self {
store,
handlers,
_a: PhantomData,
_e: PhantomData,
}
}
pub async fn execute<C: Command<A, E>>(
&mut self,
command: C,
meta: MetaData,
) -> Result<(), Error> {
// Call command's before
let cmd = C::before(command, &self.store).await?;
// Assemble Aggragate
let id = &cmd.id();
let aggregate_context = self.store.assemble_aggregate(id.clone()).await?;
// Handle Command
let generated_events = cmd.handle(&aggregate_context).await?;
// Store New Events
let commited_events = &self
.store
.append(generated_events, aggregate_context, meta)
.await?;
// Run Handlers
for handler in &self.handlers {
handler.handle(commited_events).await;
}
Ok(())
}
}
| true |
946e03413028ccf966531b1f23c2902aa0a97252
|
Rust
|
jonhoo/stuck
|
/src/main.rs
|
UTF-8
| 10,384 | 2.71875 | 3 |
[] |
no_license
|
use futures_util::future::Either;
use futures_util::stream::StreamExt;
use std::collections::{BTreeMap, HashMap};
use std::io::{self};
use structopt::StructOpt;
use termion::raw::IntoRawMode;
use tokio::prelude::*;
use tui::backend::Backend;
use tui::backend::TermionBackend;
use tui::layout::{Constraint, Direction, Layout};
use tui::style::{Color, Modifier, Style};
use tui::widgets::{Block, Borders, Paragraph, Text, Widget};
use tui::Terminal;
const DRAW_EVERY: std::time::Duration = std::time::Duration::from_millis(200);
const WINDOW: std::time::Duration = std::time::Duration::from_secs(10);
#[derive(Debug, StructOpt)]
/// A live profile visualizer.
///
/// Pipe the output of the appropriate `bpftrace` command into this program, and enjoy.
/// Happy profiling!
struct Opt {
/// Treat input as a replay of a trace and emulate time accordingly.
#[structopt(long)]
replay: bool,
}
#[derive(Debug, Default)]
struct Thread {
window: BTreeMap<usize, String>,
}
fn main() -> Result<(), io::Error> {
let opt = Opt::from_args();
if termion::is_tty(&io::stdin().lock()) {
eprintln!("Don't type input to this program, that's silly.");
return Ok(());
}
let stdout = io::stdout().into_raw_mode()?;
let backend = TermionBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let mut tids = BTreeMap::new();
let mut inframe = None;
let mut stack = String::new();
terminal.hide_cursor()?;
terminal.clear()?;
terminal.draw(|mut f| {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(2)
.constraints([Constraint::Percentage(100)].as_ref())
.split(f.size());
Block::default()
.borders(Borders::ALL)
.title("Common thread fan-out points")
.title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD))
.render(&mut f, chunks[0]);
})?;
// a _super_ hacky way for us to get input from the TTY
let tty = termion::get_tty()?;
let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
std::thread::spawn(move || {
use termion::input::TermRead;
for key in tty.keys() {
if let Err(_) = tx.send(key) {
return;
}
}
});
let mut rt = tokio::runtime::Runtime::new()?;
rt.block_on(async move {
let stdin = tokio::io::BufReader::new(tokio::io::stdin());
let lines = stdin.lines().map(Either::Left);
let rx = rx.map(Either::Right);
let mut input = futures_util::stream::select(lines, rx);
let mut lastprint = 0;
let mut lasttime = 0;
while let Some(got) = input.next().await {
match got {
Either::Left(line) => {
let line = line.unwrap();
if line.starts_with("Error") || line.starts_with("Attaching") {
} else if !line.starts_with(' ') || line.is_empty() {
if let Some((time, tid)) = inframe {
// new frame starts, so finish the old one
// skip empty stack frames
if !stack.is_empty() {
let nxt_stack = String::with_capacity(stack.capacity());
let mut stack = std::mem::replace(&mut stack, nxt_stack);
// remove trailing ;
let stackn = stack.len();
stack.truncate(stackn - 1);
tids.entry(tid)
.or_insert_with(Thread::default)
.window
.insert(time, stack);
if opt.replay && lasttime != 0 && time - lasttime > 1_000_000 {
tokio::time::delay_for(std::time::Duration::from_nanos(
(time - lasttime) as u64,
))
.await;
}
lasttime = time;
if std::time::Duration::from_nanos((time - lastprint) as u64)
> DRAW_EVERY
{
draw(&mut terminal, &mut tids)?;
lastprint = time;
}
}
inframe = None;
}
if !line.is_empty() {
// read time + tid
let mut fields = line.split_whitespace();
let time = fields
.next()
.expect("no time given for frame")
.parse::<usize>()
.expect("invalid time");
let tid = fields
.next()
.expect("no tid given for frame")
.parse::<usize>()
.expect("invalid tid");
inframe = Some((time, tid));
}
} else {
assert!(inframe.is_some());
stack.push_str(line.trim());
stack.push(';');
}
}
Either::Right(key) => {
let key = key?;
if let termion::event::Key::Char('q') = key {
break;
}
}
}
}
terminal.clear()?;
Ok(())
})
}
fn draw<B: Backend>(
terminal: &mut Terminal<B>,
threads: &mut BTreeMap<usize, Thread>,
) -> Result<(), io::Error> {
// keep our window relatively short
let mut latest = 0;
for thread in threads.values() {
if let Some(&last) = thread.window.keys().next_back() {
latest = std::cmp::max(latest, last);
}
}
if latest > WINDOW.as_nanos() as usize {
for thread in threads.values_mut() {
// trim to last 5 seconds
thread.window = thread
.window
.split_off(&(latest - WINDOW.as_nanos() as usize));
}
}
// now only reading
let threads = &*threads;
let mut lines = Vec::new();
let mut hits = HashMap::new();
let mut maxes = BTreeMap::new();
for (_, thread) in threads {
// add up across the window
let mut max: Option<(&str, usize)> = None;
for (&time, stack) in &thread.window {
latest = std::cmp::max(latest, time);
let mut at = stack.len();
while let Some(stack_start) = stack[..at].rfind(';') {
at = stack_start;
let stack = &stack[at + 1..];
let count = hits.entry(stack).or_insert(0);
*count += 1;
if let Some((_, max_count)) = max {
if *count >= max_count {
max = Some((stack, *count));
}
} else {
max = Some((stack, *count));
}
}
}
if let Some((stack, count)) = max {
let e = maxes.entry(stack).or_insert((0, 0));
e.0 += 1;
e.1 += count;
}
hits.clear();
}
if maxes.is_empty() {
return Ok(());
}
let max = *maxes.values().map(|(_, count)| count).max().unwrap() as f64;
// sort by where most threads are
let mut maxes: Vec<_> = maxes.into_iter().collect();
maxes.sort_by_key(|(_, (nthreads, _))| *nthreads);
for (stack, (nthreads, count)) in maxes.iter().rev() {
let count = *count;
let nthreads = *nthreads;
if stack.find(';').is_none() {
// this thread just shares the root frame
continue;
}
if count == 1 {
// this thread only has one sample ever, let's reduce noise...
continue;
}
let red = (128.0 * count as f64 / max) as u8;
let color = Color::Rgb(255, 128 - red, 128 - red);
if nthreads == 1 {
lines.push(Text::styled(
format!("A thread fanned out from here {} times\n", count),
Style::default().modifier(Modifier::BOLD).fg(color),
));
} else {
lines.push(Text::styled(
format!(
"{} threads fanned out from here {} times\n",
nthreads, count
),
Style::default().modifier(Modifier::BOLD).fg(color),
));
}
for (i, frame) in stack.split(';').enumerate() {
// https://github.com/alexcrichton/rustc-demangle/issues/34
let offset = &frame[frame.rfind('+').unwrap_or_else(|| frame.len())..];
let frame =
rustc_demangle::demangle(&frame[..frame.rfind('+').unwrap_or_else(|| frame.len())]);
if i == 0 {
lines.push(Text::styled(
format!(" {}{}\n", frame, offset),
Style::default(),
));
} else {
lines.push(Text::styled(
format!(" {}{}\n", frame, offset),
Style::default().modifier(Modifier::DIM),
));
}
}
lines.push(Text::raw("\n"));
}
terminal.draw(|mut f| {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(2)
.constraints([Constraint::Percentage(100)].as_ref())
.split(f.size());
Paragraph::new(lines.iter())
.block(
Block::default()
.borders(Borders::ALL)
.title("Common thread fan-out points")
.title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)),
)
.render(&mut f, chunks[0]);
})?;
Ok(())
}
| true |
79213afb4b24d22ab8fa42411b4c08b42d6586c9
|
Rust
|
MadRubicant/racer
|
/src/racer/ast_types.rs
|
UTF-8
| 19,394 | 3.046875 | 3 |
[
"MIT"
] |
permissive
|
//! type conversion between racer types and libsyntax types
use core::{self, BytePos, Match, MatchType, Scope, Session};
use matchers::ImportInfo;
use nameres;
use std::fmt;
use std::path::{Path as FilePath, PathBuf};
use syntax::ast::{
self, GenericBound, GenericBounds, GenericParamKind, TraitRef, TyKind, WherePredicate,
};
use syntax::print::pprust;
use syntax::source_map;
/// The leaf of a `use` statement.
#[derive(Clone, Debug)]
pub struct PathAlias {
/// the leaf of Use Tree
/// it can be one of one of 3 types, e.g.
/// use std::collections::{self, hashmap::*, HashMap};
pub kind: PathAliasKind,
/// The path.
pub path: Path,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum PathAliasKind {
Ident(String),
Self_(String),
Glob,
}
impl AsRef<Path> for PathAlias {
fn as_ref(&self) -> &Path {
&self.path
}
}
// Represents a type. Equivilent to rustc's ast::Ty but can be passed across threads
#[derive(Debug, Clone)]
pub enum Ty {
Match(Match),
PathSearch(Path, Scope), // A path + the scope to be able to resolve it
Tuple(Vec<Ty>),
FixedLengthVec(Box<Ty>, String), // ty, length expr as string
RefPtr(Box<Ty>),
Vec(Box<Ty>),
Unsupported,
}
impl Ty {
pub(crate) fn from_ast(ty: &ast::Ty, scope: &Scope) -> Option<Ty> {
match ty.node {
TyKind::Tup(ref items) => {
let mut res = Vec::new();
for t in items {
res.push(match Ty::from_ast(t, scope) {
Some(t) => t,
None => return None,
});
}
Some(Ty::Tuple(res))
}
TyKind::Rptr(ref _lifetime, ref ty) => {
Ty::from_ast(&ty.ty, scope).map(|ref_ty| Ty::RefPtr(Box::new(ref_ty)))
}
TyKind::Path(_, ref path) => Some(Ty::PathSearch(Path::from_ast(path), scope.clone())),
TyKind::Array(ref ty, ref expr) => Ty::from_ast(ty, scope).map(|racer_ty| {
Ty::FixedLengthVec(Box::new(racer_ty), pprust::expr_to_string(&expr.value))
}),
TyKind::Slice(ref ty) => {
Ty::from_ast(ty, scope).map(|ref_ty| Ty::Vec(Box::new(ref_ty)))
}
TyKind::Never => None,
_ => {
trace!("unhandled Ty node: {:?}", ty.node);
None
}
}
}
}
impl fmt::Display for Ty {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Ty::Match(ref m) => write!(f, "{}", m.matchstr),
Ty::PathSearch(ref p, _) => write!(f, "{}", p),
Ty::Tuple(ref vec) => {
let mut first = true;
write!(f, "(")?;
for field in vec.iter() {
if first {
write!(f, "{}", field)?;
first = false;
} else {
write!(f, ", {}", field)?;
}
}
write!(f, ")")
}
Ty::FixedLengthVec(ref ty, ref expr) => {
write!(f, "[")?;
write!(f, "{}", ty)?;
write!(f, "; ")?;
write!(f, "{}", expr)?;
write!(f, "]")
}
Ty::Vec(ref ty) => {
write!(f, "[")?;
write!(f, "{}", ty)?;
write!(f, "]")
}
Ty::RefPtr(ref ty) => write!(f, "&{}", ty),
Ty::Unsupported => write!(f, "_"),
}
}
}
/// Prefix of path.
/// e.g. for path `::std` => Global
/// for path `self::abc` => Self_
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PathPrefix {
Crate,
Super,
Self_,
Global,
}
impl PathPrefix {
pub(crate) fn from_str(s: &str) -> Option<PathPrefix> {
match s {
"crate" => Some(PathPrefix::Crate),
"super" => Some(PathPrefix::Super),
"self" => Some(PathPrefix::Self_),
"{{root}}" => Some(PathPrefix::Global),
_ => None,
}
}
}
// The racer implementation of an ast::Path. Difference is that it is Send-able
#[derive(Clone, PartialEq)]
pub struct Path {
pub prefix: Option<PathPrefix>,
pub segments: Vec<PathSegment>,
}
impl Path {
pub fn is_single(&self) -> bool {
self.segments.len() == 1
}
pub fn from_ast(path: &ast::Path) -> Path {
let mut segments = Vec::new();
for seg in path.segments.iter() {
let name = seg.ident.name.to_string();
let mut types = Vec::new();
// TODO: support GenericArgs::Parenthesized (A path like `Foo(A,B) -> C`)
if let Some(ref params) = seg.args {
if let ast::GenericArgs::AngleBracketed(ref angle_args) = **params {
angle_args.args.iter().for_each(|arg| {
if let ast::GenericArg::Type(ty) = arg {
if let TyKind::Path(_, ref path) = ty.node {
types.push(Path::from_ast(path));
}
}
})
}
}
segments.push(PathSegment::new(name, types));
}
Path {
prefix: None,
segments,
}
}
pub fn generic_types(&self) -> ::std::slice::Iter<Path> {
self.segments[self.segments.len() - 1].types.iter()
}
pub fn single(seg: PathSegment) -> Path {
Path {
prefix: None,
segments: vec![seg],
}
}
pub fn set_prefix(&mut self) {
if self.prefix.is_some() {
return;
}
self.prefix = self
.segments
.first()
.and_then(|seg| PathPrefix::from_str(&seg.name));
if self.prefix.is_some() {
self.segments.remove(0);
}
}
pub fn from_vec(global: bool, v: Vec<&str>) -> Path {
Self::from_iter(global, v.into_iter().map(|s| s.to_owned()))
}
pub fn from_svec(global: bool, v: Vec<String>) -> Path {
Self::from_iter(global, v.into_iter())
}
pub fn from_iter(global: bool, iter: impl Iterator<Item = String>) -> Path {
let mut prefix = if global {
Some(PathPrefix::Global)
} else {
None
};
let segments: Vec<_> = iter
.enumerate()
.filter_map(|(i, s)| {
if i == 0 && prefix.is_none() {
if let Some(pre) = PathPrefix::from_str(&s) {
prefix = Some(pre);
return None;
}
}
Some(PathSegment::from(s))
}).collect();
Path { prefix, segments }
}
pub fn extend(&mut self, path: Path) -> &mut Self {
self.segments.extend(path.segments);
self
}
pub fn len(&self) -> usize {
self.segments.len()
}
pub fn name(&self) -> Option<&str> {
self.segments.last().map(|seg| &*seg.name)
}
}
impl fmt::Debug for Path {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "P[")?;
let mut first = true;
for seg in &self.segments {
if first {
write!(f, "{}", seg.name)?;
first = false;
} else {
write!(f, "::{}", seg.name)?;
}
if !seg.types.is_empty() {
write!(f, "<")?;
let mut t_first = true;
for typath in &seg.types {
if t_first {
write!(f, "{:?}", typath)?;
t_first = false;
} else {
write!(f, ",{:?}", typath)?
}
}
write!(f, ">")?;
}
}
write!(f, "]")
}
}
impl fmt::Display for Path {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut first = true;
for seg in &self.segments {
if first {
write!(f, "{}", seg.name)?;
first = false;
} else {
write!(f, "::{}", seg.name)?;
}
if !seg.types.is_empty() {
write!(f, "<")?;
let mut t_first = true;
for typath in &seg.types {
if t_first {
write!(f, "{}", typath)?;
t_first = false;
} else {
write!(f, ", {}", typath)?
}
}
write!(f, ">")?;
}
}
Ok(())
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct PathSegment {
pub name: String,
pub types: Vec<Path>,
}
impl PathSegment {
pub fn new(name: String, types: Vec<Path>) -> Self {
PathSegment { name, types }
}
}
impl From<String> for PathSegment {
fn from(name: String) -> Self {
PathSegment {
name,
types: Vec::new(),
}
}
}
/// Information about generic types in a match
#[derive(Clone, PartialEq)]
pub struct PathSearch {
pub path: Path,
pub filepath: PathBuf,
pub point: BytePos,
}
impl fmt::Debug for PathSearch {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Search [{:?}, {:?}, {:?}]",
self.path,
self.filepath.display(),
self.point
)
}
}
/// Wrapper struct for representing trait bounds.
/// Its usages are
/// - for generic types like T: Debug + Clone
/// - for trait inheritance like trait A: Debug + Clone
/// - for impl_trait like fn f(a: impl Debug + Clone)
/// - for dynamic traits(dyn_trait) like Box<Debug + Clone> or Box<dyn Debug + Clone>
#[derive(Clone, Debug, PartialEq)]
pub struct TraitBounds(Vec<PathSearch>);
impl TraitBounds {
/// checks if it contains a trait, whick its name is 'name'
pub fn find_by_name(&self, name: &str) -> Option<&PathSearch> {
self.0.iter().find(|path_search| {
let seg = &path_search.path.segments;
if seg.len() != 1 {
return false;
}
&seg[0].name == name
})
}
/// Search traits included in bounds and return Matches
pub fn get_traits(&self, session: &Session) -> Vec<Match> {
self.0
.iter()
.filter_map(|ps| {
nameres::resolve_path_with_str(
&ps.path,
&ps.filepath,
ps.point,
core::SearchType::ExactMatch,
core::Namespace::Type,
session,
).nth(0)
}).collect()
}
#[inline]
pub fn len(&self) -> usize {
self.0.len()
}
pub(crate) fn from_generic_bounds<P: AsRef<FilePath>>(
bounds: &GenericBounds,
filepath: P,
offset: i32,
) -> TraitBounds {
let vec = bounds
.iter()
.filter_map(|bound| {
if let GenericBound::Trait(ref ptrait_ref, _) = *bound {
let ast_path = &ptrait_ref.trait_ref.path;
let source_map::BytePos(point) = ast_path.span.lo();
let path = Path::from_ast(&ast_path);
let path_search = PathSearch {
path: path,
filepath: filepath.as_ref().to_path_buf(),
point: BytePos::from((point as i32 + offset) as u32),
};
Some(path_search)
} else {
None
}
}).collect();
TraitBounds(vec)
}
fn extend(&mut self, other: Self) {
self.0.extend(other.0)
}
fn to_paths(&self) -> Vec<Path> {
self.0.iter().map(|paths| paths.path.clone()).collect()
}
}
/// Argument of generics like T: From<String>
/// It's intended to use this type only for declaration of type parameter.
// TODO: impl trait's name
// TODO: it has too many PathBuf
#[derive(Clone, Debug, PartialEq)]
pub struct TypeParameter {
/// the name of type parameter declared in generics, like 'T'
pub name: String,
/// The point 'T' appears
pub point: BytePos,
/// file path
pub filepath: PathBuf,
/// bounds
pub bounds: TraitBounds,
/// Resolved Type
pub resolved: Option<PathSearch>,
}
impl TypeParameter {
pub fn name(&self) -> &str {
&(*self.name)
}
pub(crate) fn into_match(self) -> Option<Match> {
// TODO: contextstr, local
Some(Match {
matchstr: self.name,
filepath: self.filepath,
point: self.point,
coords: None,
local: false,
mtype: MatchType::TypeParameter(Box::new(self.bounds)),
contextstr: String::new(),
docs: String::new(),
})
}
pub(crate) fn resolve(&mut self, paths: PathSearch) {
self.resolved = Some(paths);
}
pub(crate) fn resolved(&self) -> Option<&PathSearch> {
self.resolved.as_ref()
}
pub fn to_racer_path(&self) -> Path {
let segment = PathSegment {
name: self.name.clone(),
types: self.bounds.to_paths(),
};
Path::single(segment)
}
}
/// List of Args in generics, e.g. <T: Clone, U, P>
/// Now it's intended to use only for type parameters
// TODO: should we extend this type enable to handle both type parameters and true types?
#[derive(Clone, Debug, Default, PartialEq)]
pub struct GenericsArgs(pub Vec<TypeParameter>);
impl GenericsArgs {
pub(crate) fn find_type_param(&self, name: &str) -> Option<&TypeParameter> {
self.0.iter().find(|v| &v.name == name)
}
pub(crate) fn extend(&mut self, other: GenericsArgs) {
self.0.extend(other.0);
}
pub(crate) fn from_generics<'a, P: AsRef<FilePath>>(
generics: &'a ast::Generics,
filepath: P,
offset: i32,
) -> Self {
let mut args = Vec::new();
for param in generics.params.iter() {
match param.kind {
// TODO: lifetime support
GenericParamKind::Lifetime => {}
// TODO: should we handle default type here?
GenericParamKind::Type { default: _ } => {
let param_name = param.ident.name.to_string();
let source_map::BytePos(point) = param.ident.span.lo();
let bounds = TraitBounds::from_generic_bounds(¶m.bounds, &filepath, offset);
args.push(TypeParameter {
name: param_name,
point: BytePos::from((point as i32 + offset) as u32),
filepath: filepath.as_ref().to_path_buf(),
bounds,
resolved: None,
})
}
}
}
for pred in generics.where_clause.predicates.iter() {
match pred {
WherePredicate::BoundPredicate(bound) => match bound.bounded_ty.node {
TyKind::Path(ref _qself, ref path) => {
if let Some(seg) = path.segments.get(0) {
let name = seg.ident.name.as_str();
if let Some(mut tp) = args.iter_mut().find(|tp| tp.name == name) {
tp.bounds.extend(TraitBounds::from_generic_bounds(
&bound.bounds,
&filepath,
offset,
));
}
}
}
// TODO 'self' support
TyKind::ImplicitSelf => {}
_ => {}
},
// TODO: lifetime support
WherePredicate::RegionPredicate(_) => {}
_ => {}
}
}
GenericsArgs(args)
}
pub fn get_idents(&self) -> Vec<String> {
self.0.iter().map(|g| g.name.clone()).collect()
}
pub fn args(&self) -> impl Iterator<Item = &TypeParameter> {
self.0.iter()
}
pub fn args_mut(&mut self) -> impl Iterator<Item = &mut TypeParameter> {
self.0.iter_mut()
}
pub fn search_param_by_path(&self, path: &Path) -> Option<(usize, &TypeParameter)> {
if !path.is_single() {
return None;
}
let query = &path.segments[0].name;
for (i, typ) in self.0.iter().enumerate() {
if typ.name() == query {
return Some((i, typ));
}
}
None
}
}
/// `Impl` information
#[derive(Clone, Debug, PartialEq)]
pub struct ImplHeader {
self_path: Path,
trait_path: Option<Path>,
generics: GenericsArgs,
filepath: PathBuf,
// TODO: should be removed
local: bool,
impl_start: BytePos,
block_start: BytePos,
}
impl ImplHeader {
pub(crate) fn new(
generics: &ast::Generics,
path: &FilePath,
otrait: &Option<TraitRef>,
self_type: &ast::Ty,
offset: BytePos,
local: bool,
impl_start: BytePos,
block_start: BytePos,
) -> Option<Self> {
let generics = GenericsArgs::from_generics(generics, path, offset.0 as i32);
let self_path = destruct_ref_ptr(&self_type.node).map(Path::from_ast)?;
let trait_path = otrait.as_ref().map(|tref| Path::from_ast(&tref.path));
Some(ImplHeader {
self_path,
trait_path,
generics,
filepath: path.to_owned(),
local,
impl_start,
block_start,
})
}
pub(crate) fn self_path(&self) -> &Path {
&self.self_path
}
pub(crate) fn trait_path(&self) -> Option<&Path> {
self.trait_path.as_ref()
}
pub(crate) fn file_path(&self) -> &FilePath {
self.filepath.as_ref()
}
pub(crate) fn generics(&self) -> &GenericsArgs {
&self.generics
}
pub(crate) fn impl_start(&self) -> BytePos {
self.impl_start
}
// TODO: should be removed
pub(crate) fn is_local(&self) -> bool {
self.local || self.trait_path.is_some()
}
pub(crate) fn is_trait(&self) -> bool {
self.trait_path.is_some()
}
pub(crate) fn resolve_trait(
&self,
session: &Session,
import_info: &ImportInfo,
) -> Option<Match> {
nameres::resolve_path(
self.trait_path()?,
self.file_path(),
self.impl_start,
core::SearchType::ExactMatch,
core::Namespace::Type,
session,
import_info,
).nth(0)
}
pub(crate) fn scope_start(&self) -> BytePos {
self.block_start.increment()
}
}
fn destruct_ref_ptr(ty: &TyKind) -> Option<&ast::Path> {
match ty {
TyKind::Rptr(_, ref ty) => destruct_ref_ptr(&ty.ty.node),
TyKind::Path(_, ref path) => Some(path),
_ => None,
}
}
| true |
61cecb1150dc92ae92a9a639bf97bbef9f872325
|
Rust
|
cwood821/zeppelin
|
/src/notifier.rs
|
UTF-8
| 298 | 2.5625 | 3 |
[] |
no_license
|
use std::collections::HashMap;
pub fn notify(message: &str, url: &str) {
let mut map = HashMap::new();
map.insert("text", message);
let client = reqwest::Client::new();
let res = client.post(url)
.json(&map)
.send();
if res.is_err() {
// TODO: Do some logging
}
}
| true |
ec66ab9251caa61a7e47b337a56ccb1054088854
|
Rust
|
ram-hacks/fireplace
|
/src/main.rs
|
UTF-8
| 2,962 | 2.984375 | 3 |
[] |
no_license
|
#![feature(globs)]
extern crate getopts;
extern crate ncurses;
use view::*;
use data::*;
use ncurses::*;
use std::io;
use std::os;
use getopts::{optopt,optflag,getopts,OptGroup,Matches};
mod view;
mod data;
fn print_usage(program: &str, _opts: &[OptGroup]) {
println!("Usage: {} [--title <TITLE>] [--fixed [--lower <LOWER BOUND>] --upper <UPPER_BOUND>] [--variable]", program);
for opt in _opts.iter() {
print!("-{} ",opt.short_name);
print!("--{} ",opt.long_name);
print!("{} ",opt.hint);
print!("{}\n",opt.desc);
}
}
fn initialize_program() -> Option<Program> {
let args: Vec<String> = os::args();
let program_name = args[0].clone();
let opts = [
optflag("h", "help", "print this help menu"),
optopt("t", "title", "set the title of the graph", "TITLE"),
optflag("f", "fixed", "use a fixed scale (upper bound must be set)"),
optflag("v", "variable", "use a variable scale (default)"),
optopt("l", "lower", "lower bound of y axis when using a fixed scale", "MINIMUM"),
optopt("u", "upper", "upper bound of y axis when using a fixed scale", "MAXIMUM"),
];
let matches = match getopts(args.tail(), opts) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
if matches.opt_present("h") {
print_usage(program_name.as_slice(), opts);
return None;
}
return Some(Program {
data: Vec::new(),
title: matches.opt_str("t"),
scale: initialize_scale(&matches),
});
}
fn initialize_scale(matches:&Matches)-> ScaleMode {
if matches.opt_present("f") {
// Configure lower and upper bounds
let raw_lower = matches.opt_str("l");
let raw_upper = matches.opt_str("u");
let lower:f64 = match raw_lower {
Some(txt) => from_str(txt.as_slice()).unwrap(),
None => 0.0,
};
let upper = match raw_upper {
Some(txt) => from_str(txt.as_slice()).unwrap(),
None => {
panic!("Upper bound must be specified");
}
};
return Fixed(lower,upper);
} else {
return Variable;
}
}
fn main() {
let p: Option<Program> = initialize_program();
match p {
Some(_) => {},
None => {return;}
}
let mut program = p.unwrap();
/* Setup ncurses. */
initscr();
curs_set(CURSOR_INVISIBLE);
// While input is availabe on stdin
for line in io::stdin().lines() {
// Clear the screen
clear();
// Parse an f64 from the inputted line
let value:f64 = from_str(line.unwrap().as_slice().trim()).unwrap();
// Push it into the array
program.data.push(value);
view::render_frame(&program);
// Refresh the screen with the new frame
refresh();
}
endwin();
}
| true |
ab0360cc9616466f487478300e60ce8981a99096
|
Rust
|
danreeves/rust-tests
|
/src/error_handling.rs
|
UTF-8
| 377 | 3.015625 | 3 |
[] |
no_license
|
extern crate rand;
#[test]
fn result() {
fn might_error() -> Result<i32, String> {
if rand::random() {
return Ok(1);
}
Err("it borked".to_string())
}
let did_it_work = might_error();
if let Ok(num) = did_it_work {
assert_eq!(num, 1);
} else {
assert_eq!(did_it_work.unwrap_err(), "it borked");
}
}
| true |
a0c2f1bba343d9480c9b50f1404f922314c58ede
|
Rust
|
rocallahan/object
|
/src/pe.rs
|
UTF-8
| 7,924 | 2.578125 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
use std::slice;
use alloc::borrow;
use alloc::vec::Vec;
use goblin::pe;
use {DebugFileInfo, Machine, Object, ObjectSection, ObjectSegment, SectionKind, Symbol, SymbolKind,
SymbolMap};
/// A PE object file.
#[derive(Debug)]
pub struct PeFile<'data> {
pe: pe::PE<'data>,
data: &'data [u8],
}
/// An iterator over the loadable sections of a `PeFile`.
#[derive(Debug)]
pub struct PeSegmentIterator<'data, 'file>
where
'data: 'file,
{
file: &'file PeFile<'data>,
iter: slice::Iter<'file, pe::section_table::SectionTable>,
}
/// A loadable section of a `PeFile`.
#[derive(Debug)]
pub struct PeSegment<'data, 'file>
where
'data: 'file,
{
file: &'file PeFile<'data>,
section: &'file pe::section_table::SectionTable,
}
/// An iterator over the sections of a `PeFile`.
#[derive(Debug)]
pub struct PeSectionIterator<'data, 'file>
where
'data: 'file,
{
file: &'file PeFile<'data>,
iter: slice::Iter<'file, pe::section_table::SectionTable>,
}
/// A section of a `PeFile`.
#[derive(Debug)]
pub struct PeSection<'data, 'file>
where
'data: 'file,
{
file: &'file PeFile<'data>,
section: &'file pe::section_table::SectionTable,
}
/// An iterator over the symbols of a `PeFile`.
#[derive(Debug)]
pub struct PeSymbolIterator<'data, 'file>
where
'data: 'file,
{
exports: slice::Iter<'file, pe::export::Export<'data>>,
imports: slice::Iter<'file, pe::import::Import<'data>>,
}
impl<'data> PeFile<'data> {
/// Get the PE headers of the file.
// TODO: this is temporary to allow access to features this crate doesn't provide yet
#[inline]
pub fn pe(&self) -> &pe::PE<'data> {
&self.pe
}
/// Parse the raw PE file data.
pub fn parse(data: &'data [u8]) -> Result<Self, &'static str> {
let pe = pe::PE::parse(data).map_err(|_| "Could not parse PE header")?;
Ok(PeFile { pe, data })
}
}
impl<'data, 'file> Object<'data, 'file> for PeFile<'data>
where
'data: 'file,
{
type Segment = PeSegment<'data, 'file>;
type SegmentIterator = PeSegmentIterator<'data, 'file>;
type Section = PeSection<'data, 'file>;
type SectionIterator = PeSectionIterator<'data, 'file>;
type SymbolIterator = PeSymbolIterator<'data, 'file>;
fn machine(&self) -> Machine {
match self.pe.header.coff_header.machine {
// TODO: Arm/Arm64
pe::header::COFF_MACHINE_X86 => Machine::X86,
pe::header::COFF_MACHINE_X86_64 => Machine::X86_64,
_ => Machine::Other,
}
}
fn segments(&'file self) -> PeSegmentIterator<'data, 'file> {
PeSegmentIterator {
file: self,
iter: self.pe.sections.iter(),
}
}
fn section_data_by_name(&self, section_name: &str) -> Option<&'data [u8]> {
for section in &self.pe.sections {
if let Ok(name) = section.name() {
if name == section_name {
return Some(
&self.data[section.pointer_to_raw_data as usize..]
[..section.size_of_raw_data as usize],
);
}
}
}
None
}
fn sections(&'file self) -> PeSectionIterator<'data, 'file> {
PeSectionIterator {
file: self,
iter: self.pe.sections.iter(),
}
}
fn symbols(&'file self) -> PeSymbolIterator<'data, 'file> {
// TODO: return COFF symbols for object files
PeSymbolIterator {
exports: [].iter(),
imports: [].iter(),
}
}
fn dynamic_symbols(&'file self) -> PeSymbolIterator<'data, 'file> {
PeSymbolIterator {
exports: self.pe.exports.iter(),
imports: self.pe.imports.iter(),
}
}
fn symbol_map(&self) -> SymbolMap<'data> {
// TODO: untested
let mut symbols: Vec<_> = self.symbols().filter(SymbolMap::filter).collect();
symbols.sort_by_key(|x| x.address);
SymbolMap { symbols }
}
#[inline]
fn is_little_endian(&self) -> bool {
// TODO: always little endian? The COFF header has some bits in the
// characteristics flags, but these are obsolete.
true
}
#[inline]
fn has_debug_symbols(&self) -> bool {
// TODO: look at what the mingw toolchain does with DWARF-in-PE, and also
// whether CodeView-in-PE still works?
false
}
fn debug_file_info(&self) -> Option<DebugFileInfo> { None }
fn entry(&self) -> u64 {
self.pe.entry as u64
}
}
impl<'data, 'file> Iterator for PeSegmentIterator<'data, 'file> {
type Item = PeSegment<'data, 'file>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|section| {
PeSegment {
file: self.file,
section,
}
})
}
}
impl<'data, 'file> ObjectSegment<'data> for PeSegment<'data, 'file> {
#[inline]
fn address(&self) -> u64 {
u64::from(self.section.virtual_address)
}
#[inline]
fn size(&self) -> u64 {
u64::from(self.section.virtual_size)
}
fn data(&self) -> &'data [u8] {
&self.file.data[self.section.pointer_to_raw_data as usize..]
[..self.section.size_of_raw_data as usize]
}
#[inline]
fn name(&self) -> Option<&str> {
self.section.name().ok()
}
}
impl<'data, 'file> Iterator for PeSectionIterator<'data, 'file> {
type Item = PeSection<'data, 'file>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|section| {
PeSection {
file: self.file,
section,
}
})
}
}
impl<'data, 'file> ObjectSection<'data> for PeSection<'data, 'file> {
#[inline]
fn address(&self) -> u64 {
u64::from(self.section.virtual_address)
}
#[inline]
fn size(&self) -> u64 {
u64::from(self.section.virtual_size)
}
fn data(&self) -> &'data [u8] {
&self.file.data[self.section.pointer_to_raw_data as usize..]
[..self.section.size_of_raw_data as usize]
}
fn name(&self) -> Option<&str> {
self.section.name().ok()
}
#[inline]
fn segment_name(&self) -> Option<&str> {
None
}
#[inline]
fn kind(&self) -> SectionKind {
if self.section.characteristics
& (pe::section_table::IMAGE_SCN_CNT_CODE | pe::section_table::IMAGE_SCN_MEM_EXECUTE)
!= 0
{
SectionKind::Text
} else if self.section.characteristics & pe::section_table::IMAGE_SCN_CNT_INITIALIZED_DATA
!= 0
{
SectionKind::Data
} else if self.section.characteristics & pe::section_table::IMAGE_SCN_CNT_UNINITIALIZED_DATA
!= 0
{
SectionKind::UninitializedData
} else {
SectionKind::Unknown
}
}
}
impl<'data, 'file> Iterator for PeSymbolIterator<'data, 'file> {
type Item = Symbol<'data>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(export) = self.exports.next() {
return Some(Symbol {
kind: SymbolKind::Unknown,
section_kind: Some(SectionKind::Unknown),
global: true,
name: Some(export.name),
address: export.rva as u64,
size: 0,
});
}
if let Some(import) = self.imports.next() {
let name = match import.name {
borrow::Cow::Borrowed(name) => Some(name),
_ => None,
};
return Some(Symbol {
kind: SymbolKind::Unknown,
section_kind: None,
global: true,
name: name,
address: 0,
size: 0,
});
}
None
}
}
| true |
f51b2b00d3b1d707978cfbf0bdc4974065ceaeb4
|
Rust
|
samgwise/555nm-soundscape
|
/src/config/config_tests.rs
|
UTF-8
| 4,993 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
#[cfg(test)]
mod config_test {
use config::*;
use ::epochsy;
fn test_config() -> Soundscape {
Soundscape {
listen_addr: Address { host: "127.0.0.1".to_string(), port: 4000 },
subscribers: vec![ Address { host: "127.0.0.1".to_string(), port: 4000 } ],
scenes: vec![],
metro_step_ms: 10,
voice_limit: 16,
default_level: 1.0,
background_scene: None,
speaker_positions: Speakers { positions: vec![] },
ignore_extra_speakers: Some (true),
is_fallback_slave: None,
daily_schedule: Some (DailySchedule { start: "17:30:00".to_string(), end: "23:00:00".to_string() }),
}
}
#[test]
fn diag() {
let show_diag = true;//false; // toggle to fail and print the following diag info
let config = test_config();
let today = local_today();
println!("today {} ({:?})", from_timestamp(moment(&today) as i64), today);
let start = next_start_time(&config, &today);
println!("start {} ({:?})", from_timestamp(moment(&start) as i64), start);
let end = next_end_time(&config, &start).unwrap();
println!("end {} ({:?})", from_timestamp(moment(&end) as i64), end);
println!("now {} ({:?})", from_timestamp(moment(&localtime()) as i64), localtime());
println!("Is now in schedule? {}", is_in_schedule(&localtime(), &start, &end));
assert!(!show_diag);
}
#[test]
fn schedules() {
let config = test_config();
println!("now: {:?}", &epochsy::now());
println!("localtime: {:?}", localtime());
println!("11:45:26 <=> {:?}", &epochsy::hms(11, 45, 26));
println!("midnight today <=> {:?}", epochsy::moment(&to_localtime(&epochsy::floor_to_days(&epochsy::now()))));
println!("is_in_schedule_now currently? {:?}", is_in_schedule_now(&config, &localtime()));
let start = next_start_time(&config, &localtime());
assert!(start.moment > 0);
// assert_eq!(from_timestamp(start).timestamp(), start);
// let test_now = now();
// assert_eq!(from_timestamp(test_now.timestamp()).timestamp(), test_now.timestamp());
// println!("{:?} <=> {:?}", test_now, test_now.timestamp());
let end_time_from_start = next_end_time(&config, &start);
assert_ne!(end_time_from_start, None);
let end_from_start = end_time_from_start.unwrap();
let end_time = next_end_time(&config, &localtime());
assert_ne!(end_time, None);
let end = end_time.unwrap();
// assert_eq!(to_localtime(&from_localtime(&start)).moment, start.moment);
println!("working with start: {:?} and end: {:?}", start, end );
println!("total interval in seconds: {:?}", epochsy::diff(&end, &start));
// characteristic features
assert!(moment(&start) < moment(&end_from_start));
// assert expected duration
// assert_eq!(moment(&end_from_start) - moment(&start), 23400);
// assert_eq!(start_local.hour(), 18);
// assert_eq!(start_local.minute(), 30);
// assert_eq!(start_local.second(), 0);
//
// assert_eq!(end_local.hour(), 1);
// assert_eq!(end_local.minute(), 0);
// assert_eq!(end_local.second(), 0);
println!("is_in_schedule currently? {:?} ({:?} to {:?})", is_in_schedule(&localtime(), &start, &end), start, end);
assert!(is_in_schedule(&start, &start, &end_from_start));
assert!(is_in_schedule(&end_from_start, &start, &end_from_start));
let before = epochsy::append(&local_today(), &epochsy::hms(15, 39, 0));
println!("before: {} ({:?})", from_timestamp(moment(&before) as i64), before);
assert!(before.moment < next_start_time(&config, &before).moment);
assert!(next_start_time(&config, &before).moment < end_from_start.moment);
let after = epochsy::append(&local_today(), &epochsy::hms(25, 31, 0));
println!("after: {} ({:?})", from_timestamp(moment(&after) as i64), after);
let during = epochsy::append(&local_today(), &epochsy::hms(18, 35, 0));
println!("during: {} ({:?})", from_timestamp(moment(&during) as i64), during);
assert!(!is_in_schedule(&before, &start, &end));
assert!(!is_in_schedule(&after, &start, &end));
// The start bound is not inclusive.
// assert!(is_in_schedule_now(&config, &start));
assert!(!is_in_schedule_now(&config, &before));
assert!(is_in_schedule_now(&config, &during));
assert!(!is_in_schedule_now(&config, &after));
let before_end = epochsy::append(&local_today(), &epochsy::hms(20, 29, 0));
println!("before_end: {} ({:?})", from_timestamp(moment(&before_end) as i64), before_end);
assert!(is_in_schedule_now(&config, &before_end));
}
}
| true |
41f2b9c157dec8e601692ef1159cb941a230d904
|
Rust
|
mesalock-linux/crates-sgx
|
/vendor/bincode/src/config/trailing.rs
|
UTF-8
| 1,146 | 2.796875 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
use std::prelude::v1::*;
use de::read::SliceReader;
use {ErrorKind, Result};
/// A trait for erroring deserialization if not all bytes were read.
pub trait TrailingBytes {
/// Checks a given slice reader to determine if deserialization used all bytes in the slice.
fn check_end(reader: &SliceReader) -> Result<()>;
}
/// A TrailingBytes config that will allow trailing bytes in slices after deserialization.
#[derive(Copy, Clone)]
pub struct AllowTrailing;
/// A TrailingBytes config that will cause bincode to produce an error if bytes are left over in the slice when deserialization is complete.
#[derive(Copy, Clone)]
pub struct RejectTrailing;
impl TrailingBytes for AllowTrailing {
#[inline(always)]
fn check_end(_reader: &SliceReader) -> Result<()> {
Ok(())
}
}
impl TrailingBytes for RejectTrailing {
#[inline(always)]
fn check_end(reader: &SliceReader) -> Result<()> {
if reader.is_finished() {
Ok(())
} else {
Err(Box::new(ErrorKind::Custom(
"Slice had bytes remaining after deserialization".to_string(),
)))
}
}
}
| true |
582d004ba85cfae2591a2e616a056528786873b5
|
Rust
|
suhassrivats/Data-Structures-And-Algorithms-Implementation
|
/Grokking-the-Coding-Interview-Patterns-for-Coding-Questions/13. Pattern Top 'K' Elements/Top 'K' Numbers (easy).py
|
UTF-8
| 1,962 | 3.859375 | 4 |
[] |
no_license
|
'''
Problem Statement
Given an unsorted array of numbers, find the ‘K’ largest numbers in it.
Note: For a detailed discussion about different approaches to solve this problem, take a look at Kth Smallest Number.
Example 1:
Input: [3, 1, 5, 12, 2, 11], K = 3
Output: [5, 12, 11]
Example 2:
Input: [5, 12, 11, -1, 12], K = 3
Output: [12, 11, 12]
'''
#mycode
from heapq import *
def find_k_largest_numbers(nums, k):
result = []
# TODO: Write your code here
for num in nums:
if len(result) < k:
heappush(result, num)
else:
if num > result[0]:
heappop(result)
heappush(result, num)
return result
def main():
print("Here are the top K numbers: " +
str(find_k_largest_numbers([3, 1, 5, 12, 2, 11], 3)))
print("Here are the top K numbers: " +
str(find_k_largest_numbers([5, 12, 11, -1, 12], 3)))
main()
#answer
from heapq import *
def find_k_largest_numbers(nums, k):
minHeap = []
# put first 'K' numbers in the min heap
for i in range(k):
heappush(minHeap, nums[i])
# go through the remaining numbers of the array, if the number from the array is bigger than the
# top(smallest) number of the min-heap, remove the top number from heap and add the number from array
for i in range(k, len(nums)):
if nums[i] > minHeap[0]:
heappop(minHeap)
heappush(minHeap, nums[i])
# the heap has the top 'K' numbers, return them in a list
return list(minHeap)
def main():
print("Here are the top K numbers: " +
str(find_k_largest_numbers([3, 1, 5, 12, 2, 11], 3)))
print("Here are the top K numbers: " +
str(find_k_largest_numbers([5, 12, 11, -1, 12], 3)))
main()
'''
Time complexity
As discussed above, the time complexity of this algorithm is O(K*logK+(N-K)*logK), which is asymptotically equal to O(N*logK)
Space complexity
The space complexity will be O(K) since we need to store the top ‘K’ numbers in the heap.
'''
| true |
adcc6357695e78405d87562986d2d1b9b39a224f
|
Rust
|
basiliqio/basiliq
|
/src/basiliq_store/store/config/mod.rs
|
UTF-8
| 4,591 | 2.734375 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use super::*;
mod builder_config;
mod errors;
mod mergeable;
pub use errors::{BasiliqStoreConfigError, BasiliqStoreConfigErrorSource};
use itertools::EitherOrBoth;
use itertools::Itertools;
pub use mergeable::BasiliqStoreConfigMergeable;
/// Top level of the Store configuration
///
/// Contains a list of accepted resources
#[derive(
Debug,
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Deserialize,
Serialize,
Getters,
MutGetters,
Default,
)]
#[getset(get = "pub", get_mut = "pub")]
pub struct BasiliqStoreConfig {
pub(crate) resources: BTreeMap<String, BasiliqStoreResourceConfig>,
}
/// The configuration of a store resource
#[derive(
Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, Getters, MutGetters,
)]
#[getset(get = "pub")]
pub struct BasiliqStoreResourceConfig {
pub(crate) target: BasiliqStoreTableIdentifier,
#[getset(get_mut = "pub")]
pub(crate) enabled: bool,
/// A map of the relationships
#[getset(get_mut = "pub")]
pub(crate) relationships: BTreeMap<ArcStr, BasiliqStoreRelationshipsConfig>,
}
impl BasiliqStoreConfigMergeable<BasiliqStoreResourceConfig> for BasiliqStoreResourceConfig {
fn basiliq_config_merge(
&mut self,
other: &BasiliqStoreResourceConfig,
) -> Result<(), BasiliqStoreConfigError> {
if self.target != other.target {
return Err(BasiliqStoreConfigError::TargetConfigChange);
}
let mut new_relationships: BTreeMap<ArcStr, BasiliqStoreRelationshipsConfig> =
self.relationships.clone();
self.enabled = other.enabled;
for x in self
.relationships()
.iter()
.merge_join_by(other.relationships().iter(), |(_k1, v1), (_k2, v2)| {
v1.target().cmp(v2.target())
})
{
match x {
EitherOrBoth::Both((k1, v1), (k2, v2)) => {
let mut new = v1.clone();
new.basiliq_config_merge(v2)?;
if k1 != k2 {
new_relationships.remove(k1);
new_relationships.insert(k2.clone(), new);
} else if let Some(x) = new_relationships.get_mut(k1) {
*x = new
}
}
EitherOrBoth::Left((_, v1)) => {
return Err(BasiliqStoreConfigError::UnkownResource(
BasiliqStoreConfigErrorSource::BaseConfig,
v1.target().clone(),
));
}
EitherOrBoth::Right((_, v2)) => {
return Err(BasiliqStoreConfigError::UnkownResource(
BasiliqStoreConfigErrorSource::ProvidedConfig,
v2.target().clone(),
));
}
}
}
self.relationships = new_relationships;
Ok(())
}
}
/// The configuration of a store relationships
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, Getters)]
#[getset(get = "pub")]
pub struct BasiliqStoreRelationshipsThroughConfig {
/// The target table through which the relationship is made
#[serde(flatten)]
pub(crate) target: BasiliqStoreTableIdentifier,
/// The field in the target table through which the relationship is made
pub(crate) field: ArcStr,
}
/// The configuration of a store relationships
#[derive(
Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, Getters, MutGetters,
)]
#[getset(get = "pub")]
pub struct BasiliqStoreRelationshipsConfig {
/// The target table of this relationship
pub(crate) target: BasiliqStoreTableIdentifier,
/// In case of Many-to-Many relationship, the bucket informations
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) through: Option<BasiliqStoreRelationshipsThroughConfig>,
#[getset(get_mut = "pub")]
pub(crate) enabled: bool,
/// The field in the target table beeing referenced by that relationship
pub(crate) field: ArcStr,
}
impl BasiliqStoreConfigMergeable<BasiliqStoreRelationshipsConfig>
for BasiliqStoreRelationshipsConfig
{
fn basiliq_config_merge(
&mut self,
other: &BasiliqStoreRelationshipsConfig,
) -> Result<(), BasiliqStoreConfigError> {
if self.target != other.target || self.field != other.field {
return Err(BasiliqStoreConfigError::TargetConfigChange);
}
self.enabled = other.enabled;
Ok(())
}
}
| true |
8ed0568e475af2e083fd0f452378efaad6677c53
|
Rust
|
fratorgano/advent_of_code_2020
|
/day13/src/main.rs
|
UTF-8
| 874 | 2.703125 | 3 |
[] |
no_license
|
use std::time::Instant;
fn main() {
let time = 1006401;
let buses = vec!["17","x","x","x","x","x","x","x","x","x","x","37","x",
"x","x","x","x","449","x","x","x","x","x","x","x","23",
"x","x","x","x","13","x","x","x","x","x","19","x","x",
"x","x","x","x","x","x","x","x","x","607","x","x","x",
"x","x","x","x","x","x","41","x","x","x","x","x","x",
"x","x","x","x","x","x","x","x","x","x","x","x","29"];
let start = Instant::now();
let res1 = day13::find_best_bus(time,&buses);
println!("Finished after {:?}", start.elapsed());
let start = Instant::now();
let res2 = day13::find_best_timestamp(&buses);
println!("Finished after {:?}", start.elapsed());
println!("Part 1 result: {}",res1);
println!("Part 2 result: {}",res2);
}
| true |
3499fba56824998f21fd127615d133f480aca620
|
Rust
|
paul-sud/bigbed-jaccard
|
/src/bin/bigbed_jaccard_similarity_matrix.rs
|
UTF-8
| 3,025 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
use bigbed_jaccard::bed::{get_offset_data, IntervalQuery};
use bigbed_jaccard::bottom_k::{compute_k_minhashes, jaccard, BoundedPriorityQueue};
use bigbed_jaccard::request::download_to_tempfile;
use bigtools::bbiread::BBIRead;
use bigtools::bigbedread::BigBedRead;
use csv::{Writer, WriterBuilder};
use itertools::Itertools;
use serde::Serialize;
use std::error::Error;
use std::fs::File;
use std::io::{Read, Write};
use std::path::Path;
use std::string::String;
use std::time::Instant;
use structopt::StructOpt;
use rayon::prelude::*;
const QUEUE_SIZE: usize = 100;
#[derive(Debug, Serialize)]
struct JaccardResult<'a> {
id1: &'a str,
id2: &'a str,
jaccard: f64,
}
#[derive(StructOpt)]
struct Cli {
#[structopt(parse(from_os_str))]
infile: std::path::PathBuf,
#[structopt(parse(from_os_str))]
outfile: std::path::PathBuf,
}
fn main() -> Result<(), Box<dyn Error>> {
let args = Cli::from_args();
let mut file = File::open(&args.infile)?;
let mut file_contents = String::new();
file.read_to_string(&mut file_contents)?;
let bigbed_paths = file_contents.lines().collect::<Vec<_>>();
let minhashes = bigbed_paths
.par_iter()
.map(|&item| genome_wide_minhash(item))
.collect::<Vec<_>>();
let outfile = File::create(&args.outfile)?;
let mut writer = get_writer(&outfile);
// Note no replacement, so we don't compute jaccard of diagonal which is always 1
for pair in minhashes.iter().combinations(2) {
let start = Instant::now();
let jaccard = jaccard(&pair[0].1, &pair[1].1);
println!("Jaccard of {} and {}: {}", pair[0].0, pair[1].0, &jaccard,);
println!("Time elapsed in jaccard() is: {:?}", start.elapsed());
writer.serialize(JaccardResult {
id1: Path::new(pair[0].0).file_stem().unwrap().to_str().unwrap(),
id2: Path::new(pair[1].0).file_stem().unwrap().to_str().unwrap(),
jaccard,
})?;
}
Ok(())
}
fn genome_wide_minhash(bigbed_path: &str) -> (&str, BoundedPriorityQueue) {
let download = download_to_tempfile(bigbed_path).unwrap();
let mut reader =
BigBedRead::from_file_and_attach(download.path().to_str().unwrap().to_string()).unwrap();
let chroms = reader.get_chroms();
let queries = chroms
.iter()
.map(|chrom| IntervalQuery::new(chrom.name.to_string(), 0, chrom.length))
.collect::<Vec<_>>();
let mut start = Instant::now();
let data = get_offset_data(&mut reader, &queries).unwrap();
let mut duration = start.elapsed();
println!("Time elapsed in getting_data() is: {:?}", duration);
start = Instant::now();
let mut minhashes = compute_k_minhashes(&data, QUEUE_SIZE);
duration = start.elapsed();
println!("Time elapsed in compute_k_minhashes() is: {:?}", duration);
minhashes.shrink_to_queue_size();
(bigbed_path, minhashes)
}
fn get_writer<W: Write>(wtr: W) -> Writer<W> {
WriterBuilder::new().delimiter(b'\t').from_writer(wtr)
}
| true |
a699ac46ddcb5fdf5f0a43972be571787b63fd0d
|
Rust
|
EFanZh/LeetCode
|
/src/problem_1078_occurrences_after_bigram/mod.rs
|
UTF-8
| 722 | 3.34375 | 3 |
[] |
no_license
|
pub mod iterative;
pub trait Solution {
fn find_ocurrences(text: String, first: String, second: String) -> Vec<String>;
}
#[cfg(test)]
mod tests {
use super::Solution;
pub fn run<S: Solution>() {
let test_cases = [
(
("alice is a good girl she is a good student", "a", "good"),
&["girl", "student"] as &[_],
),
(("we will we will rock you", "we", "will"), &["we", "rock"] as &[_]),
];
for ((text, first, second), expected) in test_cases {
assert_eq!(
S::find_ocurrences(text.to_string(), first.to_string(), second.to_string()),
expected
);
}
}
}
| true |
d2c5437de57ac5cc26bde91862b7e4f9e2bdf0f2
|
Rust
|
emmanueltouzery/zbus
|
/zvariant/src/framing_offset_size.rs
|
UTF-8
| 4,001 | 3.40625 | 3 |
[
"MIT"
] |
permissive
|
use crate::{Error, Result};
use byteorder::{ByteOrder, WriteBytesExt, LE};
// Used internally for GVariant encoding and decoding.
//
// GVariant containers keeps framing offsets at the end and size of these offsets is dependent on
// the size of the container (which includes offsets themselves.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(usize)]
pub(crate) enum FramingOffsetSize {
U8 = 1,
U16 = 2,
U32 = 4,
U64 = 8,
U128 = 16,
}
impl FramingOffsetSize {
pub(crate) fn for_bare_container(container_len: usize, num_offsets: usize) -> Self {
let mut offset_size = FramingOffsetSize::U8;
loop {
if container_len + num_offsets * (offset_size as usize) <= offset_size.max() {
return offset_size;
}
offset_size = offset_size
.bump_up()
.expect("Can't handle container too large for a 128-bit pointer");
}
}
pub(crate) fn for_encoded_container(container_len: usize) -> Self {
Self::for_bare_container(container_len, 0)
}
pub(crate) fn write_offset<W>(self, writer: &mut W, offset: usize) -> Result<()>
where
W: std::io::Write,
{
match self {
FramingOffsetSize::U8 => writer.write_u8(offset as u8),
FramingOffsetSize::U16 => writer.write_u16::<LE>(offset as u16),
FramingOffsetSize::U32 => writer.write_u32::<LE>(offset as u32),
FramingOffsetSize::U64 => writer.write_u64::<LE>(offset as u64),
FramingOffsetSize::U128 => writer.write_u128::<LE>(offset as u128),
}
.map_err(Error::Io)
}
pub fn read_last_offset_from_buffer(self, buffer: &[u8]) -> usize {
if buffer.is_empty() {
return 0;
}
let end = buffer.len();
match self {
FramingOffsetSize::U8 => buffer[end - 1] as usize,
FramingOffsetSize::U16 => LE::read_u16(&buffer[end - 2..end]) as usize,
FramingOffsetSize::U32 => LE::read_u32(&buffer[end - 4..end]) as usize,
FramingOffsetSize::U64 => LE::read_u64(&buffer[end - 8..end]) as usize,
FramingOffsetSize::U128 => LE::read_u128(&buffer[end - 16..end]) as usize,
}
}
fn max(self) -> usize {
match self {
FramingOffsetSize::U8 => u8::MAX as usize,
FramingOffsetSize::U16 => u16::MAX as usize,
FramingOffsetSize::U32 => u32::MAX as usize,
FramingOffsetSize::U64 => u64::MAX as usize,
FramingOffsetSize::U128 => u128::MAX as usize,
}
}
fn bump_up(self) -> Option<Self> {
match self {
FramingOffsetSize::U8 => Some(FramingOffsetSize::U16),
FramingOffsetSize::U16 => Some(FramingOffsetSize::U32),
FramingOffsetSize::U32 => Some(FramingOffsetSize::U64),
FramingOffsetSize::U64 => Some(FramingOffsetSize::U128),
FramingOffsetSize::U128 => None,
}
}
}
#[cfg(test)]
mod tests {
use crate::framing_offset_size::FramingOffsetSize;
#[test]
fn framing_offset_size_bump() {
assert_eq!(
FramingOffsetSize::for_bare_container(u8::MAX as usize - 3, 3),
FramingOffsetSize::U8
);
assert_eq!(
FramingOffsetSize::for_bare_container(u8::MAX as usize - 1, 2),
FramingOffsetSize::U16
);
assert_eq!(
FramingOffsetSize::for_bare_container(u16::MAX as usize - 4, 2),
FramingOffsetSize::U16
);
assert_eq!(
FramingOffsetSize::for_bare_container(u16::MAX as usize - 3, 2),
FramingOffsetSize::U32
);
assert_eq!(
FramingOffsetSize::for_bare_container(u32::MAX as usize - 12, 3),
FramingOffsetSize::U32
);
assert_eq!(
FramingOffsetSize::for_bare_container(u32::MAX as usize - 11, 3),
FramingOffsetSize::U64
);
}
}
| true |
d28e0fa481e7813b853ef2adc45260a8126cc3e1
|
Rust
|
io12/pwninit
|
/src/unstrip_libc.rs
|
UTF-8
| 2,889 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
use crate::elf;
use crate::libc_deb;
use crate::libc_version::LibcVersion;
use std::io::copy;
use std::io::stderr;
use std::io::stdout;
use std::io::Write;
use std::path::Path;
use std::process::Command;
use std::process::ExitStatus;
use colored::Colorize;
use ex::fs::File;
use ex::io;
use snafu::ResultExt;
use snafu::Snafu;
use tempfile::TempDir;
use version_compare::Cmp;
#[derive(Debug, Snafu)]
#[allow(clippy::enum_variant_names)]
pub enum Error {
#[snafu(display("libc ELF parse error: {}", source))]
ElfParse { source: elf::parse::Error },
#[snafu(display("libc deb error: {}", source))]
Deb { source: libc_deb::Error },
#[snafu(display("failed creating temporary directory"))]
TmpDir { source: std::io::Error },
#[snafu(display("failed running eu-unstrip, please install elfutils: {}", source))]
CmdRun { source: std::io::Error },
#[snafu(display("eu-unstrip exited with failure: {}", status))]
CmdFail { status: ExitStatus },
#[snafu(display("failed to open symbol file: {}", source))]
SymOpen { source: io::Error },
#[snafu(display("failed to open libc file: {}", source))]
LibcOpen { source: io::Error },
#[snafu(display("failed writing symbols to libc file: {}", source))]
LibcWrite { source: std::io::Error },
}
pub type Result = std::result::Result<(), Error>;
/// Download debug symbols and apply them to a libc
fn do_unstrip_libc(libc: &Path, ver: &LibcVersion) -> Result {
println!("{}", "unstripping libc".yellow().bold());
let deb_file_name = format!("libc6-dbg_{}.deb", ver);
let tmp_dir = TempDir::new().context(TmpDirSnafu)?;
let sym_path = tmp_dir.path().join("libc-syms");
let name = if version_compare::compare_to(&ver.string_short, "2.34", Cmp::Lt).unwrap() {
format!("libc-{}.so", ver.string_short)
} else {
let build_id = elf::get_build_id(libc).context(ElfParseSnafu)?;
build_id.chars().skip(2).collect::<String>() + ".debug"
};
libc_deb::write_ubuntu_pkg_file(&deb_file_name, &name, &sym_path).context(DebSnafu)?;
let out = Command::new("eu-unstrip")
.arg(libc)
.arg(&sym_path)
.output()
.context(CmdRunSnafu)?;
let _ = stderr().write_all(&out.stderr);
let _ = stdout().write_all(&out.stdout);
if !out.status.success() {
return Err(Error::CmdFail { status: out.status });
}
let mut sym_file = File::open(sym_path).context(SymOpenSnafu)?;
let mut libc_file = File::create(libc).context(LibcOpenSnafu)?;
copy(&mut sym_file, &mut libc_file).context(LibcWriteSnafu)?;
Ok(())
}
/// Download debug symbols and apply them to a libc if it doesn't have them
/// already
pub fn unstrip_libc(libc: &Path, ver: &LibcVersion) -> Result {
if !elf::has_debug_syms(libc).context(ElfParseSnafu)? {
do_unstrip_libc(libc, ver)?;
}
Ok(())
}
| true |
c3e1834244fb5b4de7cff6658eb0a6e70c8b47bc
|
Rust
|
Riey/kes-rs-blog
|
/org/20200213/instruction-future.rs
|
UTF-8
| 420 | 2.546875 | 3 |
[] |
no_license
|
pub trait InstAlloc<'s, 'c> {
fn alloc_str(&'c self, text: &'s str) -> &'c str;
}
impl<'s, 'c> InstAlloc<'s, 'c> for Bump {
#[inline(always)]
fn alloc_str(&'c self, text: &'s str) -> &'c str {
self.alloc_str(text)
}
}
pub struct DirectInstAlloc;
impl<'s> InstAlloc<'s, 's> for DirectInstAlloc {
#[inline(always)]
fn alloc_str(&'s self, text: &'s str) -> &'s str {
text
}
}
| true |
98df4f7852f4a44ace4e417cf31f84aa1ae17fb7
|
Rust
|
darkdarkfruit/zou
|
/src/http_version.rs
|
UTF-8
| 580 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
use hyper::version::HttpVersion;
/// Trait used to check HTTP Version
///
/// This trait is used to validate that a given HTTP Version match specific need.
pub trait ValidateHttpVersion {
/// Validate that the current HttpVersion is at least 1.1 to be able to download chunks.
fn greater_than_http_11(&self) -> bool;
}
impl ValidateHttpVersion for HttpVersion {
/// Check the given HttpVersion.
///
/// This version should be at least 1.1 to allow chunks downloading.
fn greater_than_http_11(&self) -> bool {
self >= &HttpVersion::Http11
}
}
| true |
14856a6562a0dd1030fc6f95d6a5f3e57ba4f744
|
Rust
|
winksaville/fuchsia
|
/third_party/rust_crates/vendor/failure/src/error/mod.rs
|
UTF-8
| 8,437 | 3.390625 | 3 |
[
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
use core::fmt::{self, Display, Debug};
use {Causes, Fail};
use backtrace::Backtrace;
use context::Context;
use compat::Compat;
#[cfg(feature = "std")]
use box_std::BoxStd;
#[cfg_attr(feature = "small-error", path = "./error_impl_small.rs")]
mod error_impl;
use self::error_impl::ErrorImpl;
#[cfg(feature = "std")]
use std::error::Error as StdError;
/// The `Error` type, which can contain any failure.
///
/// Functions which accumulate many kinds of errors should return this type.
/// All failures can be converted into it, so functions which catch those
/// errors can be tried with `?` inside of a function that returns this kind
/// of error.
///
/// In addition to implementing `Debug` and `Display`, this type carries `Backtrace`
/// information, and can be downcast into the failure that underlies it for
/// more detailed inspection.
pub struct Error {
imp: ErrorImpl,
}
impl<F: Fail> From<F> for Error {
fn from(failure: F) -> Error {
Error {
imp: ErrorImpl::from(failure)
}
}
}
impl Error {
/// Creates an `Error` from `Box<std::error::Error>`.
///
/// This method is useful for comparability with code,
/// which does not use the `Fail` trait.
///
/// # Example
///
/// ```
/// use std::error::Error as StdError;
/// use failure::Error;
///
/// fn app_fn() -> Result<i32, Error> {
/// let x = library_fn().map_err(Error::from_boxed_compat)?;
/// Ok(x * 2)
/// }
///
/// fn library_fn() -> Result<i32, Box<StdError + Sync + Send + 'static>> {
/// Ok(92)
/// }
/// ```
#[cfg(feature = "std")]
pub fn from_boxed_compat(err: Box<StdError + Sync + Send + 'static>) -> Error {
Error::from(BoxStd(err))
}
/// Return a reference to the underlying failure that this `Error`
/// contains.
pub fn as_fail(&self) -> &Fail {
self.imp.failure()
}
/// Returns the name of the underlying fail.
pub fn name(&self) -> Option<&str> {
self.as_fail().name()
}
/// Returns a reference to the underlying cause of this `Error`. Unlike the
/// method on `Fail`, this does not return an `Option`. The `Error` type
/// always has an underlying failure.
///
/// This method has been deprecated in favor of the [Error::as_fail] method,
/// which does the same thing.
#[deprecated(since = "0.1.2", note = "please use 'as_fail()' method instead")]
pub fn cause(&self) -> &Fail {
self.as_fail()
}
/// Gets a reference to the `Backtrace` for this `Error`.
///
/// If the failure this wrapped carried a backtrace, that backtrace will
/// be returned. Otherwise, the backtrace will have been constructed at
/// the point that failure was cast into the `Error` type.
pub fn backtrace(&self) -> &Backtrace {
self.imp.failure().backtrace().unwrap_or(&self.imp.backtrace())
}
/// Provides context for this `Error`.
///
/// This can provide additional information about this error, appropriate
/// to the semantics of the current layer. That is, if you have a
/// lower-level error, such as an IO error, you can provide additional context
/// about what that error means in the context of your function. This
/// gives users of this function more information about what has gone
/// wrong.
///
/// This takes any type that implements `Display`, as well as
/// `Send`/`Sync`/`'static`. In practice, this means it can take a `String`
/// or a string literal, or a failure, or some other custom context-carrying
/// type.
pub fn context<D: Display + Send + Sync + 'static>(self, context: D) -> Context<D> {
Context::with_err(context, self)
}
/// Wraps `Error` in a compatibility type.
///
/// This type implements the `Error` trait from `std::error`. If you need
/// to pass failure's `Error` to an interface that takes any `Error`, you
/// can use this method to get a compatible type.
pub fn compat(self) -> Compat<Error> {
Compat { error: self }
}
/// Attempts to downcast this `Error` to a particular `Fail` type.
///
/// This downcasts by value, returning an owned `T` if the underlying
/// failure is of the type `T`. For this reason it returns a `Result` - in
/// the case that the underlying error is of a different type, the
/// original `Error` is returned.
pub fn downcast<T: Fail>(self) -> Result<T, Error> {
self.imp.downcast().map_err(|imp| Error { imp })
}
/// Returns the "root cause" of this error - the last value in the
/// cause chain which does not return an underlying `cause`.
pub fn find_root_cause(&self) -> &Fail {
self.as_fail().find_root_cause()
}
/// Returns a iterator over the causes of this error with the cause
/// of the fail as the first item and the `root_cause` as the final item.
///
/// Use `iter_chain` to also include the fail of this error itself.
pub fn iter_causes(&self) -> Causes {
self.as_fail().iter_causes()
}
/// Returns a iterator over all fails up the chain from the current
/// as the first item up to the `root_cause` as the final item.
///
/// This means that the chain also includes the fail itself which
/// means that it does *not* start with `cause`. To skip the outermost
/// fail use `iter_causes` instead.
pub fn iter_chain(&self) -> Causes {
self.as_fail().iter_chain()
}
/// Attempts to downcast this `Error` to a particular `Fail` type by
/// reference.
///
/// If the underlying error is not of type `T`, this will return `None`.
pub fn downcast_ref<T: Fail>(&self) -> Option<&T> {
self.imp.failure().downcast_ref()
}
/// Attempts to downcast this `Error` to a particular `Fail` type by
/// mutable reference.
///
/// If the underlying error is not of type `T`, this will return `None`.
pub fn downcast_mut<T: Fail>(&mut self) -> Option<&mut T> {
self.imp.failure_mut().downcast_mut()
}
/// Deprecated alias to `find_root_cause`.
#[deprecated(since = "0.1.2", note = "please use the 'find_root_cause()' method instead")]
pub fn root_cause(&self) -> &Fail {
::find_root_cause(self.as_fail())
}
/// Deprecated alias to `iter_causes`.
#[deprecated(since = "0.1.2", note = "please use the 'iter_chain()' method instead")]
pub fn causes(&self) -> Causes {
Causes { fail: Some(self.as_fail()) }
}
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.imp.failure(), f)
}
}
impl Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let backtrace = self.imp.backtrace();
if backtrace.is_none() {
Debug::fmt(&self.imp.failure(), f)
} else {
write!(f, "{:?}\n\n{:?}", &self.imp.failure(), backtrace)
}
}
}
impl AsRef<Fail> for Error {
fn as_ref(&self) -> &Fail {
self.as_fail()
}
}
#[cfg(test)]
mod test {
use std::io;
use super::Error;
fn assert_just_data<T: Send + Sync + 'static>() { }
#[test]
fn assert_error_is_just_data() {
assert_just_data::<Error>();
}
#[test]
fn methods_seem_to_work() {
let io_error: io::Error = io::Error::new(io::ErrorKind::NotFound, "test");
let error: Error = io::Error::new(io::ErrorKind::NotFound, "test").into();
assert!(error.downcast_ref::<io::Error>().is_some());
let _: ::Backtrace = *error.backtrace();
assert_eq!(format!("{:?}", io_error), format!("{:?}", error));
assert_eq!(format!("{}", io_error), format!("{}", error));
drop(error);
assert!(true);
}
#[test]
fn downcast_can_be_used() {
let mut error: Error = io::Error::new(io::ErrorKind::NotFound, "test").into();
{
let real_io_error_ref = error.downcast_ref::<io::Error>().unwrap();
assert_eq!(real_io_error_ref.to_string(), "test");
}
{
let real_io_error_mut = error.downcast_mut::<io::Error>().unwrap();
assert_eq!(real_io_error_mut.to_string(), "test");
}
let real_io_error = error.downcast::<io::Error>().unwrap();
assert_eq!(real_io_error.to_string(), "test");
}
}
| true |
29ceec4842657f13e33917b43243a69ab8685f2a
|
Rust
|
sbstp/kubie
|
/src/cmd/meta.rs
|
UTF-8
| 3,956 | 2.75 | 3 |
[
"Zlib"
] |
permissive
|
use clap::Parser;
use crate::settings::ContextHeaderBehavior;
#[derive(Debug, Parser)]
#[clap(version)]
pub enum Kubie {
/// Spawn a shell in the given context. The shell is isolated from other shells.
/// Kubie shells can be spawned recursively without any issue.
#[clap(name = "ctx")]
Context {
/// Specify in which namespace of the context the shell is spawned.
#[clap(short = 'n', long = "namespace")]
namespace_name: Option<String>,
/// Specify files from which to load contexts instead of using the installed ones.
#[clap(short = 'f', long = "kubeconfig")]
kubeconfigs: Vec<String>,
/// Enter the context by spawning a new recursive shell.
#[clap(short = 'r', long = "recursive")]
recursive: bool,
/// Name of the context to enter. Use '-' to switch back to the previous context.
context_name: Option<String>,
},
/// Change the namespace in which the current shell operates. The namespace change does
/// not affect other shells.
#[clap(name = "ns")]
Namespace {
/// Enter the namespace by spawning a new recursive shell.
#[clap(short = 'r', long = "recursive")]
recursive: bool,
/// Unsets the namespace in the currently active context.
#[clap(short = 'u', long = "unset")]
unset: bool,
/// Name of the namespace to enter. Use '-' to switch back to the previous namespace.
namespace_name: Option<String>,
},
/// View info about the current kubie shell, such as the context name and the
/// current namespace.
#[clap(name = "info")]
Info(KubieInfo),
/// Execute a command inside of the given context and namespace.
#[clap(name = "exec", trailing_var_arg = true)]
Exec {
/// Name of the context in which to run the command.
context_name: String,
/// Namespace in which to run the command. This is mandatory to avoid potential errors.
namespace_name: String,
/// Exit early if a command fails when using a wildcard context.
#[clap(short = 'e', long = "exit-early")]
exit_early: bool,
/// Overrides behavior.print_context_in_exec in Kubie settings file.
#[clap(value_enum, long = "context-headers")]
context_headers_flag: Option<ContextHeaderBehavior>,
/// Command to run as well as its arguments.
args: Vec<String>,
},
#[clap(name = "export")]
Export {
/// Name of the context in which to run the command.
context_name: String,
/// Namespace in which to run the command. This is mandatory to avoid potential errors.
namespace_name: String,
},
/// Check the Kubernetes config files for issues.
#[clap(name = "lint")]
Lint,
/// Edit the given context.
#[clap(name = "edit")]
Edit {
/// Name of the context to edit.
context_name: Option<String>,
},
/// Edit kubie's config file.
#[clap(name = "edit-config")]
EditConfig,
/// Check for a Kubie update and replace Kubie's binary if needed.
/// This function can ask for sudo-mode.
#[clap(name = "update")]
#[cfg(feature = "update")]
Update,
/// Delete a context. Automatic garbage collection will be performed.
/// Dangling users and clusters will be removed.
#[clap(name = "delete")]
Delete {
/// Name of the context to edit.
context_name: Option<String>,
},
}
#[derive(Debug, Parser)]
pub struct KubieInfo {
#[clap(subcommand)]
pub kind: KubieInfoKind,
}
/// Type of info the user is requesting.
#[derive(Debug, Parser)]
pub enum KubieInfoKind {
/// Get the current shell's context name.
#[clap(name = "ctx")]
Context,
/// Get the current shell's namespace name.
#[clap(name = "ns")]
Namespace,
/// Get the current depth of contexts.
#[clap(name = "depth")]
Depth,
}
| true |
c8623c7476eef9f3d822cfcd8a9dd1d30ecba3e7
|
Rust
|
TimoFreiberg/framework-comparison-2020
|
/rocket/src/api.rs
|
UTF-8
| 1,612 | 2.515625 | 3 |
[] |
no_license
|
use rocket_contrib::json::Json;
use rocket::http::Status;
use crate::{
footballer::{Footballer, NewFootballer},
footballer_repository::FootballerRepository,
PgDatabase,
};
use diesel::result::Error;
use rocket::{delete, get, post, response::content};
#[get("/footballers?<position>")]
pub fn footballers_search(
connection: PgDatabase,
position: Option<String>,
) -> Result<content::Json<Json<Vec<Footballer>>>, Error> {
let footballers: Result<Vec<Footballer>, Error> = match position {
Some(pos) => connection.0.find_by_position(&pos),
None => connection.0.find_all(),
};
match footballers {
Ok(footballers) => Ok(content::Json(Json(footballers))),
Err(e) => Err(e),
}
}
#[get("/footballers/<id>")]
pub fn footballer_get(
connection: PgDatabase,
id: i64,
) -> Result<content::Json<Json<Footballer>>, Error> {
match connection.0.find_by_id(id) {
Ok(footballer) => Ok(content::Json(Json(footballer))),
Err(e) => Err(e),
}
}
#[post("/footballers", data = "<footballer>"/*, format = "json"*/)]
pub fn footballer_create(
connection: PgDatabase,
footballer: Json<NewFootballer>,
) -> Result<content::Json<Json<Footballer>>, Error> {
match connection.0.create(&footballer.0) {
Ok(footballer) => Ok(content::Json(Json(footballer))),
Err(e) => Err(e),
}
}
#[delete("/footballers/<id>")]
pub fn footballer_delete(connection: PgDatabase, id: i64) -> Status {
match connection.0.delete_by_id(id) {
Ok(_) => Status::NoContent,
Err(_) => Status::BadRequest,
}
}
| true |
6a5cc3a4dac954b244f1e3c8f358b0e7375cf39a
|
Rust
|
caklimas/rust-nes
|
/src/ppu/frame.rs
|
UTF-8
| 1,681 | 2.875 | 3 |
[] |
no_license
|
use std::fmt::{Debug, Formatter, Result};
use crate::display;
use super::colors::Color;
const BYTES_PER_COLUMN: usize = display::PIXEL_SIZE * display::BYTES_PER_COLOR;
const BYTES_PER_ROW: usize = BYTES_PER_COLUMN * display::SCREEN_WIDTH;
const BYTE_WIDTH: usize = BYTES_PER_COLUMN * display::SCREEN_WIDTH;
const BYTE_HEIGHT: usize = display::SCREEN_HEIGHT * display::PIXEL_SIZE;
pub struct Frame {
pixels: Vec<u8>
}
impl Frame {
pub fn new() -> Self {
Frame {
pixels: vec![0; BYTE_WIDTH * BYTE_HEIGHT]
}
}
pub fn set_pixel(&mut self, x: usize, y: usize, color: Color) {
if x >= display::SCREEN_WIDTH || y >= display::SCREEN_HEIGHT {
return;
}
let (red, green, blue) = color;
let y_offset = y * BYTES_PER_ROW * display::PIXEL_SIZE;
for sdl_row_num in 0..display::PIXEL_SIZE {
let row_offset = y_offset + (sdl_row_num * BYTES_PER_ROW);
let x_offset = x * BYTES_PER_COLUMN;
for sdl_col_num in 0..display::PIXEL_SIZE {
let col_offset = x_offset + (sdl_col_num * 3);
let offset = row_offset + col_offset;
self.pixels[offset] = red;
self.pixels[offset + 1] = green;
self.pixels[offset + 2] = blue;
}
}
}
pub fn get_pixels(&self) -> &[u8] {
&self.pixels
}
}
impl Default for Frame {
fn default() -> Self {
Frame::new()
}
}
impl Debug for Frame {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("Ppu2C02")
.field("pixel_length", &self.pixels.len())
.finish()
}
}
| true |
7841e9999be6c7bfd0d6ba92bca927f08d2229f0
|
Rust
|
tuzz/game-loop
|
/examples/using_wasm/src/main.rs
|
UTF-8
| 858 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
use wasm_bindgen::prelude::*;
use game_loop::game_loop;
#[wasm_bindgen(start)]
pub fn main() {
let game = Game::new();
game_loop(game, 240, 0.1, |g| {
g.game.your_update_function();
}, |g| {
g.game.your_render_function();
});
}
struct Game {
span: web_sys::Element,
counter: u32,
}
impl Game {
pub fn new() -> Self {
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let body = document.body().unwrap();
let span = document.create_element("span").unwrap();
body.append_child(&span).unwrap();
Self { span, counter: 0 }
}
pub fn your_update_function(&mut self) {
self.counter += 1;
}
pub fn your_render_function(&self) {
self.span.set_inner_html(&format!("Counter: {}", self.counter));
}
}
| true |
c6740b3e4b8229174410076902fa5902ef7186bb
|
Rust
|
martin-danhier/nolfaris
|
/src/error/types.rs
|
UTF-8
| 3,427 | 3.234375 | 3 |
[] |
no_license
|
use std::fmt::{Debug, Display};
use crate::utils::locations::{NodeLocation, InFileLocation};
use colored::Colorize;
#[derive(Debug)]
pub enum ErrorVariant {
Syntax,
Semantic,
/// Error related to a dysfunction of the compiler.
Compiler,
}
impl Display for ErrorVariant {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorVariant::Syntax => write!(f, "Syntax"),
ErrorVariant::Semantic => write!(f, "Semantic"),
ErrorVariant::Compiler => write!(f, "Compiler"),
}
}
}
/// Severity of an error
#[derive(Debug)]
pub enum Severity {
Error,
Warning,
}
impl Display for Severity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Severity::Error => write!(f, "{}", "Error".bright_red()),
Severity::Warning => write!(f, "{}", "Warning".yellow()),
}
}
}
#[derive(Debug)]
pub struct Error {
pub variant: ErrorVariant,
pub location: NodeLocation,
pub severity: Severity,
pub message: String,
pub hint: Option<String>,
pub line_content: Option<String>,
}
impl Error {
/// Returns the base part of the error message
fn base_msg_fmt(&self) -> String {
format!(
"{}: {}\n{}",
self.severity,
self.message.bold(),
self.location.as_ponctual(),
)
}
/// Returns the hint part of the error message, when pertinent
fn hint_fmt(&self) -> String {
match &self.hint {
None => String::from(""),
Some(hint) => format!("\n{} {}", "Hint:".bright_cyan(), hint),
}
}
/// Returns the preview part of the error message, when pertinent
fn preview_fmt(&self) -> String {
match &self.line_content {
None => String::from(""),
Some(line) => {
let arrow_start_pos;
let arrow_end_pos;
// Remove spaces at the start
let stripped_line = line.trim_start();
// Compute the difference to offset the carets
let delta = line.chars().count() - stripped_line.chars().count();
// Compute the position of the arrow
match self.location.location {
InFileLocation::Ponctual(pos) => {
arrow_start_pos = 2 + pos.col - delta;
arrow_end_pos = 3 + pos.col - delta;
}
InFileLocation::Span(start, end) => {
arrow_start_pos = 2 + start.col - delta;
arrow_end_pos = 3 + end.col - delta;
}
};
format!(
":\n-> {}\n{}{}",
stripped_line,
(0..arrow_start_pos).map(|_| " ").collect::<String>(),
(arrow_start_pos..arrow_end_pos)
.map(|_| "^")
.collect::<String>(),
)
}
}
}
}
impl Display for Error {
/// Returns the full and colored error message
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}{}{}",
self.base_msg_fmt(),
self.preview_fmt(),
self.hint_fmt(),
)
}
}
| true |
ee3f41b4e170cd13d261f86eb52d182dedc41503
|
Rust
|
ycd/soda
|
/src/lib.rs
|
UTF-8
| 8,943 | 2.671875 | 3 |
[
"Apache-2.0"
] |
permissive
|
use std::{
borrow::{Borrow, BorrowMut},
fs::File,
io::{ErrorKind, Write},
};
use std::fs::OpenOptions;
use std::io::prelude::*;
use fern::Dispatch;
use log::{debug, error, info, trace, warn};
use pyo3::prelude::*;
use pyo3::types::{PyLong, PyUnicode};
#[pymodule]
fn soda(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Soda>()?;
Ok(())
}
/// Until https://github.com/PyO3/pyo3/issues/417
/// gets merged, we cannot bind rust enums or constants
/// as a part of module
pub enum Level {
NOTSET,
DEBUG,
INFO,
WARNING,
ERROR,
CRITICAL,
}
static dateFormat: &'static str = "[%Y-%m-%d][%H:%M:%S]";
#[pyclass(dict, subclass)]
pub struct Soda {
pub level: Level,
pub format: String,
// pub verbosity: u64
pub handlers: Handlers,
}
#[pyclass(dict, subclass)]
pub struct Handlers {
FileHandler: FileLogger,
}
#[pymethods]
impl Handlers {
#[new]
#[args(json = false, file = false)]
fn new(json: bool, file: bool) -> Handlers {
Handlers {
FileHandler: FileLogger::new(),
}
}
}
#[pymethods]
impl Soda {
#[new]
#[args(verbosity = "0")]
fn new(verbosity: u64) -> Soda {
// Create at Python runtime to make this logger globally accessable.
let mut base_config = fern::Dispatch::new();
base_config = match verbosity {
0 => base_config.level(log::LevelFilter::Info),
1 => base_config.level(log::LevelFilter::Debug),
2 => base_config.level(log::LevelFilter::Warn),
_3_or_more => base_config.level(log::LevelFilter::Trace),
};
Soda {
level: Level::NOTSET,
format: String::new(),
handlers: Handlers::new(false, false),
}
}
fn setFormat(&mut self, format: &PyUnicode) {
let format: Result<&str, PyErr> = format.to_str();
if let Ok(format) = format {
self.format = format.to_string();
}
}
fn basicConfig(&mut self, dtFormat: &PyUnicode) {
let dtFormat: String = match dtFormat.to_str() {
Ok(fmt) => fmt.to_string(),
Err(e) => {
println!(
"An error occured while reading the format {}, using the default format",
e
);
String::from(dateFormat)
}
};
let mut config = fern::Dispatch::new()
.format(move |out, message, record| {
// special format for debug messages coming from our own crate.
if record.level() > log::LevelFilter::Info && record.target() == "soda" {
out.finish(format_args!(
"---\nDEBUG: {}: {}\n---",
chrono::Local::now().format(dtFormat.as_str()),
message
))
} else {
out.finish(format_args!(
"[{}][{}][{}] {}",
chrono::Local::now().format(dtFormat.as_str()),
record.target(),
record.level(),
message
))
}
})
.chain(std::io::stdout())
.apply();
}
fn addFileHandler(&mut self, path: String) {
let f = File::open(&path);
let _: File = match f {
Ok(file) => file,
Err(error) => match error.kind() {
ErrorKind::NotFound => match File::create(&path) {
Ok(fc) => fc,
Err(e) => panic!("Problem creating the file: {:?}", e),
},
_ => panic!("an error occured {}", error),
},
};
self.handlers.FileHandler.enabled = true;
self.handlers.FileHandler.path = path;
}
fn callback(&self, message: &str) {
match self.handlers.FileHandler.enabled {
true => self.handlers.FileHandler.logger(message),
false => (),
};
// TODO(ycd): enable json logging with extra crate.
// match self.handlers.JsonHandler {
// // true => jsonLogger(message),
// true => (),
// false => (),
// };
}
fn info(&self, message: &PyUnicode) {
let message = match message.to_str() {
Ok(msg) => msg,
_ => return,
};
info!("{}", message);
self.callback(message);
}
fn warning(&mut self, message: &PyUnicode) {
let message = match message.to_str() {
Ok(msg) => msg,
_ => return,
};
warn!("{}", message);
}
fn debug(&mut self, message: &PyUnicode) {
let message = match message.to_str() {
Ok(msg) => msg,
_ => return,
};
debug!("{}", message);
self.callback(message);
}
fn trace(&mut self, message: &PyUnicode) {
let message = match message.to_str() {
Ok(msg) => msg,
_ => return,
};
trace!("{}", message);
self.callback(message);
}
fn error(&mut self, message: &PyUnicode) {
let message = match message.to_str() {
Ok(msg) => msg,
_ => return,
};
error!("{}", message);
self.callback(message);
}
pub fn setLevel(&mut self, verbosity: u8) {
match verbosity {
1 => self.level = Level::DEBUG,
2 => self.level = Level::INFO,
3 => self.level = Level::WARNING,
_ => {
println!("Found none, setting default value to 'DEBUG'");
self.level = Level::DEBUG
}
}
}
}
// fn fileLogger(message: &str) {
// let mut file = OpenOptions::new()
// .write(true)
// .append(true)
// .open(&self.path)
// .unwrap();
// if let Err(e) = writeln!(file, "{}", self.format(message)) {
// eprintln!("Couldn't write to file: {}", e);
// }
// let f = File::open(&self.path);
// let f: File = match f {
// Ok(file) => file,
// Err(error) => match error.kind() {
// ErrorKind::NotFound => match File::create(&self.path) {
// Ok(fc) => fc,
// Err(e) => panic!("Problem creating the file: {:?}", e),
// },
// _ => panic!("an error occured {}", error),
// },
// };
// }
// trait Logger {
// fn logger(message: &str);
// }
struct FileLogger {
enabled: bool,
path: String,
}
impl FileLogger {
fn new() -> FileLogger {
FileLogger {
enabled: false,
path: String::from("default.log"),
}
}
fn logger(&self, message: &str) {
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open(&self.path)
.unwrap();
if let Err(e) = writeln!(file, "{}", message) {
eprintln!("Couldn't write to file: {}", e);
}
// let f = File::open(&self.path);
// let f: File = match f {
// Ok(file) => file,
// Err(error) => match error.kind() {
// ErrorKind::NotFound => match File::create(&self.path) {
// Ok(fc) => fc,
// Err(e) => panic!("Problem creating the file: {:?}", e),
// },
// _ => panic!("an error occured {}", error),
// },
// };
}
fn format(&self, message: &str) -> String {
format!("{}", message)
}
}
// impl Soda {
// // fn _addConfig(&mut self, config: &fern::Dispatch) {
// // }
// }
// fn setup_logging(verbosity: u64) -> Result<(), fern::InitError> {
// let mut base_config = fern::Dispatch::new();
// base_config = match verbosity {
// 0 => base_config
// .level(log::LevelFilter::Info)
// .level_for("overly-verbose-target", log::LevelFilter::Warn),
// 1 => base_config
// .level(log::LevelFilter::Debug)
// .level_for("overly-verbose-target", log::LevelFilter::Info),
// 2 => base_config.level(log::LevelFilter::Debug),
// _3_or_more => base_config.level(log::LevelFilter::Trace),
// };
// // Separate file config so we can include year, month and day in file logs
// let file_config = fern::Dispatch::new()
// .format(|out, message, record| {
// out.finish(format_args!(
// "{}[{}][{}] {}",
// chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
// record.target(),
// record.level(),
// message
// ))
// })
// .chain(fern::log_file("program.log")?);
// base_config.chain(file_config);
// Ok(())
// }
| true |
0da80069d9653cc22b140b853fd0f1eb3d46665c
|
Rust
|
Frodo45127/rpfm
|
/rpfm_lib/src/schema/v4.rs
|
UTF-8
| 14,326 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
//---------------------------------------------------------------------------//
// Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code to interact with Schemas.
This module contains all the code related with the schemas used by this lib to decode many PackedFile types.
The basic structure of an `Schema` is:
```ignore
(
version: 3,
versioned_files: [
DB("_kv_battle_ai_ability_usage_variables_tables", [
(
version: 0,
fields: [
(
name: "key",
field_type: StringU8,
is_key: true,
default_value: None,
max_length: 0,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: "",
ca_order: -1,
is_bitwise: 0,
enum_values: {},
),
(
name: "value",
field_type: F32,
is_key: false,
default_value: None,
max_length: 0,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: "",
ca_order: -1,
is_bitwise: 0,
enum_values: {},
),
],
localised_fields: [],
),
]),
],
)
```
Inside the schema there are `VersionedFile` variants of different types, with a Vec of `Definition`, one for each version of that PackedFile supported.
!*/
use rayon::prelude::*;
use ron::de::from_bytes;
use serde_derive::{Serialize, Deserialize};
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::Path;
use crate::error::Result;
use crate::schema::Schema as SchemaV5;
use crate::schema::Definition as DefinitionV5;
use crate::schema::FieldType as FieldTypeV5;
use crate::schema::Field as FieldV5;
//---------------------------------------------------------------------------//
// Enum & Structs
//---------------------------------------------------------------------------//
/// This struct represents a Schema File in memory, ready to be used to decode versioned PackedFiles.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct SchemaV4 {
/// It stores the structural version of the Schema.
version: u16,
/// It stores the versioned files inside the Schema.
versioned_files: Vec<VersionedFileV4>
}
/// This enum defines all types of versioned files that the schema system supports.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum VersionedFileV4 {
/// It stores a `Vec<Definition>` with the definitions for each version of AnimFragment files decoded.
AnimFragment(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of AnomTable files decoded.
AnimTable(Vec<DefinitionV4>),
/// It stores the name of the table, and a `Vec<Definition>` with the definitions for each version of that table decoded.
DB(String, Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` to decode the dependencies of a PackFile.
DepManager(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of Loc files decoded (currently, only version `1`).
Loc(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of MatchedCombat files decoded.
MatchedCombat(Vec<DefinitionV4>),
}
/// This struct contains all the data needed to decode a specific version of a versioned PackedFile.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Default, Serialize, Deserialize)]
pub struct DefinitionV4 {
/// The version of the PackedFile the definition is for. These versions are:
/// - `-1`: for fake `Definition`, used for dependency resolving stuff.
/// - `0`: for unversioned PackedFiles.
/// - `1+`: for versioned PackedFiles.
version: i32,
/// This is a collection of all `Field`s the PackedFile uses, in the order it uses them.
fields: Vec<FieldV4>,
/// This is a list of all the fields from this definition that are moved to a Loc PackedFile on exporting.
localised_fields: Vec<FieldV4>,
}
/// This struct holds all the relevant data do properly decode a field from a versioned PackedFile.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Serialize, Deserialize)]
pub struct FieldV4 {
/// Name of the field. Should contain no spaces, using `_` instead.
pub name: String,
/// Type of the field.
pub field_type: FieldTypeV4,
/// `True` if the field is a `Key` field of a table. `False` otherwise.
pub is_key: bool,
/// The default value of the field.
pub default_value: Option<String>,
/// If the field's data corresponds to a filename.
pub is_filename: bool,
/// Path where the file in the data of the field can be, if it's restricted to one path.
pub filename_relative_path: Option<String>,
/// `Some(referenced_table, referenced_column)` if the field is referencing another table/column. `None` otherwise.
pub is_reference: Option<(String, String)>,
/// `Some(referenced_columns)` if the field is using another column/s from the referenced table for lookup values.
pub lookup: Option<Vec<String>>,
/// Aclarative description of what the field is for.
pub description: String,
/// Visual position in CA's Table. `-1` means we don't know its position.
pub ca_order: i16,
/// Variable to tell if this column is a bitwise column (spanned accross multiple columns) or not. Only applicable to numeric fields.
pub is_bitwise: i32,
/// Variable that specifies the "Enum" values for each value in this field.
pub enum_values: BTreeMap<i32, String>,
/// If the field is part of a 3-part RGB column set, and which one (R, G or B) it is.
pub is_part_of_colour: Option<u8>,
}
/// This enum defines every type of field the lib can encode/decode.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Serialize, Deserialize)]
pub enum FieldTypeV4 {
Boolean,
F32,
F64,
I16,
I32,
I64,
ColourRGB,
StringU8,
StringU16,
OptionalStringU8,
OptionalStringU16,
SequenceU16(Box<DefinitionV4>),
SequenceU32(Box<DefinitionV4>)
}
/// This struct represents a bunch of Schema Patches in memory.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct SchemaPatches {
/// It stores the patches split by games.
patches: HashMap<String, SchemaPatch>
}
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct SchemaPatch{
/// It stores a list of per-table, per-column patches.
tables: HashMap<String, HashMap<String, HashMap<String, String>>>,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
/// Implementation of `SchemaV4`.
impl SchemaV4 {
/// This function loads a `Schema` to memory from a file in the `schemas/` folder.
pub fn load(path: &Path) -> Result<Self> {
let mut file = BufReader::new(File::open(path)?);
let mut data = Vec::with_capacity(file.get_ref().metadata()?.len() as usize);
file.read_to_end(&mut data)?;
from_bytes(&data).map_err(From::from)
}
/// This function tries to update the Schema at the provided Path to a more recent format.
pub fn update(schema_path: &Path, patches_path: &Path, game_name: &str) -> Result<()> {
let schema_legacy = Self::load(schema_path)?;
let mut schema = SchemaV5::from(&schema_legacy);
// Fix for empty dependencies, again.
schema.definitions.par_iter_mut().for_each(|(table_name, definitions)| {
definitions.iter_mut().for_each(|definition| {
definition.fields.iter_mut().for_each(|field| {
if let Some((ref_table, ref_column)) = field.is_reference(None) {
if ref_table.trim().is_empty() || ref_column.trim().is_empty() {
dbg!(&table_name);
dbg!(field.name());
field.is_reference = None;
}
}
})
})
});
let schema_patches = SchemaPatches::load(patches_path);
if let Ok(schema_patches) = schema_patches {
if let Some(patches) = schema_patches.patches.get(game_name) {
schema.patches = patches.tables.clone();
}
}
// Disable saving until 4.0 releases.
schema.save(schema_path)?;
Ok(())
}
}
/// Implementation of `Definition`.
impl DefinitionV4 {
/// This function creates a new empty `Definition` for the version provided.
pub fn new(version: i32) -> DefinitionV4 {
DefinitionV4 {
version,
localised_fields: vec![],
fields: vec![],
}
}
/// This function returns the version of the provided definition.
pub fn version(&self) -> i32 {
self.version
}
/// This function returns a mutable reference to the list of fields in the definition.
pub fn fields_mut(&mut self) -> &mut Vec<FieldV4> {
&mut self.fields
}
/// This function returns the localised fields of the provided definition
pub fn localised_fields_mut(&mut self) -> &mut Vec<FieldV4> {
&mut self.localised_fields
}
}
/// Default implementation of `FieldType`.
impl Default for FieldV4 {
fn default() -> Self {
Self {
name: String::from("new_field"),
field_type: FieldTypeV4::StringU8,
is_key: false,
default_value: None,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: String::from(""),
ca_order: -1,
is_bitwise: 0,
enum_values: BTreeMap::new(),
is_part_of_colour: None,
}
}
}
/// Default implementation of `SchemaV4`.
impl Default for SchemaV4 {
fn default() -> Self {
Self {
version: 3,
versioned_files: vec![]
}
}
}
impl From<&SchemaV4> for SchemaV5 {
fn from(legacy_schema: &SchemaV4) -> Self {
let mut schema = Self::default();
legacy_schema.versioned_files.iter()
.filter_map(|versioned| if let VersionedFileV4::DB(name, definitions) = versioned { Some((name, definitions)) } else { None })
.for_each(|(name, definitions)| {
definitions.iter().for_each(|definition| {
schema.add_definition(name, &From::from(definition));
})
});
schema
}
}
impl From<&DefinitionV4> for DefinitionV5 {
fn from(legacy_table_definition: &DefinitionV4) -> Self {
let mut definition = Self::new(legacy_table_definition.version, None);
let fields = legacy_table_definition.fields.iter().map(From::from).collect::<Vec<FieldV5>>();
definition.set_fields(fields);
let fields = legacy_table_definition.localised_fields.iter().map(From::from).collect::<Vec<FieldV5>>();
definition.set_localised_fields(fields);
definition
}
}
impl From<&FieldV4> for FieldV5 {
fn from(legacy_field: &FieldV4) -> Self {
Self {
name: legacy_field.name.to_owned(),
field_type: From::from(&legacy_field.field_type),
is_key: legacy_field.is_key,
default_value: legacy_field.default_value.clone(),
is_filename: legacy_field.is_filename,
filename_relative_path: legacy_field.filename_relative_path.clone(),
is_reference: legacy_field.is_reference.clone(),
lookup: legacy_field.lookup.clone(),
description: legacy_field.description.to_owned(),
ca_order: legacy_field.ca_order,
..Default::default()
}
}
}
impl From<&FieldTypeV4> for FieldTypeV5 {
fn from(legacy_field_type: &FieldTypeV4) -> Self {
match legacy_field_type {
FieldTypeV4::Boolean => Self::Boolean,
FieldTypeV4::I16 => Self::I16,
FieldTypeV4::I32 => Self::I32,
FieldTypeV4::I64 => Self::I64,
FieldTypeV4::F32 => Self::F32,
FieldTypeV4::F64 => Self::F64,
FieldTypeV4::ColourRGB => Self::ColourRGB,
FieldTypeV4::StringU8 => Self::StringU8,
FieldTypeV4::StringU16 => Self::StringU16,
FieldTypeV4::OptionalStringU8 => Self::OptionalStringU8,
FieldTypeV4::OptionalStringU16 => Self::OptionalStringU16,
FieldTypeV4::SequenceU16(sequence) => Self::SequenceU16(Box::new(From::from(&**sequence))),
FieldTypeV4::SequenceU32(sequence) => Self::SequenceU32(Box::new(From::from(&**sequence))),
}
}
}
impl SchemaPatches {
/// This function loads a `SchemaPatches` to memory from a file in the `schemas/` folder.
pub fn load(file_path: &Path) -> Result<Self> {
let mut file = BufReader::new(File::open(file_path)?);
let mut data = Vec::with_capacity(file.get_ref().metadata()?.len() as usize);
file.read_to_end(&mut data)?;
from_bytes(&data).map_err(From::from)
}
}
| true |
97bd363245b7035e03169e569518b9c43c85060f
|
Rust
|
mejk/grafen
|
/src/bin/main.rs
|
UTF-8
| 8,792 | 2.625 | 3 |
[
"Unlicense"
] |
permissive
|
//! Create graphene and other substrates for use in molecular dynamics simulations.
extern crate colored;
extern crate dialoguer;
extern crate serde;
extern crate serde_json;
extern crate structopt;
#[macro_use] extern crate structopt_derive;
extern crate grafen;
extern crate mdio;
mod error;
mod output;
mod ui;
use error::{GrafenCliError, Result};
use ui::read_configuration;
use grafen::database::{read_database, ComponentEntry, DataBase};
use grafen::read_conf::ReadConf;
use colored::*;
use std::env::{current_dir, home_dir, var, var_os};
use std::fs::DirBuilder;
use std::process;
use std::path::PathBuf;
use structopt::StructOpt;
const DEFAULT_DBNAME: &str = "database.json";
/// The program run configuration.
pub struct Config {
/// Title of output system.
pub title: String,
/// Path to output file.
pub output_path: PathBuf,
/// Input components that were read from the command line.
pub components: Vec<ComponentEntry>,
/// Database of residue and substrate definitions.
pub database: DataBase,
}
impl Config {
/// Parse the input command line arguments and read the `DataBase`.
///
/// # Errors
/// Returns an error if the `DataBase` (if given as an input) could not be read.
fn new() -> Result<Config> {
eprintln!("{} {}\n", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
let options = CliOptions::from_args();
let output_path = options.output;
let title = options.title.unwrap_or("System created by grafen".into());
let mut database = match options.database {
Some(path) => read_database(&path).map_err(|err| GrafenCliError::from(err)),
None => read_or_create_default_database(),
}?;
let (components, mut entries) = read_input_configurations(options.input_confs);
database.component_defs.append(&mut entries);
Ok(Config { title, output_path, components, database })
}
}
#[derive(StructOpt, Debug)]
#[structopt(raw(setting = "structopt::clap::AppSettings::ColoredHelp"))]
/// Command line options
struct CliOptions {
#[structopt(short = "t", long = "title")]
/// Title of output system
title: Option<String>,
#[structopt(short = "o", long = "output", default_value = "conf.gro", parse(from_os_str))]
/// Output configuration file
output: PathBuf,
#[structopt(short = "d", long = "database", parse(from_os_str))]
/// Path to residue and component database
database: Option<PathBuf>,
#[structopt(short = "c", long = "conf", parse(from_os_str))]
/// Path to input configuration files to add as components
input_confs: Vec<PathBuf>,
}
fn main() {
if let Err(err) = Config::new().and_then(|conf| ui::user_menu(conf)) {
eprintln!("{}", err);
process::exit(1);
}
}
fn read_input_configurations(confs: Vec<PathBuf>) -> (Vec<ComponentEntry>, Vec<ComponentEntry>) {
let mut configurations = Vec::new();
for path in confs {
match read_configuration(&path) {
Ok(conf) => configurations.push(conf),
Err(err) => eprintln!("{}", err),
}
}
eprint!("\n");
let current_dir = current_dir().unwrap_or(PathBuf::new());
let entries = configurations
.iter()
.map(|conf| ReadConf {
conf: None,
path: current_dir.join(&conf.path),
backup_conf: None,
description: conf.description.clone(),
volume_type: conf.volume_type.clone(),
})
.map(|conf| ComponentEntry::ConfigurationFile(conf))
.collect::<Vec<_>>();
let components = configurations
.into_iter()
.map(|conf| ComponentEntry::ConfigurationFile(conf))
.collect::<Vec<_>>();
(components, entries)
}
fn read_or_create_default_database() -> Result<DataBase> {
let default_database_paths = get_default_database_paths();
if default_database_paths.is_empty() {
eprintln!("{}", format!(
"Could not find a location for the default database. \
Opening a database which cannot be saved.",
).color("yellow"));
return Ok(DataBase::new());
}
// See if a default database can be found at any path before creating a new one.
for path in &default_database_paths {
if path.is_file() {
return read_database(&path).map_err(|err| GrafenCliError::from(err))
}
}
let mut default_database = DataBase::new();
let default_path = &default_database_paths[0];
if let Some(parent_dir) = default_path.parent() {
match DirBuilder::new().recursive(true).create(&parent_dir) {
Ok(_) => default_database.set_path(&default_path).unwrap(),
Err(err) => {
eprintln!("{}", format!(
"Warning: Could not create a folder for a default database at '{}' ({}). \
Opening a database which cannot be saved.",
default_path.display(), err
).color("yellow"));
},
}
}
Ok(default_database)
}
fn get_default_database_paths() -> Vec<PathBuf> {
get_platform_dependent_data_dirs()
.into_iter()
.map(|dir| dir.join("grafen").join(DEFAULT_DBNAME))
.collect()
}
fn get_platform_dependent_data_dirs() -> Vec<PathBuf> {
let xdg_data_dirs_variable = var("XDG_DATA_DIRS")
.unwrap_or(String::from("/usr/local/share:/usr/local"));
let xdg_dirs_iter = xdg_data_dirs_variable.split(':').map(|s| Some(PathBuf::from(s)));
let dirs = if cfg!(target_os = "macos") {
vec![
var_os("XDG_DATA_HOME").map(|dir| PathBuf::from(dir)),
home_dir().map(|dir| dir.join("Library").join("Application Support"))
].into_iter()
.chain(xdg_dirs_iter)
.chain(vec![Some(PathBuf::from("/").join("Library").join("Application Support"))])
.collect()
} else if cfg!(target_os = "linux") {
vec![
var_os("XDG_DATA_HOME").map(|dir| PathBuf::from(dir)),
home_dir().map(|dir| dir.join(".local").join("share"))
].into_iter()
.chain(xdg_dirs_iter)
.collect()
} else if cfg!(target_os = "windows") {
vec![var_os("APPDATA").map(|dir| PathBuf::from(dir))]
} else {
Vec::new()
};
dirs.into_iter().filter_map(|dir| dir).collect()
}
#[cfg(test)]
pub mod tests {
use super::*;
use std::env::set_var;
use std::path::Component;
#[test]
#[cfg(target_os = "macos")]
fn default_database_dirs_on_macos_lead_with_xdg_dirs_then_application_support() {
let xdg_data_home = "data_home";
set_var("XDG_DATA_HOME", xdg_data_home);
let xdg_data_directories = vec!["data_dir1", "data_dir2"];
let xdg_data_dirs = format!("{}:{}", xdg_data_directories[0], xdg_data_directories[1]);
set_var("XDG_DATA_DIRS", xdg_data_dirs);
let user_appsupport = home_dir().unwrap().join("Library").join("Application Support");
let root_appsupport = PathBuf::from("/").join("Library").join("Application Support");
let result = get_platform_dependent_data_dirs();
let priority_list = vec![
PathBuf::from(xdg_data_home),
user_appsupport,
PathBuf::from(xdg_data_directories[0]),
PathBuf::from(xdg_data_directories[1]),
root_appsupport
];
assert_eq!(result, priority_list);
}
#[test]
#[cfg(target_os = "linux")]
fn default_database_dirs_on_linux_lead_with_xdg_dirs_then_local_share() {
let xdg_data_home = "data_home";
set_var("XDG_DATA_HOME", xdg_data_home);
let xdg_data_directories = vec!["data_dir1", "data_dir2"];
let xdg_data_dirs = format!("{}:{}", xdg_data_directories[0], xdg_data_directories[1]);
set_var("XDG_DATA_DIRS", xdg_data_dirs);
let user_local_share = home_dir().unwrap().join(".local").join("share");
let result = get_platform_dependent_data_dirs();
let priority_list = vec![
PathBuf::from(xdg_data_home),
user_local_share,
PathBuf::from(xdg_data_directories[0]),
PathBuf::from(xdg_data_directories[1])
];
assert_eq!(result, priority_list);
}
#[test]
#[cfg(any(unix, windows))]
fn default_database_path_adds_grafen_directory_and_database_path() {
let dirs = get_default_database_paths();
assert!(!dirs.is_empty());
for path in dirs {
let mut iter = path.components().rev();
assert_eq!(iter.next().unwrap(), Component::Normal(DEFAULT_DBNAME.as_ref()));
assert_eq!(iter.next().unwrap(), Component::Normal("grafen".as_ref()));
}
}
}
| true |
5c675ac42c5d9f7898aae6c4d7c8ac5f3f1c9d9c
|
Rust
|
projectacrn/acrn-hypervisor
|
/misc/config_tools/configurator/packages/configurator/src-tauri/src/configurator.rs
|
UTF-8
| 13,561 | 2.828125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
use std::borrow::Borrow;
use std::ops::Add;
use std::path::{Path, PathBuf};
use glob::{glob_with, MatchOptions};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use tauri::Window;
use std::fs::{self, File};
use std::io;
use std::io::prelude::*;
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
#[repr(u16)]
#[non_exhaustive]
pub enum HistoryType {
WorkingFolder = 1,
Board,
Scenario,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct History {
pub working_folder: Vec<String>,
pub board_file: Vec<String>,
pub scenario_file: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ConfigData {
pub history: History,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Configurator {
pub config_write_enable: bool,
pub config_path: PathBuf,
pub config_data: ConfigData,
pub working_folder: String,
}
pub fn write_file(path: PathBuf, content: String) -> Result<(), String> {
fs::write(path, content).map_err(|e| e.to_string())
}
impl ConfigData {
fn new() -> ConfigData {
let history = History {
working_folder: vec![],
board_file: vec![],
scenario_file: vec![],
};
ConfigData { history }
}
pub fn serialize(&self) -> String {
serde_json::to_string(&self).unwrap_or_else(|_| {
let default = ConfigData::new();
ConfigData::serialize(&default)
})
}
/// deserialize data
fn deserialize(config_json: String) -> Result<ConfigData, String> {
match serde_json::from_str(&config_json.to_string()) {
Ok(config_data) => Ok(config_data),
Err(e) => Err(e.to_string()),
}
}
}
impl Configurator {
pub fn new() -> Self {
match Self::ensure_config_file() {
Ok(config_file_path) => {
// read config.json
Self::init(config_file_path)
}
Err(e) => {
log::warn!("get config file path error! error: {}", e.to_string());
log::warn!("Use blank config and disable config write to start configurator.");
Self {
config_write_enable: false,
config_path: Path::new(".").to_path_buf(),
config_data: ConfigData::new(),
working_folder: "".to_string(),
}
}
}
}
fn ensure_config_file() -> Result<PathBuf, String> {
// get config_dir or home_dir path
// Todo: 讨论fallback逻辑是否可行
let config_base = match dirs::config_dir() {
None => {
log::info!("get config_dir error! fallback to get home_dir.");
match dirs::home_dir() {
None => {
return Err("get config_dir and home_dir error!".to_string());
}
Some(path) => path,
}
}
Some(path) => path,
};
// get acrn-configurator dir path and check it exist
let config_dir = config_base.join(".acrn-configurator");
log::info!("current config_dir is {}.", config_dir.to_str().unwrap());
if !config_dir.is_dir() {
match fs::create_dir(&config_dir) {
Err(e) => {
//Todo: 明确无法创建 .acrn-configurator 文件夹时的处理逻辑
log::warn!("Create configurator config dir failed, {}", e.to_string());
return Err(e.to_string());
}
_ => {}
}
}
// get config.json file path and check it exist
let default_config_path = config_dir.join("config.json");
if !default_config_path.is_file() {
let empty_config = ConfigData::new();
match fs::write(&default_config_path, empty_config.serialize()) {
Ok(_) => {}
Err(e) => return Err(e.to_string()),
};
}
Ok(default_config_path)
}
pub fn init(config_file_path: PathBuf) -> Configurator {
let config_json = match fs::read_to_string(&config_file_path) {
Ok(data) => data,
Err(e) => {
log::warn!("read config error! error: {}", e.to_string());
log::warn!("Use default blank config to start due to read config failed.");
return Configurator {
config_write_enable: false,
config_path: config_file_path,
config_data: ConfigData::new(),
working_folder: "".to_string(),
};
}
};
let config_data = match ConfigData::deserialize(config_json) {
Ok(config_data) => {
log::info!("success load config: {}", config_data.serialize());
config_data
}
Err(e) => {
log::warn!("Deserialize json data error! error: {}", e);
log::warn!("Use default blank config to start due to deserialize config failed.");
ConfigData::new()
}
};
log::info!("Using config: {}", config_data.serialize());
Configurator {
config_write_enable: true,
config_path: config_file_path,
config_data,
working_folder: "".to_string(),
}
}
pub fn save_config(&self) {
if !self.config_write_enable {
return;
}
match fs::write(&self.config_path, self.config_data.serialize()) {
Ok(_) => {}
Err(e) => {
log::warn!("Write config error! error:{}", e.to_string())
}
}
}
pub fn add_history(&mut self, history_type: HistoryType, history_path: &Path) {
let path_string: String = history_path.to_string_lossy().parse().unwrap();
match history_type {
HistoryType::WorkingFolder => {
self.config_data
.history
.working_folder
.insert(0, path_string);
self.config_data.history.working_folder = self
.config_data
.history
.working_folder
.clone()
.into_iter()
.unique()
.collect()
}
HistoryType::Board => {
self.config_data.history.board_file.insert(0, path_string);
self.config_data.history.board_file = self
.config_data
.history
.board_file
.clone()
.into_iter()
.unique()
.collect()
}
HistoryType::Scenario => {
self.config_data
.history
.scenario_file
.insert(0, path_string);
self.config_data.history.scenario_file = self
.config_data
.history
.scenario_file
.clone()
.into_iter()
.unique()
.collect()
}
};
}
pub fn get_history(&self, history_type: HistoryType) -> &[String] {
match history_type {
HistoryType::WorkingFolder => self.config_data.history.working_folder.borrow(),
HistoryType::Board => self.config_data.history.board_file.borrow(),
HistoryType::Scenario => self.config_data.history.scenario_file.borrow(),
}
}
pub fn force_reset(&mut self) {
self.config_data = ConfigData::new();
self.save_config()
}
pub fn set_working_folder(&mut self, working_folder: String) {
self.working_folder = working_folder
}
pub fn write_board(&self, board_name: String, board_xml_string: String) -> Result<(), String> {
let options = MatchOptions {
case_sensitive: false,
..Default::default()
};
let pattern = self.working_folder.clone().add("/.*\\.board\\.xml");
let files = match glob_with(&pattern, options).map_err(|e| e.to_string()) {
Ok(files) => files,
Err(e) => return Err(e.to_string()),
};
for entry in files {
match entry {
Ok(filepath) => match fs::remove_file(&filepath) {
Ok(_) => {}
Err(e) => {
let err_msg = format!(
"Can not delete file:{} error: {}",
filepath.to_str().unwrap_or_else(|| "").to_string(),
e.to_string()
);
log::warn!("{}", err_msg);
return Err(err_msg);
}
},
Err(e) => {
log::error!("find old board error! error:{}", e.to_string())
}
}
}
let board_basename = board_name.add(".board.xml");
let board_xml_path = Path::new(&self.working_folder).join(board_basename);
write_file(board_xml_path, board_xml_string)
}
}
static mut WORKING_FOLDER: String = String::new();
#[tauri::command]
pub fn get_history(history_type: HistoryType) -> Result<String, ()> {
let configurator = Configurator::new();
let history = configurator.get_history(history_type);
// filter out empty string and not exist history path
let clean_history: Vec<&String> = match history_type {
HistoryType::WorkingFolder => history
.into_iter()
.filter(|s| !s.is_empty())
.filter(|s| Path::new(s).is_dir())
.collect::<Vec<_>>(),
_ => history
.into_iter()
.filter(|s| !s.is_empty())
.filter(|s| Path::new(s).is_file())
.collect::<Vec<_>>(),
};
let history_json_text =
serde_json::to_string(&clean_history).unwrap_or_else(|_| String::from("[]"));
Ok(history_json_text)
}
#[tauri::command]
pub fn add_history(history_type: HistoryType, history_path: String) -> Result<(), &'static str> {
let path_buf = Path::new(&history_path);
if !(path_buf.is_dir() || path_buf.is_file()) {
return Err("Not a validate dir or file path.");
}
let mut configurator = Configurator::new();
configurator.add_history(history_type, path_buf);
configurator.save_config();
Ok(())
}
#[tauri::command]
pub fn set_working_folder(working_folder: String) -> Result<(), ()> {
unsafe {
WORKING_FOLDER = working_folder;
}
Ok(())
}
#[tauri::command]
pub fn write_board(board_name: String, contents: String) -> Result<(), String> {
let mut configurator = Configurator::new();
unsafe {
configurator.set_working_folder(WORKING_FOLDER.clone());
}
configurator.write_board(board_name, contents)
}
#[tauri::command]
pub fn force_reset() -> Result<(), ()> {
let mut configurator = Configurator::new();
configurator.force_reset();
Ok(())
}
#[tauri::command]
pub fn get_home() -> Result<String, ()> {
match dirs::home_dir() {
None => Ok(String::new()),
Some(path) => Ok(path.to_str().unwrap().to_string()),
}
}
#[derive(Serialize)]
pub struct DirEntry {
path: String,
children: Option<Vec<DirEntry>>,
}
#[tauri::command]
pub fn acrn_read(file_path: &str) -> Result<String, String> {
let mut file = File::open(file_path).map_err(|e| e.to_string())?;
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| e.to_string())?;
Ok(contents)
}
#[tauri::command]
pub fn acrn_write(file_path: &str, contents: &str) -> Result<(), String> {
let mut file = File::create(file_path).map_err(|e| e.to_string())?;
file.write_all(contents.as_bytes())
.map_err(|e| e.to_string())?;
Ok(())
}
#[tauri::command]
pub fn acrn_is_file(path: &str) -> bool {
fs::metadata(path)
.map(|metadata| metadata.is_file())
.unwrap_or(false)
}
#[tauri::command]
pub fn acrn_create_dir(path: &str, recursive: bool) -> Result<(), String> {
if recursive {
fs::create_dir_all(path).map_err(|e| e.to_string())
} else {
fs::create_dir(path).map_err(|e| e.to_string())
}
}
fn read_dir<P: AsRef<Path>>(path: P, recursive: bool) -> io::Result<Vec<DirEntry>> {
let path = path.as_ref();
let mut entries = Vec::new();
for entry in fs::read_dir(path)? {
let entry = entry?;
let path = entry.path().to_str().unwrap().to_string();
let children = if recursive && entry.file_type()?.is_dir() {
Some(read_dir(&path, true)?)
} else {
None
};
entries.push(DirEntry { path, children });
}
Ok(entries)
}
#[tauri::command]
pub fn acrn_read_dir(path: &str, recursive: bool) -> Result<Vec<DirEntry>, String> {
read_dir(path, recursive).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn acrn_remove_dir(path: &str) -> Result<(), String> {
fs::remove_dir_all(path).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn acrn_remove_file(path: &str) -> Result<(), String> {
fs::remove_file(path).map_err(|e| e.to_string())
}
#[tauri::command]
pub fn open_devtools(window: Window) {
window.open_devtools()
}
| true |
944408b0c1d294b9ecda815f0fa967c5023cc185
|
Rust
|
glittershark/achilles
|
/src/parser/mod.rs
|
UTF-8
| 5,010 | 2.828125 | 3 |
[] |
no_license
|
use nom::character::complete::{multispace0, multispace1};
use nom::error::{ErrorKind, ParseError};
use nom::{alt, char, complete, do_parse, many0, named, separated_list0, tag, terminated};
#[macro_use]
mod macros;
mod expr;
mod type_;
use crate::ast::{Arg, Decl, Fun, Ident};
pub use expr::expr;
pub use type_::type_;
pub type Error = nom::Err<nom::error::Error<String>>;
pub(crate) fn is_reserved(s: &str) -> bool {
matches!(
s,
"if" | "then"
| "else"
| "let"
| "in"
| "fn"
| "int"
| "float"
| "bool"
| "true"
| "false"
| "cstring"
)
}
pub(crate) fn ident<'a, E>(i: &'a str) -> nom::IResult<&'a str, Ident, E>
where
E: ParseError<&'a str>,
{
let mut chars = i.chars();
if let Some(f) = chars.next() {
if f.is_alphabetic() || f == '_' {
let mut idx = 1;
for c in chars {
if !(c.is_alphanumeric() || c == '_') {
break;
}
idx += 1;
}
let id = &i[..idx];
if is_reserved(id) {
Err(nom::Err::Error(E::from_error_kind(i, ErrorKind::Satisfy)))
} else {
Ok((&i[idx..], Ident::from_str_unchecked(id)))
}
} else {
Err(nom::Err::Error(E::from_error_kind(i, ErrorKind::Satisfy)))
}
} else {
Err(nom::Err::Error(E::from_error_kind(i, ErrorKind::Eof)))
}
}
named!(ascripted_arg(&str) -> Arg, do_parse!(
complete!(char!('(')) >>
multispace0 >>
ident: ident >>
multispace0 >>
complete!(char!(':')) >>
multispace0 >>
type_: type_ >>
multispace0 >>
complete!(char!(')')) >>
(Arg {
ident,
type_: Some(type_)
})
));
named!(arg(&str) -> Arg, alt!(
ident => { |ident| Arg {ident, type_: None}} |
ascripted_arg
));
named!(fun_decl(&str) -> Decl, do_parse!(
complete!(tag!("fn"))
>> multispace0
>> name: ident
>> multispace1
>> args: separated_list0!(multispace1, arg)
>> multispace0
>> char!('=')
>> multispace0
>> body: expr
>> (Decl::Fun {
name,
body: Fun {
args,
body
}
})
));
named!(ascription_decl(&str) -> Decl, do_parse!(
name: ident
>> multispace0
>> complete!(char!(':'))
>> multispace0
>> type_: type_
>> (Decl::Ascription {
name,
type_
})
));
named!(pub decl(&str) -> Decl, alt!(
ascription_decl |
fun_decl
));
named!(pub toplevel(&str) -> Vec<Decl>, terminated!(many0!(decl), multispace0));
#[cfg(test)]
mod tests {
use std::convert::TryInto;
use crate::ast::{BinaryOperator, Expr, FunctionType, Literal, Type};
use super::*;
use expr::tests::ident_expr;
#[test]
fn fn_decl() {
let res = test_parse!(decl, "fn id x = x");
assert_eq!(
res,
Decl::Fun {
name: "id".try_into().unwrap(),
body: Fun {
args: vec!["x".try_into().unwrap()],
body: *ident_expr("x"),
}
}
)
}
#[test]
fn ascripted_fn_args() {
test_parse!(ascripted_arg, "(x : int)");
let res = test_parse!(decl, "fn plus1 (x : int) = x + 1");
assert_eq!(
res,
Decl::Fun {
name: "plus1".try_into().unwrap(),
body: Fun {
args: vec![Arg {
ident: "x".try_into().unwrap(),
type_: Some(Type::Int),
}],
body: Expr::BinaryOp {
lhs: ident_expr("x"),
op: BinaryOperator::Add,
rhs: Box::new(Expr::Literal(Literal::Int(1))),
}
}
}
);
}
#[test]
fn multiple_decls() {
let res = test_parse!(
toplevel,
"fn id x = x
fn plus x y = x + y
fn main = plus (id 2) 7"
);
assert_eq!(res.len(), 3);
let res = test_parse!(
toplevel,
"fn id x = x\nfn plus x y = x + y\nfn main = plus (id 2) 7\n"
);
assert_eq!(res.len(), 3);
}
#[test]
fn top_level_ascription() {
let res = test_parse!(toplevel, "id : fn a -> a");
assert_eq!(
res,
vec![Decl::Ascription {
name: "id".try_into().unwrap(),
type_: Type::Function(FunctionType {
args: vec![Type::Var("a".try_into().unwrap())],
ret: Box::new(Type::Var("a".try_into().unwrap()))
})
}]
)
}
}
| true |
d6dd2892ac2435b54c9772b546df4d63bf258bdb
|
Rust
|
gifnksm/ProjectEulerRust
|
/src/bin/p027.rs
|
UTF-8
| 1,341 | 3.1875 | 3 |
[
"MIT"
] |
permissive
|
//! [Problem 27](https://projecteuler.net/problem=27) solver.
#![warn(
bad_style,
unused,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results
)]
use prime::PrimeSet;
// p(n) = n^2 + an + b is prime for n = 0 .. N
// p(0) = b => b must be prime
// p(1) = 1 + a + b => a > -(1+b)
// p(2) = 4 + 2a + b
fn get_limit_n(ps: &PrimeSet, a: i32, b: i32) -> u32 {
(0..)
.take_while(|&n| {
let val = n * n + a * n + b;
val >= 0 && ps.contains(val as u64)
})
.last()
.unwrap() as u32
}
fn compute(limit: u64) -> i32 {
let ps = PrimeSet::new();
let (a, b, _len) = ps
.iter()
.take_while(|&p| p < limit)
.filter_map(|p| {
let b = p as i32;
(-b..1000)
.map(|a| (a, b, get_limit_n(&ps, a, b)))
.max_by_key(|&(_a, _b, len)| len)
})
.max_by_key(|&(_a, _b, len)| len)
.unwrap();
a * b
}
fn solve() -> String {
compute(1000).to_string()
}
common::problem!("-59231", solve);
#[cfg(test)]
mod tests {
use prime::PrimeSet;
#[test]
fn primes() {
let ps = PrimeSet::new();
assert_eq!(39, super::get_limit_n(&ps, 1, 41));
assert_eq!(79, super::get_limit_n(&ps, -79, 1601))
}
}
| true |
11c2e167326c1be90e0c40ca6e7b7bbb7b328b2a
|
Rust
|
dakom/awsm-renderer
|
/demo/src/ui/primitives/button/state.rs
|
UTF-8
| 629 | 3.0625 | 3 |
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
use crate::prelude::*;
use crate::ui::primitives::image::Image;
pub struct Button {
pub style: ButtonStyle,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ButtonStyle {
Color(ButtonColor),
Image(Rc<Image>)
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ButtonColor {
Primary,
Green,
Red,
}
impl Button {
pub fn new_color(color: ButtonColor) -> Rc<Self> {
Rc::new(Self {
style: ButtonStyle::Color(color),
})
}
pub fn new_image(image: Rc<Image>) -> Rc<Self> {
Rc::new(Self {
style: ButtonStyle::Image(image),
})
}
}
| true |
d6980ea432ec378bbc95e7574b63dd3010e6af66
|
Rust
|
lnds/Ogu
|
/ogu-lang/src/backend/modules/tests/test_lets_wheres.rs
|
UTF-8
| 3,228 | 2.703125 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
use crate::backend::compiler::default_sym_table;
use crate::backend::modules::tests::make_module;
use crate::backend::modules::types::basic_type::BasicType;
use crate::backend::modules::types::func_type::FuncType;
use indoc::indoc;
use crate::backend::modules::types::trait_type::TRAIT_UNKNOWN;
use crate::backend::scopes::types::TypeClone;
use crate::backend::modules::types::list_type::ListType;
#[test]
fn test_let() {
let module = make_module(
indoc! {r#"
-- taken from http://learnyouahaskell.com/syntax-in-functions#pattern-matching
str_imc w h =
let bmi = w / h ^ 2
in if bmi <= 18.5 then "You're underweight, you emo, you!"
elif bmi <= 25.0 then "You're supposedly normal. Pffft, I bet you're ugly!"
elif bmi <= 30.0 then "You're fat! Lose some weight, fatty!"
else "You're a whale, congratulations!"
"#},
default_sym_table(),
);
assert!(module.is_ok());
let module = module.unwrap();
let decls = module.get_decls();
assert_eq!(
decls[0].get_type(),
Some(FuncType::new_func_type(
Some(vec![BasicType::int(), BasicType::int()]),
BasicType::static_str(),
))
);
}
#[test]
fn test_let_cons() {
let module = make_module(
indoc! {r#"
tail x = let (x :: xs) = x in xs
t = tail [1, 2, 3]
"#},
default_sym_table(),
);
assert!(module.is_ok());
let module = module.unwrap();
let decls = module.get_decls();
assert_eq!(decls[0].get_type(), Some(FuncType::new_func_type(Some(vec![TRAIT_UNKNOWN.clone_box()]), ListType::new_list(TRAIT_UNKNOWN.clone_box()))));
assert_eq!(decls[1].get_type(), Some(ListType::new_list(BasicType::int())));
}
#[test]
fn test_let_2() {
let module = make_module(
indoc! {r#"
a = let a = 10 in let a = a * 1.0 in let a = a // 2 in a
b = let a = 10 in let a = a * 1.0 in let a = a / 2 in a
"#},
default_sym_table(),
);
assert!(module.is_ok());
let module = module.unwrap();
let decls = module.get_decls();
assert_eq!(decls[0].get_type(), Some(BasicType::int()));
assert_eq!(decls[1].get_type(), Some(BasicType::float()));
}
#[test]
fn test_where() {
let module = make_module(
indoc! {r#"
-- taken from http://learnyouahaskell.com/syntax-in-functions#pattern-matching
str_imc w h =
if bmi <= 18.5 then "You're underweight, you emo, you!"
elif bmi <= 25.0 then "You're supposedly normal. Pffft, I bet you're ugly!"
elif bmi <= 30.0 then "You're fat! Lose some weight, fatty!"
else "You're a whale, congratulations!"
where
bmi = w / h ^ 2
"#},
default_sym_table(),
);
assert!(module.is_ok());
let module = module.unwrap();
let decls = module.get_decls();
assert_eq!(
decls[0].get_type(),
Some(FuncType::new_func_type(
Some(vec![BasicType::int(), BasicType::int()]),
BasicType::static_str(),
))
);
}
| true |
7433cfb3994ffdf32aa9c168fcda8128d76eb529
|
Rust
|
datariot/msgpack-rust
|
/rmp/src/encode.rs
|
UTF-8
| 31,159 | 2.984375 | 3 |
[] |
no_license
|
//! Provides various functions and structs for MessagePack encoding.
use std::convert::From;
use std::error::Error;
use std::fmt;
use std::io;
use std::io::Write;
use std::result::Result;
use byteorder;
use byteorder::WriteBytesExt;
use super::Marker;
#[path = "encode/value_ref.rs"]
pub mod value_ref;
/// Represents an error that can occur when attempting to write MessagePack'ed value into the write.
#[derive(Debug)]
pub struct WriteError(pub io::Error);
impl Error for WriteError {
fn description(&self) -> &str { "error while writing MessagePack'ed value" }
fn cause(&self) -> Option<&Error> {
Some(&self.0)
}
}
impl fmt::Display for WriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.description().fmt(f)
}
}
impl From<byteorder::Error> for WriteError {
fn from(err: byteorder::Error) -> WriteError {
match err {
byteorder::Error::UnexpectedEOF => {
WriteError(io::Error::new(io::ErrorKind::Other, "unexpected EOF"))
},
byteorder::Error::Io(err) => WriteError(err),
}
}
}
/// Represents an error that can occur when attempting to write marker into the write.
#[derive(Debug)]
pub struct MarkerWriteError(WriteError);
impl Error for MarkerWriteError {
fn description(&self) -> &str { "error while writing marker" }
fn cause(&self) -> Option<&Error> {
Some(&self.0)
}
}
impl fmt::Display for MarkerWriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.description().fmt(f)
}
}
impl From<byteorder::Error> for MarkerWriteError {
fn from(err: byteorder::Error) -> MarkerWriteError {
MarkerWriteError(From::from(err))
}
}
/// Represents an error that can occur when attempting to write MessagePack'ed single-byte value.
#[derive(Debug)]
pub struct FixedValueWriteError(pub WriteError);
impl Error for FixedValueWriteError {
fn description(&self) -> &str { "error while writing MessagePack'ed single-byte value" }
fn cause(&self) -> Option<&Error> {
Some(&self.0)
}
}
impl fmt::Display for FixedValueWriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.description().fmt(f)
}
}
/// Represents an error that can occur when attempring to write MessagePack'ed complex value into
/// the write.
#[derive(Debug)]
pub enum ValueWriteError {
/// IO error while writing marker.
InvalidMarkerWrite(WriteError),
/// IO error while writing data.
InvalidDataWrite(WriteError),
}
impl Error for ValueWriteError {
fn description(&self) -> &str { "error while writing MessagePack'ed complex value" }
fn cause(&self) -> Option<&Error> {
match *self {
ValueWriteError::InvalidMarkerWrite(ref err) => Some(err),
ValueWriteError::InvalidDataWrite(ref err) => Some(err),
}
}
}
impl fmt::Display for ValueWriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.description().fmt(f)
}
}
impl From<MarkerWriteError> for ValueWriteError {
fn from(err: MarkerWriteError) -> ValueWriteError {
match err {
MarkerWriteError(err) => ValueWriteError::InvalidMarkerWrite(err),
}
}
}
impl From<FixedValueWriteError> for ValueWriteError {
fn from(err: FixedValueWriteError) -> ValueWriteError {
match err {
FixedValueWriteError(err) => ValueWriteError::InvalidMarkerWrite(err),
}
}
}
/// Attempts to write the given marker into the write and transforms any IO error to the special
/// kind of error.
fn write_marker<W>(wr: &mut W, marker: Marker) -> Result<(), MarkerWriteError>
where W: Write
{
wr.write_u8(marker.to_u8()).map_err(From::from)
}
/// Attempts to write the given fixed value (represented as marker) into the write and transforms
/// any IO error to the special kind of error.
fn write_fixval<W>(wr: &mut W, marker: Marker) -> Result<(), FixedValueWriteError>
where W: Write
{
wr.write_u8(marker.to_u8()).map_err(|err| FixedValueWriteError(From::from(err)))
}
/// Encodes and attempts to write a nil value into the given write.
///
/// According to the MessagePack specification, a nil value is represented as a single `0xc0` byte.
///
/// # Errors
///
/// This function will return `FixedValueWriteError` on any I/O error occurred while writing the nil
/// marker.
pub fn write_nil<W>(wr: &mut W) -> Result<(), FixedValueWriteError>
where W: Write
{
write_fixval(wr, Marker::Null)
}
/// Encodes and attempts to write a bool value into the given write.
///
/// According to the MessagePack specification, an encoded boolean value is represented as a single
/// byte.
///
/// # Errors
///
/// This function will return `FixedValueWriteError` on any I/O error occurred while writing the
/// boolean marker.
pub fn write_bool<W>(wr: &mut W, val: bool) -> Result<(), FixedValueWriteError>
where W: Write
{
match val {
true => write_fixval(wr, Marker::True),
false => write_fixval(wr, Marker::False)
}
}
/// Encodes and attempts to write an unsigned small integer value as a positive fixint into the
/// given write.
///
/// According to the MessagePack specification, a positive fixed integer value is represented using
/// a single byte in `[0x00; 0x7f]` range inclusively, prepended with a special marker mask.
///
/// The function is **strict** with the input arguments - it is the user's responsibility to check
/// if the value fits in the described range, otherwise it will panic.
///
/// If you are not sure if the value fits in the given range use `write_uint` instead, which
/// automatically selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `FixedValueWriteError` on any I/O error occurred while writing the
/// positive integer marker.
///
/// # Panics
///
/// Panics if `val` is greater than 127.
pub fn write_pfix<W>(wr: &mut W, val: u8) -> Result<(), FixedValueWriteError>
where W: Write
{
assert!(val < 128);
write_fixval(wr, Marker::FixPos(val))
}
/// Encodes and attempts to write a negative small integer value as a negative fixnum into the
/// given write.
///
/// According to the MessagePack specification, a negative fixed integer value is represented using
/// a single byte in `[0xe0; 0xff]` range inclusively, prepended with a special marker mask.
///
/// The function is **strict** with the input arguments - it is the user's responsibility to check
/// if the value fits in the described range, otherwise it will panic.
///
/// If you are not sure if the value fits in the given range use `write_sint` instead, which
/// automatically selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `FixedValueWriteError` on any I/O error occurred while writing the
/// positive integer marker.
///
/// # Panics
///
/// Panics if `val` does not fit in `[-32; 0)` range.
pub fn write_nfix<W>(wr: &mut W, val: i8) -> Result<(), FixedValueWriteError>
where W: Write
{
assert!(-32 <= val && val < 0);
write_fixval(wr, Marker::FixNeg(val))
}
// TODO: Eliminate this or not?
macro_rules! make_write_data_fn {
(deduce, $writer:ident, $encoder:ident, 0, $val:ident)
=> ($writer.$encoder($val););
(deduce, $writer:ident, $encoder:ident, 1, $val:ident)
=> ($writer.$encoder::<byteorder::BigEndian>($val););
(gen, $t:ty, $d:tt, $name:ident, $encoder:ident) => {
fn $name<W>(wr: &mut W, val: $t) -> Result<(), ValueWriteError>
where W: Write
{
match make_write_data_fn!(deduce, wr, $encoder, $d, val) {
Ok(data) => Ok(data),
Err(err) => Err(ValueWriteError::InvalidDataWrite(From::from(err))),
}
}
};
(u8, $name:ident, $encoder:ident) => (make_write_data_fn!(gen, u8, 0, $name, $encoder););
(i8, $name:ident, $encoder:ident) => (make_write_data_fn!(gen, i8, 0, $name, $encoder););
($t:ty, $name:ident, $encoder:ident) => (make_write_data_fn!(gen, $t, 1, $name, $encoder););
}
make_write_data_fn!(u8, write_data_u8, write_u8);
make_write_data_fn!(u16, write_data_u16, write_u16);
make_write_data_fn!(u32, write_data_u32, write_u32);
make_write_data_fn!(u64, write_data_u64, write_u64);
make_write_data_fn!(i8, write_data_i8, write_i8);
make_write_data_fn!(i16, write_data_i16, write_i16);
make_write_data_fn!(i32, write_data_i32, write_i32);
make_write_data_fn!(i64, write_data_i64, write_i64);
make_write_data_fn!(f32, write_data_f32, write_f32);
make_write_data_fn!(f64, write_data_f64, write_f64);
/// Encodes and attempts to write an `u8` value as a 2-byte sequence into the given write.
///
/// The first byte becomes the marker and the second one will represent the data itself.
///
/// Note, that this function will encode the given value in 2-byte sequence no matter what, even if
/// the value can be represented using single byte as a positive fixnum.
///
/// If you need to fit the given buffer efficiently use `write_uint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
///
/// # Examples
/// ```
/// use rmp::encode::write_u8;
///
/// let mut buf = [0x00, 0x00];
///
/// write_u8(&mut &mut buf[..], 146).ok().unwrap();
/// assert_eq!([0xcc, 0x92], buf);
///
/// // Note, that 42 can be represented simply as `[0x2a]`, but the function emits 2-byte sequence.
/// write_u8(&mut &mut buf[..], 42).ok().unwrap();
/// assert_eq!([0xcc, 0x2a], buf);
/// ```
pub fn write_u8<W>(wr: &mut W, val: u8) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::U8));
write_data_u8(wr, val)
}
/// Encodes and attempts to write an `u16` value strictly as a 3-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note, that this function will encode the given value in 3-byte sequence no matter what, even if
/// the value can be represented using single byte as a positive fixnum.
///
/// If you need to fit the given buffer efficiently use `write_uint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_u16<W>(wr: &mut W, val: u16) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::U16));
write_data_u16(wr, val)
}
/// Encodes and attempts to write an `u32` value strictly as a 5-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note, that this function will encode the given value in 5-byte sequence no matter what, even if
/// the value can be represented using single byte as a positive fixnum.
///
/// If you need to fit the given buffer efficiently use `write_uint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_u32<W>(wr: &mut W, val: u32) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::U32));
write_data_u32(wr, val)
}
/// Encodes and attempts to write an `u64` value strictly as a 9-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note, that this function will encode the given value in 9-byte sequence no matter what, even if
/// the value can be represented using single byte as a positive fixnum.
///
/// If you need to fit the given buffer efficiently use `write_uint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_u64<W>(wr: &mut W, val: u64) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::U64));
write_data_u64(wr, val)
}
/// Encodes and attempts to write an `i8` value as a 2-byte sequence into the given write.
///
/// The first byte becomes the marker and the second one will represent the data itself.
///
/// Note, that this function will encode the given value in 2-byte sequence no matter what, even if
/// the value can be represented using single byte as a fixnum. Also note, that the first byte will
/// always be the i8 marker (`0xd0`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
///
/// # Examples
/// ```
/// use rmp::encode::write_i8;
///
/// let mut buf = [0x00, 0x00];
///
/// write_i8(&mut &mut buf[..], 42).ok().unwrap();
/// assert_eq!([0xd0, 0x2a], buf);
///
/// // Note, that -18 can be represented simply as `[0xee]`, but the function emits 2-byte sequence.
/// write_i8(&mut &mut buf[..], -18).ok().unwrap();
/// assert_eq!([0xd0, 0xee], buf);
/// ```
pub fn write_i8<W>(wr: &mut W, val: i8) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::I8));
write_data_i8(wr, val)
}
/// Encodes and attempts to write an `i16` value as a 3-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note, that this function will encode the given value in 3-byte sequence no matter what, even if
/// the value can be represented using single byte as a fixnum. Also note, that the first byte will
/// always be the i16 marker (`0xd1`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_i16<W>(wr: &mut W, val: i16) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::I16));
write_data_i16(wr, val)
}
/// Encodes and attempts to write an `i32` value as a 5-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note, that this function will encode the given value in 5-byte sequence no matter what, even if
/// the value can be represented using single byte as a fixnum. Also note, that the first byte will
/// always be the i32 marker (`0xd2`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_i32<W>(wr: &mut W, val: i32) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::I32));
write_data_i32(wr, val)
}
/// Encodes and attempts to write an `i64` value as a 9-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note, that this function will encode the given value in 9-byte sequence no matter what, even if
/// the value can be represented using single byte as a fixnum. Also note, that the first byte will
/// always be the i16 marker (`0xd3`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_i64<W>(wr: &mut W, val: i64) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::I64));
write_data_i64(wr, val)
}
/// Encodes and attempts to write an `u64` value into the given write using the most efficient
/// representation, returning the marker used.
///
/// This function obeys the MessagePack specification, which requires that the serializer SHOULD use
/// the format which represents the data in the smallest number of bytes.
///
/// The first byte becomes the marker and the others (if present, up to 9) will represent the data
/// itself.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
// TODO: Replace with `match`?
pub fn write_uint<W>(wr: &mut W, val: u64) -> Result<Marker, ValueWriteError>
where W: Write
{
if val < 128 {
let marker = Marker::FixPos(val as u8);
try!(write_fixval(wr, marker));
Ok(marker)
} else if val < 256 {
write_u8(wr, val as u8).and(Ok(Marker::U8))
} else if val < 65536 {
write_u16(wr, val as u16).and(Ok(Marker::U16))
} else if val < 4294967296 {
write_u32(wr, val as u32).and(Ok(Marker::U32))
} else {
write_u64(wr, val).and(Ok(Marker::U64))
}
}
/// Encodes and attempts to write an `i64` value into the given write using the most efficient
/// representation, returning the marker used.
///
/// This function obeys the MessagePack specification, which requires that the serializer SHOULD use
/// the format which represents the data in the smallest number of bytes, with the exception of
/// sized/unsized types.
///
/// Note, that the function will **always** use signed integer representation even if the value can
/// be more efficiently represented using unsigned integer encoding.
///
/// The first byte becomes the marker and the others (if present, up to 9) will represent the data
/// itself.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_sint<W>(wr: &mut W, val: i64) -> Result<Marker, ValueWriteError>
where W: Write
{
if -32 <= val && val <= 0 {
let marker = Marker::FixNeg(val as i8);
try!(write_fixval(wr, marker));
Ok(marker)
} else if -128 <= val && val < 128 {
write_i8(wr, val as i8).and(Ok(Marker::I8))
} else if -32768 <= val && val < 32768 {
write_i16(wr, val as i16).and(Ok(Marker::I16))
} else if -2147483648 <= val && val <= 2147483647 {
write_i32(wr, val as i32).and(Ok(Marker::I32))
} else {
write_i64(wr, val).and(Ok(Marker::I64))
}
}
/// Encodes and attempts to write an `i64` value using the most effective representation.
pub fn write_sint_eff<W>(wr: &mut W, val: i64) -> Result<Marker, ValueWriteError>
where W: Write
{
match val {
val if -32 <= val && val < 0 => {
let marker = Marker::FixNeg(val as i8);
try!(write_fixval(wr, marker));
Ok(marker)
}
val if -128 <= val && val < -32 => {
write_i8(wr, val as i8).and(Ok(Marker::I8))
}
val if -32768 <= val && val < -128 => {
write_i16(wr, val as i16).and(Ok(Marker::I16))
}
val if -2147483648 <= val && val < -32768 => {
write_i32(wr, val as i32).and(Ok(Marker::I32))
}
val if val < -2147483648 => {
write_i64(wr, val).and(Ok(Marker::I64))
}
val if 0 <= val && val < 128 => {
let marker = Marker::FixPos(val as u8);
try!(write_fixval(wr, marker));
Ok(marker)
}
val if val < 256 => {
write_u8(wr, val as u8).and(Ok(Marker::U8))
}
val if val < 65536 => {
write_u16(wr, val as u16).and(Ok(Marker::U16))
}
val if val < 4294967296 => {
write_u32(wr, val as u32).and(Ok(Marker::U32))
}
val => {
write_i64(wr, val).and(Ok(Marker::I64))
}
}
}
/// Encodes and attempts to write an `f32` value as a 5-byte sequence into the given write.
///
/// The first byte becomes the `f32` marker and the others will represent the data itself.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_f32<W>(wr: &mut W, val: f32) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::F32));
write_data_f32(wr, val)
}
/// Encodes and attempts to write an `f64` value as a 9-byte sequence into the given write.
///
/// The first byte becomes the `f64` marker and the others will represent the data itself.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_f64<W>(wr: &mut W, val: f64) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_marker(wr, Marker::F64));
write_data_f64(wr, val)
}
/// Encodes and attempts to write the most efficient string length implementation to the given
/// write, returning the marker used.
///
/// This function is useful when you want to get full control for writing the data itself, for
/// example, when using non-blocking socket.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_str_len<W>(wr: &mut W, len: u32) -> Result<Marker, ValueWriteError>
where W: Write
{
if len < 32 {
let marker = Marker::FixStr(len as u8);
try!(write_fixval(wr, marker));
Ok(marker)
} else if len < 256 {
try!(write_marker(wr, Marker::Str8));
write_data_u8(wr, len as u8).and(Ok(Marker::Str8))
} else if len < 65536 {
try!(write_marker(wr, Marker::Str16));
write_data_u16(wr, len as u16).and(Ok(Marker::Str16))
} else {
try!(write_marker(wr, Marker::Str32));
write_data_u32(wr, len).and(Ok(Marker::Str32))
}
}
/// Encodes and attempts to write the most efficient string implementation to the given `Write`.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
// TODO: Docs, range check, example, visibility.
pub fn write_str<W>(wr: &mut W, data: &str) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_str_len(wr, data.len() as u32));
wr.write_all(data.as_bytes()).map_err(|err| ValueWriteError::InvalidDataWrite(WriteError(err)))
}
/// Encodes and attempts to write the most efficient binary array length implementation to the given
/// write, returning the marker used.
///
/// This function is useful when you want to get full control for writing the data itself, for
/// example, when using non-blocking socket.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_bin_len<W>(wr: &mut W, len: u32) -> Result<Marker, ValueWriteError>
where W: Write
{
if len < 256 {
try!(write_marker(wr, Marker::Bin8));
write_data_u8(wr, len as u8).and(Ok(Marker::Bin8))
} else if len < 65536 {
try!(write_marker(wr, Marker::Bin16));
write_data_u16(wr, len as u16).and(Ok(Marker::Bin16))
} else {
try!(write_marker(wr, Marker::Bin32));
write_data_u32(wr, len).and(Ok(Marker::Bin32))
}
}
/// Encodes and attempts to write the most efficient binary implementation to the given `Write`.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
// TODO: Docs, range check, example, visibility.
pub fn write_bin<W>(wr: &mut W, data: &[u8]) -> Result<(), ValueWriteError>
where W: Write
{
try!(write_bin_len(wr, data.len() as u32));
wr.write_all(data).map_err(|err| ValueWriteError::InvalidDataWrite(WriteError(err)))
}
/// Encodes and attempts to write the most efficient array length implementation to the given write,
/// returning the marker used.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_array_len<W>(wr: &mut W, len: u32) -> Result<Marker, ValueWriteError>
where W: Write
{
if len < 16 {
let marker = Marker::FixArray(len as u8);
try!(write_fixval(wr, marker));
Ok(marker)
} else if len < 65536 {
try!(write_marker(wr, Marker::Array16));
write_data_u16(wr, len as u16).and(Ok(Marker::Array16))
} else {
try!(write_marker(wr, Marker::Array32));
write_data_u32(wr, len).and(Ok(Marker::Array32))
}
}
/// Encodes and attempts to write the most efficient map length implementation to the given write,
/// returning the marker used.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
pub fn write_map_len<W>(wr: &mut W, len: u32) -> Result<Marker, ValueWriteError>
where W: Write
{
if len < 16 {
let marker = Marker::FixMap(len as u8);
try!(write_fixval(wr, marker));
Ok(marker)
} else if len < 65536 {
try!(write_marker(wr, Marker::Map16));
write_data_u16(wr, len as u16).and(Ok(Marker::Map16))
} else {
try!(write_marker(wr, Marker::Map32));
write_data_u32(wr, len).and(Ok(Marker::Map32))
}
}
/// Encodes and attempts to write the most efficient ext metadata implementation to the given
/// write, returning the marker used.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error occurred while writing either the
/// marker or the data, except the EINTR, which is handled internally.
///
/// # Panics
///
/// Panics if `typeid` is negative, because it is reserved for future MessagePack extension
/// including 2-byte type information.
pub fn write_ext_meta<W>(wr: &mut W, len: u32, typeid: i8) -> Result<Marker, ValueWriteError>
where W: Write
{
assert!(typeid >= 0);
let marker = match len {
1 => {
try!(write_marker(wr, Marker::FixExt1));
Marker::FixExt1
}
2 => {
try!(write_marker(wr, Marker::FixExt2));
Marker::FixExt2
}
4 => {
try!(write_marker(wr, Marker::FixExt4));
Marker::FixExt4
}
8 => {
try!(write_marker(wr, Marker::FixExt8));
Marker::FixExt8
}
16 => {
try!(write_marker(wr, Marker::FixExt16));
Marker::FixExt16
}
len if len < 256 => {
try!(write_marker(wr, Marker::Ext8));
try!(write_data_u8(wr, len as u8));
Marker::Ext8
}
len if len < 65536 => {
try!(write_marker(wr, Marker::Ext16));
try!(write_data_u16(wr, len as u16));
Marker::Ext16
}
len => {
try!(write_marker(wr, Marker::Ext32));
try!(write_data_u32(wr, len));
Marker::Ext32
}
};
try!(write_data_i8(wr, typeid));
Ok(marker)
}
pub mod value {
use std::convert::From;
use std::fmt;
use std::io::Write;
use std::result::Result;
pub use super::super::value::{
Integer,
Float,
Value,
};
use super::*;
#[derive(Debug)]
pub enum Error {
// TODO: Will be replaced with more concrete values.
UnstableCommonError(String),
}
impl ::std::error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::UnstableCommonError(ref s) => s
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
::std::error::Error::description(self).fmt(f)
}
}
impl From<FixedValueWriteError> for Error {
fn from(err: FixedValueWriteError) -> Error {
match err {
FixedValueWriteError(..) => Error::UnstableCommonError("fixed value error".to_string())
}
}
}
impl From<ValueWriteError> for Error {
fn from(_: ValueWriteError) -> Error {
Error::UnstableCommonError("value error".to_string())
}
}
/// Encodes and attempts to write the most efficient representation of the given Value.
///
/// # Note
///
/// All instances of `ErrorKind::Interrupted` are handled by this function and the underlying
/// operation is retried.
// TODO: Docs. Examples.
pub fn write_value<W>(wr: &mut W, val: &Value) -> Result<(), Error>
where W: Write
{
match val {
&Value::Nil => try!(write_nil(wr)),
&Value::Boolean(val) => try!(write_bool(wr, val)),
// TODO: Replace with generic write_int(...).
&Value::Integer(Integer::U64(val)) => {
try!(write_uint(wr, val));
}
&Value::Integer(Integer::I64(val)) => {
try!(write_sint(wr, val));
}
// TODO: Replace with generic write_float(...).
&Value::Float(Float::F32(val)) => try!(write_f32(wr, val)),
&Value::Float(Float::F64(val)) => try!(write_f64(wr, val)),
&Value::String(ref val) => {
try!(write_str(wr, &val));
}
&Value::Binary(ref val) => {
try!(write_bin(wr, &val));
}
&Value::Array(ref val) => {
try!(write_array_len(wr, val.len() as u32));
for item in val {
try!(write_value(wr, item));
}
}
&Value::Map(ref val) => {
try!(write_map_len(wr, val.len() as u32));
for &(ref key, ref val) in val {
try!(write_value(wr, key));
try!(write_value(wr, val));
}
}
&Value::Ext(ty, ref data) => {
try!(write_ext_meta(wr, data.len() as u32, ty));
try!(wr.write_all(data).map_err(|err| ValueWriteError::InvalidDataWrite(WriteError(err))));
}
}
Ok(())
}
} // mod value
| true |
08ec5c239dffdd97f60dbd4626bdc17d47a58919
|
Rust
|
ia7ck/competitive-programming
|
/AtCoder/abc236/src/bin/b/main.rs
|
UTF-8
| 799 | 2.625 | 3 |
[] |
no_license
|
use input_i_scanner::InputIScanner;
fn main() {
let stdin = std::io::stdin();
let mut _i_i = InputIScanner::from(stdin.lock());
macro_rules! scan {
(($($t: ty),+)) => {
($(scan!($t)),+)
};
($t: ty) => {
_i_i.scan::<$t>() as $t
};
(($($t: ty),+); $n: expr) => {
std::iter::repeat_with(|| scan!(($($t),+))).take($n).collect::<Vec<_>>()
};
($t: ty; $n: expr) => {
std::iter::repeat_with(|| scan!($t)).take($n).collect::<Vec<_>>()
};
}
let n = scan!(usize);
let a = scan!(usize; n * 4 - 1);
let mut freq = vec![0; n + 1];
for a in a {
freq[a] += 1;
}
let ans = (1..=n).filter(|&i| freq[i] == 3).next().unwrap();
println!("{}", ans);
}
| true |
bc492c7fdf6e5ca89ed2ee68298e1afafc3dab07
|
Rust
|
isgasho/slipstream
|
/src/types.rs
|
UTF-8
| 6,637 | 2.578125 | 3 |
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#![allow(missing_docs)]
//! Type aliases of the commonly used vector types.
//!
//! While the vector types are created from the [`Packed?`][Packed2] by setting the base type and
//! length, this is seldom done in downstream code. Instead, this module provides the commonly used
//! types as aliases, like [u16x8]. See the [crate introduction](crate) for further details about the
//! naming convention.
//!
//! All these types are also exported as part of the [`prelude`][crate::prelude].
use core::num::Wrapping;
use typenum::consts::*;
pub use crate::mask::{m16, m32, m64, m8, msize};
use crate::vector::*;
pub type bx2 = Packed2<bool, U2>;
pub type bx4 = Packed4<bool, U4>;
pub type bx8 = Packed8<bool, U8>;
pub type bx16 = Packed16<bool, U16>;
pub type bx32 = Packed32<bool, U32>;
pub type m8x2 = Packed2<m8, U2>;
pub type m8x4 = Packed4<m8, U4>;
pub type m8x8 = Packed8<m8, U8>;
pub type m8x16 = Packed16<m8, U16>;
pub type m8x32 = Packed32<m8, U32>;
pub type m16x2 = Packed4<m16, U2>;
pub type m16x4 = Packed8<m16, U4>;
pub type m16x8 = Packed16<m16, U8>;
pub type m16x16 = Packed32<m16, U16>;
pub type m32x2 = Packed8<m32, U2>;
pub type m32x4 = Packed16<m32, U4>;
pub type m32x8 = Packed32<m32, U8>;
pub type m32x16 = Packed32<m32, U16>;
pub type m64x2 = Packed16<m64, U2>;
pub type m64x4 = Packed32<m64, U4>;
pub type m64x8 = Packed32<m64, U8>;
pub type m64x16 = Packed32<m64, U16>;
pub type u8x2 = Packed2<u8, U2>;
pub type u8x4 = Packed4<u8, U4>;
pub type u8x8 = Packed8<u8, U8>;
pub type u8x16 = Packed16<u8, U16>;
pub type u8x32 = Packed16<u8, U32>;
pub type u16x2 = Packed4<u16, U2>;
pub type u16x4 = Packed8<u16, U4>;
pub type u16x8 = Packed16<u16, U8>;
pub type u16x16 = Packed32<u16, U16>;
pub type u32x2 = Packed8<u32, U2>;
pub type u32x4 = Packed16<u32, U4>;
pub type u32x8 = Packed32<u32, U8>;
pub type u32x16 = Packed32<u32, U16>;
pub type u64x2 = Packed16<u64, U2>;
pub type u64x4 = Packed32<u64, U4>;
pub type u64x8 = Packed32<u64, U8>;
pub type u64x16 = Packed32<u64, U16>;
pub type wu8x2 = Packed2<Wrapping<u8>, U2>;
pub type wu8x4 = Packed4<Wrapping<u8>, U4>;
pub type wu8x8 = Packed8<Wrapping<u8>, U8>;
pub type wu8x16 = Packed16<Wrapping<u8>, U16>;
pub type wu8x32 = Packed16<Wrapping<u8>, U32>;
pub type wu16x2 = Packed4<Wrapping<u16>, U2>;
pub type wu16x4 = Packed8<Wrapping<u16>, U4>;
pub type wu16x8 = Packed16<Wrapping<u16>, U8>;
pub type wu16x16 = Packed32<Wrapping<u16>, U16>;
pub type wu32x2 = Packed8<Wrapping<u32>, U2>;
pub type wu32x4 = Packed16<Wrapping<u32>, U4>;
pub type wu32x8 = Packed32<Wrapping<u32>, U8>;
pub type wu32x16 = Packed32<Wrapping<u32>, U16>;
pub type wu64x2 = Packed16<Wrapping<u64>, U2>;
pub type wu64x4 = Packed32<Wrapping<u64>, U4>;
pub type wu64x8 = Packed32<Wrapping<u64>, U8>;
pub type wu64x16 = Packed32<Wrapping<u64>, U16>;
pub type i8x2 = Packed2<i8, U2>;
pub type i8x4 = Packed4<i8, U4>;
pub type i8x8 = Packed8<i8, U8>;
pub type i8x16 = Packed16<i8, U16>;
pub type i16x2 = Packed4<i16, U2>;
pub type i16x4 = Packed8<i16, U4>;
pub type i16x8 = Packed16<i16, U8>;
pub type i16x16 = Packed32<i16, U16>;
pub type i32x2 = Packed8<i32, U2>;
pub type i32x4 = Packed16<i32, U4>;
pub type i32x8 = Packed32<i32, U8>;
pub type i32x16 = Packed32<i32, U16>;
pub type i64x2 = Packed16<i64, U2>;
pub type i64x4 = Packed32<i64, U4>;
pub type i64x8 = Packed32<i64, U8>;
pub type i64x16 = Packed32<i64, U16>;
pub type wi8x2 = Packed2<Wrapping<i8>, U2>;
pub type wi8x4 = Packed4<Wrapping<i8>, U4>;
pub type wi8x8 = Packed8<Wrapping<i8>, U8>;
pub type wi8x16 = Packed16<Wrapping<i8>, U16>;
pub type wi16x2 = Packed4<Wrapping<i16>, U2>;
pub type wi16x4 = Packed8<Wrapping<i16>, U4>;
pub type wi16x8 = Packed16<Wrapping<i16>, U8>;
pub type wi16x16 = Packed32<Wrapping<i16>, U16>;
pub type wi32x2 = Packed8<Wrapping<i32>, U2>;
pub type wi32x4 = Packed16<Wrapping<i32>, U4>;
pub type wi32x8 = Packed32<Wrapping<i32>, U8>;
pub type wi32x16 = Packed32<Wrapping<i32>, U16>;
pub type wi64x2 = Packed16<Wrapping<i64>, U2>;
pub type wi64x4 = Packed32<Wrapping<i64>, U4>;
pub type wi64x8 = Packed32<Wrapping<i64>, U8>;
pub type wi64x16 = Packed32<Wrapping<i64>, U16>;
pub type f32x2 = Packed8<f32, U2>;
pub type f32x4 = Packed16<f32, U4>;
pub type f32x8 = Packed32<f32, U8>;
pub type f32x16 = Packed32<f32, U16>;
pub type f64x2 = Packed16<f64, U2>;
pub type f64x4 = Packed32<f64, U4>;
pub type f64x8 = Packed32<f64, U8>;
pub type f64x16 = Packed32<f64, U16>;
// Note: the usize/isize vectors are per-pointer-width because they need a different alignment.
#[cfg(target_pointer_width = "32")]
mod sized {
use super::*;
pub type msizex2 = Packed8<msize, U2>;
pub type msizex4 = Packed16<msize, U4>;
pub type msizex8 = Packed32<msize, U8>;
pub type msizex16 = Packed32<msize, U16>;
pub type usizex2 = Packed8<usize, U2>;
pub type usizex4 = Packed16<usize, U4>;
pub type usizex8 = Packed32<usize, U8>;
pub type usizex16 = Packed32<usize, U16>;
pub type wusizex2 = Packed8<Wrapping<usize>, U2>;
pub type wusizex4 = Packed16<Wrapping<usize>, U4>;
pub type wusizex8 = Packed32<Wrapping<usize>, U8>;
pub type wusizex16 = Packed32<Wrapping<usize>, U16>;
pub type isizex2 = Packed8<isize, U2>;
pub type isizex4 = Packed16<isize, U4>;
pub type isizex8 = Packed32<isize, U8>;
pub type isizex16 = Packed32<isize, U16>;
pub type wisizex2 = Packed8<Wrapping<isize>, U2>;
pub type wisizex4 = Packed16<Wrapping<isize>, U4>;
pub type wisizex8 = Packed32<Wrapping<isize>, U8>;
pub type wisizex16 = Packed32<Wrapping<isize>, U16>;
}
#[cfg(target_pointer_width = "64")]
mod sized {
use super::*;
pub type msizex2 = Packed16<msize, U2>;
pub type msizex4 = Packed32<msize, U4>;
pub type msizex8 = Packed32<msize, U8>;
pub type msizex16 = Packed32<msize, U16>;
pub type usizex2 = Packed16<usize, U2>;
pub type usizex4 = Packed32<usize, U4>;
pub type usizex8 = Packed32<usize, U8>;
pub type usizex16 = Packed32<usize, U16>;
pub type wusizex2 = Packed16<Wrapping<usize>, U2>;
pub type wusizex4 = Packed32<Wrapping<usize>, U4>;
pub type wusizex8 = Packed32<Wrapping<usize>, U8>;
pub type wusizex16 = Packed32<Wrapping<usize>, U16>;
pub type isizex2 = Packed16<isize, U2>;
pub type isizex4 = Packed32<isize, U4>;
pub type isizex8 = Packed32<isize, U8>;
pub type isizex16 = Packed32<isize, U16>;
pub type wisizex2 = Packed16<Wrapping<isize>, U2>;
pub type wisizex4 = Packed32<Wrapping<isize>, U4>;
pub type wisizex8 = Packed32<Wrapping<isize>, U8>;
pub type wisizex16 = Packed32<Wrapping<isize>, U16>;
}
pub use sized::*;
| true |
a755bb0294f1342b6e36aaddba6227af3b572f28
|
Rust
|
jayped007/wasm-game-of-life
|
/src/lib.rs
|
UTF-8
| 10,985 | 2.859375 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
// lib.rs -- RUST wasm interface for Conways game of life
mod utils;
use quad_rand;
use js_sys;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
use web_sys::console;
// A macro to provide `println!(..)`-style syntax for `console.log` logging.
// allows use of log! macro ==> e.g.
// log!("cell[{}, {}] is initially {:?} and has {} neighbors",
// row, col, cell, neighbors);
// log!(" it becomes {:?}", next_cell);
macro_rules! log {
( $( $t:tt )* ) => {
console::log_1(&format!( $( $t )* ).into());
}
}
// Timer generic for using web_sys::console::time and timeEnd.
// Use new() constructor to call time and
// use drop(&mut self) to call timeEnd.
// So function wrapped with Timer will automatically be timed.
// Then let _timer = Timer::new("Universe::tick");
// will cause every call to tick() to be timed and logged on console
pub struct Timer<'a> {
name: &'a str,
}
impl<'a> Timer<'a> {
pub fn new(name: &'a str) -> Timer<'a> {
console::time_with_label(name);
Timer { name }
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
console::time_end_with_label(self.name);
}
}
// Define a cell for the 'Universe', each 1 byte
// use repr(u8) to ensure 1 byte unsigned values
//
// NOTE: Define Dead value as zero and alive as one allow simple summing
// to determine how many live cells.
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Cell {
Dead = 0,
Alive = 1
}
impl Cell {
fn toggle(&mut self) {
*self = match *self {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
};
}
fn set_cell(&mut self, cell_state: Cell) {
//log!("set_cell ({:?})", cell_state);
*self = cell_state;
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum InitialPattern {
Complex1 = 0,
Random5050 = 1
}
// Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def)
// Give the width of the universe, each row of the universe is the next set
// of 'width' cells, starting with the first row from indexes 0:<width>
#[wasm_bindgen]
pub struct Universe {
width: u32, // width of each row
height: u32, // number of rows
cells: Vec<Cell>, // width*height cells, each one byte
prevcells: Vec<Cell>, // cells from previous tick
mousedown: bool // set when shift-click event, so that associated click ignored
}
// methods for Universe, but not exposed to JS
impl Universe
{
// get_index - Return 1D array index of Cell at position (row,column) in Universe
fn get_index(&self, row: u32, column: u32) -> usize
{
(row * self.width + column) as usize
}
// Count live neighbors of cell at (row, column)
fn live_neighbor_count(&self, row: u32, col: u32) -> u8
{
// avoid modulus, division slows us down as seen in profiling
let up = if row == 0 { self.height - 1 } else { row - 1 };
let down = if row == self.height - 1 { 0 } else { row + 1 };
let left = if col == 0 { self.width - 1 } else { col - 1 };
let right = if col == self.width - 1 { 0 } else { col + 1 };
let neighbors =
if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 };
neighbors
}
}
// standalone method, not part of Universe directly
fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> {
// expression generating Vec<Cell>
let cells = (0..width * height).map(|_i|
{
//if pattern == InitialPattern::Complex1 {
// // hardcode-pattern, depends on 8x8 definition
// if i % 2 == 0 || i % 7 == 0 {
// Cell::Alive
// } else {
// Cell::Dead
// }
// } else { // InitialPattern::Random5050
if quad_rand::gen_range(0, 20) == 0 {
Cell::Alive
} else {
Cell::Dead
}
// }
}).collect();
cells
}
fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> {
let count = cells.len();
let inverted_cells = (0..count).map(|i|
{
if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive }
}).collect();
inverted_cells
}
// Public methods, exposed to JS
#[wasm_bindgen]
impl Universe
{
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
// set_width -- set width of Universe, set all cells to Dead state
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells =
(0..width * self.height)
.map(|_i| Cell::Dead).collect();
}
// Set the height of the Universe, set all cells to Dead state
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells =
(0..self.width * height)
.map(|_i| Cell::Dead).collect();
}
pub fn get_cell_index(&self, row: u32, column: u32) -> u32
{
row * self.width + column
}
// return pointer to 1D array of byte Cell values to JS
// NOTE: *const Cell syntax
// => pointer to non-mutable array???
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn prevcells(&self) -> *const Cell {
self.prevcells.as_ptr()
}
pub fn tick(&mut self)
{
let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console
// NOTE: timing ended when _timer falls out of scope at end of method
let mut next = self.cells.clone(); // copy of current cells, modify ==> next state
self.prevcells = next.clone(); // previous cell values
// Determine next state of Universe by applying conways' 4 rules
for row in 0..self.height {
for col in 0..self.width {
let idx = self.get_index(row, col);
let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0)
let neighbors = self.live_neighbor_count(row, col);
let next_cell = match (cell, neighbors)
{
// Rule 1: any live cell with < 2 live neighbors dies, (loneliness)
(Cell::Alive, x) if x < 2 => Cell::Dead,
// Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable)
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// Rule 3: any live cell with > 3 live neighbors dies (overpopulation)
(Cell::Alive, x) if x > 3 => Cell::Dead,
// Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction)
(Cell::Dead, 3) => Cell::Alive,
// Otherwise -- no change
(otherwise, _) => otherwise
};
next[idx] = next_cell;
}
}
self.cells = next; // next state for Universe determined
}
// toggle cell (row, column)
pub fn toggle_cell(&mut self, row: u32, column: u32) {
let idx = self.get_index(row, column);
self.cells[idx].toggle();
}
pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) {
let idx = self.get_index(row, column);
self.cells[idx].set_cell(valu);
}
// allow JS to determine if mousedown event occurring (shift-click)
pub fn is_mousedown(&self) -> bool {
return self.mousedown;
}
// allow JS to reset the mousedown value
pub fn set_mousedown_value(&mut self, valu: bool) {
self.mousedown = valu;
}
// Constructor, initialize the universe to hard-coded pattern
pub fn new() -> Universe
{
utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message
let now = js_sys::Date::now();
let now_date = js_sys::Date::new(&JsValue::from_f64(now));
let ms_u64: u64 = now_date.get_milliseconds() as u64;
quad_rand::srand(ms_u64); // u64
let width = 128; // was 64
let height = 128;
// Randomly decide whether to use Complex1 or Random5050
let _pattern: InitialPattern =
if quad_rand::gen_range(0, 2) == 0 {
InitialPattern::Complex1
} else {
InitialPattern::Random5050
};
let pattern = InitialPattern::Random5050;
let cells = generate_cells(width, height, pattern);
let prevcells = invert_cells(&cells);
let mousedown = false;
Universe
{
width,
height,
cells,
prevcells,
mousedown
}
}
pub fn reset_board(&mut self, pattern: InitialPattern) {
log!("reset_board() : {:?}", pattern);
let width = self.width();
let height = self.height();
self.prevcells = self.cells.clone(); // current grid, needed for correct redraw
self.cells = generate_cells(width, height, pattern);
}
}
// impl Universe block w/o wasm_bindgen attribute
// Needed for testing -- don't expose to our JS.
// Rust-generated WebAsm functions cannot return borrowed references.
// NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with
// the wasm_bindgen attribute and examine errors.
// NOTE: get_cells returns borrowed reference &self.cells
impl Universe {
/// Get the dead and alive values of the entire universe.
pub fn get_cells(&self) -> &[Cell] {
&self.cells
}
/// Set specific cells in a universe to Alive, give slice of (row,col) Tuples.
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
// NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive
// claims immutable borrow on self.get_index call and
// mutable borrow later used here. (I don't follow personally.)
}
}
}
| true |
c6b9a7f55dcae3240ec8fac5328390db1a67f656
|
Rust
|
LordSentox/othello
|
/src/srv/gamehandler.rs
|
UTF-8
| 4,011 | 3.078125 | 3 |
[] |
no_license
|
use std::sync::{Arc, Weak, Mutex};
use super::{Game, NetHandler};
use packets::*;
use std::collections::{HashSet, VecDeque};
pub struct GameHandler {
nethandler: Arc<NetHandler>,
games: Vec<Weak<Game>>,
/// All pending requests the first id is the requester, the second the requestee who has not
/// yet answered.
pending: HashSet<(ClientId, ClientId)>,
packets: Arc<Mutex<VecDeque<(ClientId, Packet)>>>
}
impl GameHandler {
pub fn new(nethandler: Arc<NetHandler>) -> GameHandler {
// Subscribe to the NetHandler, then return the GameHandler with an empty games list, since
// naturally nothing has been requested yet.
let packets = Arc::new(Mutex::new(VecDeque::new()));
nethandler.subscribe(Arc::downgrade(&packets));
GameHandler {
nethandler: nethandler,
games: Vec::new(),
pending: HashSet::new(),
packets: packets
}
}
pub fn handle_packets(&mut self) {
loop {
let (client, packet) = match self.packets.lock().unwrap().pop_front() {
Some(cp) => cp,
None => break
};
match packet {
Packet::Disconnect => self.handle_disconnect(client),
Packet::RequestGame(to) => self.handle_game_request(client, to),
Packet::DenyGame(to) => self.handle_deny_game(client, to),
_ => {}
}
}
// Check for games that are no longer running, to prevent memory leakage in form of the
// Games-Vector just groing with long dead games.
self.games.retain(|ref game| { game.upgrade().is_some() });
}
fn handle_disconnect(&mut self, client: ClientId) {
// All game requests to the client will be denied.
for &(from, to) in &self.pending {
if to == client {
self.nethandler.send(from, &Packet::DenyGame(to));
}
}
// Remove all game requests the client was involved in.
self.pending.retain(|&(ref from, ref to)| { *from != client && *to != client });
}
fn handle_game_request(&mut self, from: ClientId, to: ClientId) {
// In case the request has already been made, it can be ignored.
if self.pending.contains(&(from, to)) {
println!("Duplicate game request from [{}] to [{}] was ignored. Still awaiting answer.", from, to);
return;
}
if self.pending.contains(&(to, from)) {
self.pending.remove(&(to, from));
println!("Starting game between [{}] and [{}]", from, to);
// There has been no explicit response, but since both have requested a game from the
// other client, we can assume that the game can be started.
self.start_game(from, to);
}
self.pending.insert((from, to));
println!("Added game request from [{}] to [{}]", from, to);
self.nethandler.send(to, &Packet::RequestGame(from));
}
fn handle_deny_game(&mut self, from: ClientId, to: ClientId) {
if !self.pending.remove(&(to, from)) {
// There was no request, so there is nothing to deny.
println!("[WARNING] Blocked DenyGame packet, since there has never been a request.");
return;
}
// There was a request, so inform the one it has been denied from of it.
self.nethandler.send(to, &Packet::DenyGame(from));
}
fn start_game(&mut self, client1: ClientId, client2: ClientId) {
let client1 = match self.nethandler.get_client(client1) {
Some(c) => c,
None => return
};
let client2 = match self.nethandler.get_client(client2) {
Some(c) => c,
None => return
};
let game = match Game::new(client1, client2) {
Some(g) => g,
None => return
};
// The game has been started successfully. Add it to the games of this GameHandler.
self.games.push(game);
}
}
| true |
5b76dbaffc58d2525992c546a43f073b2109f399
|
Rust
|
nguyenvanquanthinh97/rust-1
|
/data_structures/src/unions.rs
|
UTF-8
| 920 | 3.78125 | 4 |
[] |
no_license
|
// union will take memory depend on the largest part
// This will take 32 bits == 4 bytes
// The problem of union is not about initiate or update value of union
// The problem is about "How we get the value of union"
// For instance:
// we can update and changing value of iof
// as iof.i = 40 or iof.f = 30.5
// However, when we get value of union as println!("{}", iof.f)
// while iof is set to i with value is 1
// union will intepreter convert bit of i -> f
// the result will be: i:1 -> f:0.000000...01 (NOT the value you want)
// So you have to access union's value under unsafe scope
union IntOrFloat {
i: i32,
f: f32
}
// union
fn process_value(iof: IntOrFloat) {
unsafe {
match iof {
IntOrFloat {i: 30} => println!("meaning of life value"),
IntOrFloat {f} => println!("f = {}", f)
}
}
}
pub fn unions() {
let mut iof = IntOrFloat {f: 25.5};
iof.i = 30;
process_value(iof);
}
| true |
3085f148e21034a259869f8cdb5817f7413c972e
|
Rust
|
ChristianBeilschmidt/geo-processing-incubator
|
/mappers/src/errors.rs
|
UTF-8
| 708 | 2.625 | 3 |
[] |
no_license
|
use gdal::errors::Error as GdalError;
use serde_json::error::Error as SerdeJsonError;
error_chain! {
types {
Error, ErrorKind, ResultExt, Result;
}
foreign_links {
Io(::std::io::Error);
SerdeJson(SerdeJsonError);
GdalError(GdalError);
}
errors {
UnknownDataset(name: String) {
description("The requested dataset is unknown.")
display("There is no dataset with name: '{}'", name)
}
MissingWmsParam(param: &'static str) {
description("A mandatory WMS parameter is missing.")
display("The following WMS parameter is missing: '{}'", param)
}
}
}
| true |
f45981357305b0289fd4be3c598205f45dbb07b1
|
Rust
|
digitalarche/rustdns
|
/dig/main.rs
|
UTF-8
| 8,348 | 2.890625 | 3 |
[
"Apache-2.0"
] |
permissive
|
// Simple dig style command line.
// rustdns {record} {domain}
mod util;
use http::method::Method;
use rustdns::clients::Exchanger;
use rustdns::clients::*;
use rustdns::types::*;
use std::env;
use std::fmt;
use std::io;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use std::process;
use std::str::FromStr;
use std::vec;
use strum_macros::{Display, EnumString};
use url::Url;
#[cfg(test)]
#[macro_use]
extern crate pretty_assertions;
#[derive(Display, EnumString, PartialEq)]
enum Client {
Udp,
Tcp,
DoH,
}
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
struct Args {
client: Client,
servers: Vec<String>,
/// Query this types
r#type: rustdns::Type,
/// Across all these domains
domains: Vec<String>,
}
#[derive(Debug)]
struct ArgParseError {
details: String,
}
impl fmt::Display for ArgParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.details)
}
}
impl std::error::Error for ArgParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
/// Parses a string into a SocketAddr allowing for the port to be missing.
fn sockaddr_parse_with_port(
addr: &str,
default_port: u16,
) -> io::Result<vec::IntoIter<SocketAddr>> {
match addr.to_socket_addrs() {
// Try parsing again, with the default port.
Err(_e) => (addr, default_port).to_socket_addrs(),
Ok(addrs) => Ok(addrs),
}
}
/// Helper function to take a vector of domain/port numbers, and return (a possibly larger) `Vec[SocketAddr]`.
fn to_sockaddrs(
servers: Vec<String>,
default_port: u16,
) -> std::result::Result<Vec<SocketAddr>, ArgParseError> {
Ok(servers
.iter()
.map(|addr| {
// Each address could be invalid, or return multiple SocketAddr.
match sockaddr_parse_with_port(addr, default_port) {
Err(e) => Err(ArgParseError {
details: format!("failed to parse '{}': {}", addr, e),
}),
Ok(addrs) => Ok(addrs),
}
})
.collect::<std::result::Result<Vec<_>, _>>()?
// We now have a collection of vec::IntoIter<SocketAddr>, flatten.
// We would use .flat_map(), but it doesn't handle the Error case :(
.into_iter()
.flatten()
.collect())
}
impl Args {
/// Helper function to return the list of servers as a `Vec[Url]`.
fn servers_to_urls(&self) -> std::result::Result<Vec<Url>, url::ParseError> {
self.servers.iter().map(|x| x.parse()).collect()
}
}
impl Default for Args {
fn default() -> Self {
Args {
client: Client::Udp,
servers: Vec::new(),
r#type: Type::A,
domains: Vec::new(),
}
}
}
// TODO Move into a integration test (due to the use of network)
#[test]
fn test_to_sockaddrs() {
let mut servers = Vec::new();
servers.push("1.2.3.4".to_string()); // This requires using the default port.
servers.push("aaaaa.bramp.net".to_string()); // This resolves to two records.
servers.push("5.6.7.8:453".to_string()); // This uses a different port.
// This test may be flakly, if it is running in an environment that doesn't
// have both IPv4 and IPv6, and has DNS queries that can fail.
// TODO Figure out a way to make this more robust.
let mut addrs = to_sockaddrs(servers, 53).expect("resolution failed");
let mut want = vec![
"1.2.3.4:53".parse().unwrap(),
"127.0.0.1:53".parse().unwrap(),
"[::1]:53".parse().unwrap(),
"5.6.7.8:453".parse().unwrap(),
];
// Sort because [::1]:53 or 127.0.0.1:53 may switch places.
addrs.sort();
want.sort();
assert_eq!(addrs, want);
}
fn parse_args(args: impl Iterator<Item = String>) -> Result<Args> {
let mut result = Args::default();
let mut type_or_domain = Vec::<String>::new();
for arg in args {
match arg.as_str() {
"+udp" => result.client = Client::Udp,
"+tcp" => result.client = Client::Tcp,
"+doh" => result.client = Client::DoH,
_ => {
if arg.starts_with('+') {
return Err(format!("Unknown flag: {}", arg).into());
}
if arg.starts_with('@') {
result
.servers
.push(arg.strip_prefix("@").unwrap().to_string()) // Unwrap should not panic
} else {
type_or_domain.push(arg)
}
}
}
}
let mut found_type = false;
// To be useful, we allow users to say `dig A bramp.net` or `dig bramp.net A`
for arg in type_or_domain {
if !found_type {
// Use the first type we found and assume the rest are domains.
if let Ok(r#type) = Type::from_str(&arg) {
result.r#type = r#type;
found_type = true;
continue;
}
}
result.domains.push(arg)
}
if result.domains.is_empty() {
// By default query the root domain
result.domains.push(".".to_string());
if !found_type {
result.r#type = Type::NS;
}
}
if result.servers.is_empty() {
// TODO If no servers are provided determine the local server (from /etc/nslookup.conf for example)
eprintln!(";; No servers specified, using Google's DNS servers");
match result.client {
Client::Udp | Client::Tcp => {
result.servers.push("8.8.8.8".to_string());
result.servers.push("8.8.4.4".to_string());
result.servers.push("2001:4860:4860::8888".to_string());
result.servers.push("2001:4860:4860::8844".to_string());
}
Client::DoH => result
.servers
.push("https://dns.google/dns-query".to_string()),
}
/*
// TODO Create a function that returns the appropriate ones from this list:
Cisco OpenDNS:
208.67.222.222 and 208.67.220.220; TCP/UDP
https://doh.opendns.com/dns-query
Cloudflare:
1.1.1.1 and 1.0.0.1;
2606:4700:4700::1111
2606:4700:4700::1001
https://cloudflare-dns.com/dns-query
Google Public DNS:
8.8.8.8 and 8.8.4.4; and
2001:4860:4860::8888
2001:4860:4860::8844
https://dns.google/dns-query
Quad9: 9.9.9.9 and 149.112.112.112.
2620:fe::fe
2620:fe::9
https://dns.quad9.net/dns-query
tls://dns.quad9.net
*/
}
Ok(result)
}
#[tokio::main]
async fn main() -> Result<()> {
let args = match parse_args(env::args().skip(1)) {
Ok(args) => args,
Err(e) => {
eprintln!("{}", e);
eprintln!("Usage: dig [@server] {{domain}} {{type}}");
process::exit(1);
}
};
let mut query = Message::default();
for domain in &args.domains {
query.add_question(&domain, args.r#type, Class::Internet);
}
query.add_extension(Extension {
payload_size: 4096,
..Default::default()
});
println!("query:");
util::hexdump(&query.to_vec().expect("failed to encode the query"));
println!();
println!("{}", query);
// TODO make all DNS client implement a Exchange trait
let resp = match args.client {
Client::Udp => UdpClient::new(to_sockaddrs(args.servers, 53)?.as_slice())?
.exchange(&query)
.expect("could not exchange message"),
Client::Tcp => TcpClient::new(to_sockaddrs(args.servers, 53)?.as_slice())?
.exchange(&query)
.expect("could not exchange message"),
Client::DoH => DoHClient::new(args.servers_to_urls()?.as_slice(), Method::GET)?
.exchange(&query)
.await
.expect("could not exchange message"),
};
println!("response:");
println!("{}", resp);
Ok(())
}
| true |
c49f6989799b87e9bd0d36c147938d5faaaf6f92
|
Rust
|
Drumato/asmpeach
|
/src/assembler/tests/sib_byte_tests.rs
|
UTF-8
| 595 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
#[cfg(test)]
mod format_tests {
use crate::assembler::resource::*;
#[test]
fn display_sib_byte_test() {
let byte = SIBByte {
base_reg: 0,
index_reg: 2,
scale: 4,
};
assert_eq!("SIB(0b10010000)", format!("{}", byte).as_str())
}
#[test]
fn debug_sib_byte_test() {
let byte = SIBByte {
base_reg: 0,
index_reg: 2,
scale: 4,
};
assert_eq!(
"SIB(base 0b0: index 0b10: 4x scale)",
format!("{:?}", byte).as_str()
)
}
}
| true |
f4f355ab9e7cd22f420ce0e5c5809ebd16cef766
|
Rust
|
katticot/data-loader
|
/src/bin/server.rs
|
UTF-8
| 3,666 | 2.546875 | 3 |
[] |
no_license
|
use actix_web::{get, web, App, HttpResponse, HttpServer, Responder};
extern crate database;
use crate::database::{elastic, mongo, postgres, Connectable, Load, Push};
use dotenv::dotenv;
use std::env;
use std::time::{Duration, Instant};
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| {
App::new()
.service(hey)
.service(elastic_handler)
.service(mongo_hanler)
.route("/", web::get().to(|| HttpResponse::Ok().body("coucou")))
})
.bind("127.0.0.1:9293")?
.run()
.await
}
#[get("/hey")]
async fn hey() -> impl Responder {
HttpResponse::Ok().body("Hello world!")
}
#[get("/elastic")]
async fn elastic_handler() -> impl Responder {
dotenv().ok();
let start = Instant::now();
let postgres_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
let elasticsearch_url = env::var("ELASTICSEARCH_URL").expect("ELASTICSEARCH_URL must be set");
let data_limit = env::var("LIMIT").expect("LIMIT must be set");
let postgres_duration = start.elapsed();
let limit: i64 = data_limit.parse().unwrap();
let postgres_database = postgres::new(&postgres_url);
println!("size is {}", postgres_database.get_size());
let postgres_load_duration = Instant::now();
println!(
"postgres connection is {}",
postgres_database.check_connection()
);
let data = postgres_database.load(limit);
let postgres_load_duration = postgres_load_duration.elapsed();
let elastic_load_duration = Instant::now();
println!(
"load data into ElastiSearch in {} bulk requests",
data.len()
);
let elastic = elastic::new(&elasticsearch_url);
match elastic.bulky(&postgres_database).await {
Ok(a) => a,
_ => unreachable!(),
};
let elastic_load_duration: Duration = elastic_load_duration.elapsed();
let full_duration: Duration = start.elapsed();
println!("lancement postgres {:?}", postgres_duration);
println!("chargement postgres {:?}", postgres_load_duration);
println!("chargement elastic {:?}", elastic_load_duration);
println!("chargement global {:?}", full_duration);
HttpResponse::Ok().body("Load lancé")
}
#[get("/mongo")]
async fn mongo_hanler() -> impl Responder {
dotenv().ok();
let start = Instant::now();
let postgres_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
let data_limit = env::var("LIMIT").expect("LIMIT must be set");
let postgres_duration = start.elapsed();
let limit: i64 = data_limit.parse().unwrap();
let postgres_database = postgres::new(&postgres_url);
println!("size is {}", postgres_database.get_size());
let postgres_load_duration = Instant::now();
println!(
"postgres connection is {}",
postgres_database.check_connection()
);
let data = postgres_database.load(limit);
let postgres_load_duration = postgres_load_duration.elapsed();
let mongo_load_duration = Instant::now();
println!("load data into MongoDB");
let mongo_database = mongo::new("10.31.70.236", 27017);
match mongo_database.push(&postgres_database).await {
Ok(a) => a,
_ => unreachable!(),
};
let mongo_load_duration: Duration = mongo_load_duration.elapsed();
let full_duration: Duration = start.elapsed();
println!("lancement postgres {:?}", postgres_duration);
println!("chargement postgres {:?}", postgres_load_duration);
println!("chargement mongo {:?}", mongo_load_duration);
println!("chargement global {:?}", full_duration);
HttpResponse::Ok().body("Load mongo effectué")
}
| true |
21dd2f6389b757dfa58a8eac1be914337f282c46
|
Rust
|
dennisss/dacha
|
/pkg/executor/src/linux/channel.rs
|
UTF-8
| 717 | 2.90625 | 3 |
[
"Apache-2.0"
] |
permissive
|
use common::async_std::channel;
pub struct Channel<T> {
sender: channel::Sender<T>,
receiver: channel::Receiver<T>,
}
impl<T> Channel<T> {
pub fn new() -> Self {
let (sender, receiver) = channel::bounded(1);
Self { sender, receiver }
}
pub async fn try_send(&self, value: T) -> bool {
self.sender.try_send(value).is_ok()
}
pub async fn send(&self, value: T) {
let _ = self.sender.send(value).await;
}
pub async fn try_recv(&self) -> Option<T> {
match self.receiver.try_recv() {
Ok(v) => Some(v),
_ => None,
}
}
pub async fn recv(&self) -> T {
self.receiver.recv().await.unwrap()
}
}
| true |
9aaf77fd99264885c6a52831978c354fa22a0c86
|
Rust
|
luizinhoab/order-risk-assessment-ms
|
/src/interface/documents.rs
|
UTF-8
| 1,577 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
use crate::app::domain::models::Risk;
use regex::Regex;
use uuid::Uuid;
use validator::Validate;
lazy_static! {
static ref ISO8601: Regex = Regex::new(r"^([1][9][0-9]{2}|[2][0-9]{3})-([1-9]|([0][1-9]|[1][0-2]))-(([0][1-9]|[1][0-9]|[2][0-9]|[3][0-1])) (\d{2}):(\d{2}):(\d{2})$").unwrap();
}
#[derive(Debug, Clone, Validate, Deserialize, Serialize)]
pub struct RiskRequestBody {
pub order_number: i32,
pub customer_id: Option<Uuid>,
#[validate(length(min = 1, message = "Invalid name"))]
pub customer_name: String,
#[validate(length(min = 11, max = 11, message = "Invalid cpf"))]
pub customer_cpf: String,
#[validate(length(max = 16))]
pub card_number: String,
pub card_holder_name: String,
#[validate(length(min = 4, max = 4, message = "Invalid card expiration date"))]
pub card_expiration: String,
pub value: f32,
#[validate(regex = "ISO8601")]
pub creation_date_order: String,
}
impl RiskRequestBody {
pub fn map_to_domain(&self) -> Risk {
Risk {
order_number: self.order_number,
customer_id: self.customer_id,
customer_name: self.customer_name.clone(),
customer_cpf: self.customer_cpf.clone(),
card_number: self.card_number.clone(),
card_holder_name: self.card_holder_name.clone(),
value: self.value,
creation_date_order: chrono::NaiveDateTime::parse_from_str(
self.creation_date_order.as_ref(),
"%Y-%m-%d %H:%M:%S",
)
.unwrap(),
}
}
}
| true |
f31f57dce2d5a9be8ea7f3fe741060be432ee062
|
Rust
|
luksab/cargo_bot_simulator
|
/src/main.rs
|
UTF-8
| 1,806 | 2.765625 | 3 |
[] |
no_license
|
use std::time::Instant;
// TODO: stack optimieren (max größe, ältestes löschen)
// TODO: brute force
use cargo_bot_simulator::{CbInterpret, FinishState, StepState};
fn main() {
// let mut cb = CbInterpret::<5>::new("q.a>q1", "yy,n,n,n,n", "y,n,n,n,y").unwrap();
// let mut cb = CbInterpret::<4>::new("q.a>q1", "n,rrr,bbb,ggg,n", "rrr,bbb,ggg,n").unwrap();
let mut cb = CbInterpret::<4>::new("q.a>q1", "n,rrb,n,rbb", "b,rr,bb,r").unwrap();
//let mut cb = CbInterpret::<5>::new("qdq>q<qdq1", "y,n,n,n,n", "n,n,n,n,y").unwrap();
println!("{:0b}", cb.data[0]);
println!("{}", cb.print_crane());
println!("{}", cb.print_data());
// for i in 0..10 {
// cb.step();
// println!("{:?}", cb);
// for d in cb.data.iter() {
// //println!("{:?}", d);
// }
// }
let now = Instant::now();
// let mut steps = 0;
// while cb.step() == StepState::Normal {
// println!("{:?}", cb);
// steps += 1;
// println!("{}", cb.print_crane());
// println!("{}", cb.print_data());
// if steps == 10{
// return;
// }
// }
// let steps = match cb.run_all() {
// FinishState::Crashed(i) => {println!("Crashed");i},
// FinishState::Finished(i) => {println!("Finished");i},
// FinishState::Limited(i) => {println!("Limited");i},
// };
let steps = 0;
cb.brute_force();
println!("{:?}", cb);
let took = now.elapsed().as_nanos();
// println!("{:?}", cb);
println!("{}", cb.print_crane());
println!("{}", cb.print_data());
println!("{}", cb.print_inst());
println!(
"simulating {} steps took {}ns, that's {:.2}ns per step",
steps,
took,
took as f64 / steps as f64
);
}
| true |
685a6613f853d001e958de0c43a455ae3cc35467
|
Rust
|
alamont/rustray
|
/src/aarect.rs
|
UTF-8
| 2,194 | 2.9375 | 3 |
[] |
no_license
|
use crate::aabb::AABB;
use crate::hittable::{HitRecord, Hittable};
use crate::material::Material;
use crate::ray::Ray;
use crate::vec::{vec, vec3};
use nalgebra::{Vector2, Vector3};
use std::f32;
use std::sync::Arc;
pub enum AARectType {
XY,
XZ,
YZ,
}
pub struct AARect {
pub xy0: Vector2<f32>,
pub xy1: Vector2<f32>,
pub k: f32,
pub material: Arc<dyn Material>,
pub rect_type: AARectType
}
impl Hittable for AARect {
fn hit(&self, ray: &Ray, t_min: f32, t_max: f32) -> Option<HitRecord> {
use AARectType::*;
let t = match &self.rect_type {
XY => (self.k - ray.origin().z) / ray.direction().z,
XZ => (self.k - ray.origin().y) / ray.direction().y,
YZ => (self.k - ray.origin().x) / ray.direction().x,
};
if t < t_min || t > t_max {
return None;
}
let xy = match &self.rect_type {
XY => ray.origin().xy() + t * ray.direction().xy(),
XZ => ray.origin().xz() + t * ray.direction().xz(),
YZ => ray.origin().yz() + t * ray.direction().yz(),
};
if xy.x < self.xy0.x || xy.x > self.xy1.x || xy.y < self.xy0.y || xy.y > self.xy1.y {
return None;
}
let uv = (xy - self.xy0).component_div(&(self.xy1 - self.xy0));
let p = ray.at(t);
let outward_normal = match &self.rect_type {
XY => vec(0.0, 0.0, 1.0),
XZ => vec(0.0, -1.0, 0.0),
YZ => vec(1.0, 0.0, 0.0),
};
Some(HitRecord::new(
t,
p,
outward_normal,
ray,
Arc::clone(&self.material),
uv
))
}
fn bounding_box(&self) -> Option<AABB> {
use AARectType::*;
let min = vec(self.xy0.x, self.xy0.y, self.k - 0.0001);
let max = vec(self.xy1.x, self.xy1.y, self.k + 0.0001);
match &self.rect_type {
XY => Some(AABB { min, max }),
XZ => Some(AABB { min: min.xzy(), max: max.xzy() }),
YZ => Some(AABB { min: min.zxy(), max: max.zxy() }),
}
}
}
| true |
2bef356503e59ede790cc39244e0f4349d25b2f5
|
Rust
|
Lapz/tox
|
/parser/src/parser/expressions/unary.rs
|
UTF-8
| 1,017 | 3 | 3 |
[
"MIT"
] |
permissive
|
use syntax::T;
use crate::parser::pratt::{Precedence, PrefixParser};
use crate::parser::{Parser, Restrictions};
use crate::SyntaxKind::*;
impl<'a> Parser<'a> {
pub(crate) fn parse_unary_op(&mut self) {
match self.current() {
T![-] | T![!] => self.bump(),
_ => self.error(
"Expected one of `-` | `!`",
format!(
"Expected one of `-` | `!` but instead found `{:?}`",
self.current_string()
),
),
}
}
}
#[derive(Debug)]
pub struct UnaryParselet;
impl PrefixParser for UnaryParselet {
fn parse(&self, parser: &mut Parser) {
parser.start_node(PREFIX_EXPR);
parser.parse_unary_op();
parser.parse_expression(Precedence::Unary, Restrictions::default());
parser.finish_node();
}
}
#[cfg(test)]
mod tests {
test_parser! {parse_unary_expr,"fn main() {!true;-1}"}
test_parser! {parse_nested_unary_expr,"fn main() {!!true;}"}
}
| true |
8351e8d2ff0eedba463a542cf4ba8e8db0eacafa
|
Rust
|
xpader/rust-web-scaffolding
|
/src/base/rand.rs
|
UTF-8
| 138 | 2.625 | 3 |
[] |
no_license
|
use rand::Rng;
pub fn gen_rand(low: usize, high: usize) -> usize {
let mut rng = rand::thread_rng();
rng.gen_range(low, high+1)
}
| true |
309d0168fa7af99bfc469eec4308041b4144f21a
|
Rust
|
passcod/reasonable.kiwi
|
/active-ttl-cache/src/lib.rs
|
UTF-8
| 4,093 | 3.109375 | 3 |
[
"Artistic-2.0"
] |
permissive
|
use std::hash::Hash;
use std::collections::HashMap;
use crossbeam_channel::{Sender, bounded, unbounded};
use std::marker::PhantomData;
use std::time::Duration;
use std::thread::{sleep, spawn};
use std::ops::Deref;
pub struct Entry<K: Clone, V> {
notice: Sender<Event<K, V>>,
inner: V,
key: K,
}
impl<K, V> Deref for Entry<K, V>
where K: Clone,
V: Sized
{
type Target = V;
#[inline]
fn deref(&self) -> &V {
&self.inner
}
}
impl<K, V> Drop for Entry<K, V>
where K: Clone {
fn drop(&mut self) {
self.notice.send(Event::Decr(self.key.clone()))
.expect("Internal cache error: drop");
}
}
enum Event<K, V> {
Get(K, Sender<Option<V>>),
Decr(K),
Expire(K),
}
#[derive(Clone)]
pub struct Handle<K, V> {
sender: Sender<Event<K, V>>,
pk: PhantomData<K>,
pv: PhantomData<V>,
}
impl<K, V> Handle<K, V>
where K: Clone {
fn new(sender: Sender<Event<K, V>>) -> Self {
Self { sender, pk: PhantomData, pv: PhantomData }
}
pub fn get(&self, key: K) -> Option<Entry<K, V>> {
let (s, r) = bounded(1);
self.sender.send(Event::Get(key.clone(), s)).expect("Internal cache error: get send");
let notice = self.sender.clone();
r.recv().expect("Internal cache error: get recv").map(|inner| {
Entry { notice, inner, key }
})
}
}
struct Cache<K: Hash + Eq, V, F> {
inner: HashMap<K, (usize, V)>,
generator: F,
pub ttd: Duration, // time-to-die
}
pub fn start<K, V, F>(generator: F, ttd: Duration) -> Handle<K, V>
where K: 'static + Send + Clone + Hash + Eq,
V: 'static + Send + Clone,
F: 'static + Send + FnMut(K) -> Option<V>
{
Cache::start(generator, ttd)
}
impl<K, V, F> Cache<K, V, F>
where K: 'static + Send + Clone + Hash + Eq,
V: 'static + Send + Clone,
F: 'static + Send + FnMut(K) -> Option<V>
{
fn start(generator: F, ttd: Duration) -> Handle<K, V> {
let (s, r) = unbounded();
let mains = s.clone();
let handle = Handle::new(s);
spawn(move || {
let r = r;
let mut cache = Self {
inner: HashMap::new(),
generator,
ttd,
};
loop {
match r.recv().expect("Internal cache error: main recv") {
Event::Get(k, s) => {
let v = cache.get(k);
s.send(v).expect("Internal cache error: main send");
},
Event::Decr(k) => {
let expk = k.clone();
if cache.decr(k) {
let s = mains.clone();
let ttd = cache.ttd.clone();
spawn(move || {
sleep(ttd);
s.send(Event::Expire(expk)).expect("Internal cache error: expire");
});
}
},
Event::Expire(k) => {
cache.expire(k);
}
};
}
});
handle
}
fn get(&mut self, k: K) -> Option<V> {
if let Some((_, v)) = self.inner.get(&k) {
let v = v.clone();
self.incr(k);
Some(v)
} else if let Some(v) = (self.generator)(k.clone()) {
self.inner.insert(k, (0, v.clone()));
Some(v)
} else {
None
}
}
fn incr(&mut self, k: K) {
self.inner.entry(k).and_modify(|(rc, _)| { *rc += 1 });
}
fn decr(&mut self, k: K) -> bool {
self.inner.entry(k.clone()).and_modify(|(rc, _)| { *rc -= 1 });
if let Some((rc, _)) = self.inner.get(&k) {
*rc == 0
} else {
false
}
}
fn expire(&mut self, k: K) {
if self.inner.get(&k).map(|(rc, _)| *rc).unwrap_or(1) == 0 {
self.inner.remove(&k);
}
}
}
| true |
73c678d9f190c4f8e49880eae527d56ea137b7e5
|
Rust
|
QPC-database/ion-rust
|
/src/binary/uint.rs
|
UTF-8
| 6,445 | 3.75 | 4 |
[
"Apache-2.0"
] |
permissive
|
use std::io::Write;
use std::mem;
use crate::data_source::IonDataSource;
use crate::result::{decoding_error, IonResult};
type UIntStorage = u64;
const MAX_UINT_SIZE_IN_BYTES: usize = mem::size_of::<UIntStorage>();
/// Represents a fixed-length unsigned integer. See the
/// [UInt and Int Fields](http://amzn.github.io/ion-docs/docs/binary.html#uint-and-int-fields)
/// section of the binary Ion spec for more details.
#[derive(Debug)]
pub struct DecodedUInt {
size_in_bytes: usize,
value: UIntStorage,
}
impl DecodedUInt {
/// Reads a UInt with `length` bytes from the provided data source.
pub fn read<R: IonDataSource>(data_source: &mut R, length: usize) -> IonResult<DecodedUInt> {
if length > MAX_UINT_SIZE_IN_BYTES {
return decoding_error(format!(
"Found a {}-byte UInt. Max supported size is {} bytes.",
length, MAX_UINT_SIZE_IN_BYTES
));
}
// Create a stack-allocated buffer to hold the data we're going to read in.
let mut buffer = [0u8; MAX_UINT_SIZE_IN_BYTES];
// Get a mutable reference to a portion of the buffer just big enough to fit
// the requested number of bytes.
let buffer = &mut buffer[0..length];
let mut magnitude: UIntStorage = 0;
data_source.read_exact(buffer)?;
for &byte in buffer.iter() {
let byte = u64::from(byte);
magnitude <<= 8;
magnitude |= byte;
}
Ok(DecodedUInt {
size_in_bytes: length,
value: magnitude,
})
}
/// Encodes the provided `magnitude` as a UInt and writes it to the provided `sink`.
pub fn write_u64<W: Write>(sink: &mut W, magnitude: u64) -> IonResult<usize> {
let encoded = encode_uint(magnitude);
let bytes_to_write = encoded.as_ref();
sink.write_all(bytes_to_write)?;
Ok(bytes_to_write.len())
}
/// Returns the magnitude of the unsigned integer.
#[inline(always)]
pub fn value(&self) -> UIntStorage {
self.value
}
/// Returns the number of bytes that were read from the data source to construct this
/// unsigned integer.
#[inline(always)]
pub fn size_in_bytes(&self) -> usize {
self.size_in_bytes
}
}
/// The big-endian, compact slice of bytes for a UInt (`u64`). Leading zero
/// octets are not part of the representation. See the [spec] for more
/// information.
///
/// [spec]: https://amzn.github.io/ion-docs/docs/binary.html#uint-and-int-fields
#[derive(Copy, Clone, Debug)]
pub struct EncodedUInt {
be_bytes: [u8; mem::size_of::<u64>()],
first_occupied_byte: usize,
}
impl EncodedUInt {
/// Returns the slice view of the encoded UInt.
pub fn as_bytes(&self) -> &[u8] {
&self.be_bytes[self.first_occupied_byte..]
}
}
impl AsRef<[u8]> for EncodedUInt {
/// The same as [`as_bytes`].
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
/// Returns the magnitude as big-endian bytes.
///
/// ```
/// use ion_rs::binary::uint;
///
/// let repr = uint::encode_uint(5u64);
/// assert_eq!(&[0x05], repr.as_bytes());
///
/// let two_bytes = uint::encode_uint(256u64);
/// assert_eq!(&[0x01, 0x00], two_bytes.as_bytes());
/// ```
pub fn encode_uint(magnitude: u64) -> EncodedUInt {
// We can divide the number of leading zero bits by 8
// to to get the number of leading zero bytes.
let empty_leading_bytes: u32 = magnitude.leading_zeros() / 8;
let first_occupied_byte = empty_leading_bytes as usize;
let magnitude_bytes: [u8; mem::size_of::<u64>()] = magnitude.to_be_bytes();
EncodedUInt {
be_bytes: magnitude_bytes,
first_occupied_byte,
}
}
#[cfg(test)]
mod tests {
use super::DecodedUInt;
use std::io::Cursor;
const READ_ERROR_MESSAGE: &str = "Failed to read a UInt from the provided cursor.";
const WRITE_ERROR_MESSAGE: &str = "Writing a UInt to the provided sink failed.";
#[test]
fn test_read_one_byte_uint() {
let data = &[0b1000_0000];
let uint = DecodedUInt::read(&mut Cursor::new(data), data.len()).expect(READ_ERROR_MESSAGE);
assert_eq!(uint.size_in_bytes(), 1);
assert_eq!(uint.value(), 128);
}
#[test]
fn test_read_two_byte_uint() {
let data = &[0b0111_1111, 0b1111_1111];
let uint = DecodedUInt::read(&mut Cursor::new(data), data.len()).expect(READ_ERROR_MESSAGE);
assert_eq!(uint.size_in_bytes(), 2);
assert_eq!(uint.value(), 32_767);
}
#[test]
fn test_read_three_byte_uint() {
let data = &[0b0011_1100, 0b1000_0111, 0b1000_0001];
let uint = DecodedUInt::read(&mut Cursor::new(data), data.len()).expect(READ_ERROR_MESSAGE);
assert_eq!(uint.size_in_bytes(), 3);
assert_eq!(uint.value(), 3_966_849);
}
#[test]
fn test_read_uint_overflow() {
let data = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01];
let _uint = DecodedUInt::read(&mut Cursor::new(data), data.len())
.expect_err("This should have failed due to overflow.");
}
#[test]
fn test_write_eight_byte_uint() {
let value = 0x01_23_45_67_89_AB_CD_EF;
let mut buffer: Vec<u8> = vec![];
DecodedUInt::write_u64(&mut buffer, value).expect(WRITE_ERROR_MESSAGE);
let expected_bytes = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF];
assert_eq!(expected_bytes, buffer.as_slice());
}
#[test]
fn test_write_five_byte_uint() {
let value = 0x01_23_45_67_89;
let mut buffer: Vec<u8> = vec![];
DecodedUInt::write_u64(&mut buffer, value).expect(WRITE_ERROR_MESSAGE);
let expected_bytes = &[0x01, 0x23, 0x45, 0x67, 0x89];
assert_eq!(expected_bytes, buffer.as_slice());
}
#[test]
fn test_write_three_byte_uint() {
let value = 0x01_23_45;
let mut buffer: Vec<u8> = vec![];
DecodedUInt::write_u64(&mut buffer, value).expect(WRITE_ERROR_MESSAGE);
let expected_bytes: &[u8] = &[0x01, 0x23, 0x45];
assert_eq!(expected_bytes, buffer.as_slice());
}
#[test]
fn test_write_uint_zero() {
let value = 0x00;
let mut buffer: Vec<u8> = vec![];
DecodedUInt::write_u64(&mut buffer, value).expect(WRITE_ERROR_MESSAGE);
let expected_bytes: &[u8] = &[];
assert_eq!(expected_bytes, buffer.as_slice());
}
}
| true |
cb20125375a5212c095471be9bc5b8e48a381fcc
|
Rust
|
reem/transfer
|
/src/rt/metadata.rs
|
UTF-8
| 419 | 2.859375 | 3 |
[] |
no_license
|
use rt::Executor;
use std::sync::Arc;
use std::fmt;
/// Runtime Metadata
///
/// Metadata needed by the runtime to execute actions in other contexts,
/// usually on other threads.
#[derive(Clone)]
pub struct Metadata {
pub executor: Arc<Box<Executor>>
}
impl fmt::Debug for Metadata {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("Metadata { executor: Box<Executor> }")
}
}
| true |
624517d5d6d05503bb2377ac79dda279c17b40d0
|
Rust
|
TheAlgorithms/Rust
|
/src/math/sine.rs
|
UTF-8
| 1,529 | 3.671875 | 4 |
[
"MIT"
] |
permissive
|
// Calculate Sine function.
// Formula: sine(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
// Where: x = angle in randians.
// It is not a real function so I will just do 9 loops, it's just an approximation.
// Source:
// https://web.archive.org/web/20221111013039/https://www.homeschoolmath.net/teaching/sine_calculator.php
use std::f32::consts::PI;
fn factorial(num: u64) -> u64 {
(1..=num).product()
}
pub fn sine(angle: f64) -> f64 {
// Simplify the angle
let angle = angle % (2.0 * PI as f64);
let mut result = angle;
let mut a: u64 = 3;
let mut b = -1.0;
for _ in 0..9 {
result += b * (angle.powi(a as i32)) / (factorial(a) as f64);
b = -b;
a += 2;
}
result
}
#[cfg(test)]
mod tests {
use super::{sine, PI};
fn assert(angle: f64, expected_result: f64) {
// I will round the result to 3 decimal places, since it's an approximation.
assert_eq!(
format!("{:.3}", sine(angle)),
format!("{:.3}", expected_result)
);
}
#[test]
fn test_sine() {
assert(0.0, 0.0);
assert(PI as f64 / 2.0, 1.0);
assert(PI as f64 / 4.0, 1.0 / f64::sqrt(2.0));
assert(PI as f64, -0.0);
assert(PI as f64 * 3.0 / 2.0, -1.0);
assert(PI as f64 * 2.0, 0.0);
assert(PI as f64 * 2.0 * 3.0, 0.0);
assert(-PI as f64, 0.0);
assert(-PI as f64 / 2.0, -1.0);
assert(PI as f64 * 8.0 / 45.0, 0.5299192642);
assert(0.5, 0.4794255386);
}
}
| true |
e85281cc82dd0a35df8d334663ad254421736a08
|
Rust
|
gengteng/rust-http2
|
/src/data_or_headers_with_flag.rs
|
UTF-8
| 5,084 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
use futures::stream;
use futures::stream::Stream;
use bytes::Bytes;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_trailers::DataOrTrailers;
use crate::error;
use crate::misc::any_to_string;
use crate::result;
use crate::solicit::end_stream::EndStream;
use crate::solicit::header::Headers;
use crate::solicit_async::HttpFutureStreamSend;
use futures::future;
use futures::stream::StreamExt;
use futures::stream::TryStreamExt;
use futures::task::Context;
use std::panic::AssertUnwindSafe;
use std::pin::Pin;
use std::task::Poll;
/// Stream frame content with END_STREAM flag
#[derive(Debug)]
pub struct DataOrHeadersWithFlag {
pub content: DataOrHeaders,
/// END_STREAM
pub last: bool,
}
impl DataOrHeadersWithFlag {
pub fn last_headers(headers: Headers) -> Self {
DataOrHeadersWithFlag {
content: DataOrHeaders::Headers(headers),
last: true,
}
}
pub fn intermediate_headers(headers: Headers) -> Self {
DataOrHeadersWithFlag {
content: DataOrHeaders::Headers(headers),
last: false,
}
}
pub fn intermediate_data(data: Bytes) -> Self {
DataOrHeadersWithFlag {
content: DataOrHeaders::Data(data),
last: false,
}
}
pub fn last_data(data: Bytes) -> Self {
DataOrHeadersWithFlag {
content: DataOrHeaders::Data(data),
last: true,
}
}
pub fn into_after_headers(self) -> DataOrTrailers {
let DataOrHeadersWithFlag { content, last } = self;
match (content, last) {
(DataOrHeaders::Data(data), last) => {
let end_stream = if last { EndStream::Yes } else { EndStream::No };
DataOrTrailers::Data(data, end_stream)
}
(DataOrHeaders::Headers(headers), _) => DataOrTrailers::Trailers(headers),
}
}
}
impl From<DataOrTrailers> for DataOrHeadersWithFlag {
fn from(d: DataOrTrailers) -> Self {
match d {
DataOrTrailers::Data(data, end_stream) => DataOrHeadersWithFlag {
content: DataOrHeaders::Data(data),
last: end_stream == EndStream::Yes,
},
DataOrTrailers::Trailers(trailers) => DataOrHeadersWithFlag {
content: DataOrHeaders::Headers(trailers),
last: true,
},
}
}
}
/// Stream of DATA of HEADER frames
pub struct DataOrHeadersWithFlagStream(pub HttpFutureStreamSend<DataOrHeadersWithFlag>);
impl DataOrHeadersWithFlagStream {
// constructors
pub fn new<S>(s: S) -> DataOrHeadersWithFlagStream
where
S: Stream<Item = result::Result<DataOrHeadersWithFlag>> + Send + 'static,
{
DataOrHeadersWithFlagStream(Box::pin(s))
}
pub fn empty() -> DataOrHeadersWithFlagStream {
DataOrHeadersWithFlagStream::new(stream::empty())
}
pub fn bytes<S>(bytes: S) -> DataOrHeadersWithFlagStream
where
S: Stream<Item = result::Result<Bytes>> + Send + 'static,
{
DataOrHeadersWithFlagStream::new(bytes.map_ok(DataOrHeadersWithFlag::intermediate_data))
}
pub fn once(part: DataOrHeaders) -> DataOrHeadersWithFlagStream {
DataOrHeadersWithFlagStream::new(stream::once(future::ok(DataOrHeadersWithFlag {
content: part,
last: true,
})))
}
pub fn once_bytes<B>(bytes: B) -> DataOrHeadersWithFlagStream
where
B: Into<Bytes>,
{
DataOrHeadersWithFlagStream::once(DataOrHeaders::Data(bytes.into()))
}
// getters
/// Create a stream without "last" flag
pub fn drop_last_flag(self) -> HttpFutureStreamSend<DataOrHeaders> {
Box::pin(self.map_ok(|DataOrHeadersWithFlag { content, .. }| content))
}
/// Take only `DATA` frames from the stream
pub fn filter_data(self) -> HttpFutureStreamSend<Bytes> {
Box::pin(
self.try_filter_map(|DataOrHeadersWithFlag { content, .. }| {
future::ok(match content {
DataOrHeaders::Data(data) => Some(data),
_ => None,
})
}),
)
}
/// Wrap a stream with `catch_unwind` combinator.
/// Transform panic into `error::Error`
pub fn catch_unwind(self) -> DataOrHeadersWithFlagStream {
DataOrHeadersWithFlagStream::new(AssertUnwindSafe(self.0).catch_unwind().then(|r| {
future::ready(match r {
Ok(r) => r,
Err(e) => {
let e = any_to_string(e);
// TODO: send plain text error if headers weren't sent yet
warn!("handler panicked: {}", e);
Err(error::Error::HandlerPanicked(e))
}
})
}))
}
}
impl Stream for DataOrHeadersWithFlagStream {
type Item = result::Result<DataOrHeadersWithFlag>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Pin::new(&mut self.0).poll_next(cx)
}
}
| true |
c4ab0a7c611e4285dd97dc1d25c782457e3ec5fd
|
Rust
|
sowetocon/sowetocon.github.io
|
/crate/src/pages/home.rs
|
UTF-8
| 1,168 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
use yew::prelude::*;
use yew_styles::layouts::{
container::{Container, Direction, Wrap},
item::{Item, ItemLayout},
};
pub struct Home;
impl Component for Home {
type Message = ();
type Properties = ();
fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
Home {}
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
false
}
fn change(&mut self, _: Self::Properties) -> ShouldRender {
false
}
fn view(&self) -> Html {
html! {
<Container direction=Direction::Row wrap=Wrap::Wrap class_name="content">
<Item layouts=vec!(ItemLayout::ItXs(12))>
<img srcset="sowetocon_bg320.png,
sowetocon_bg480.png 1.5x,
sowetocon_bg640.png 2x"
src="sowetocon_bg640.png"
alt="Soweto Conference" />
//<img src="./sowetocon_poster.png"/>
<p><b>{"Education is the most powerful weapon which you can use to change the world. -Mandela."}</b></p>
</Item>
</Container>
}
}
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.