Dataset Viewer
blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
140
| path
stringlengths 5
183
| src_encoding
stringclasses 6
values | length_bytes
int64 12
5.32M
| score
float64 2.52
4.94
| int_score
int64 3
5
| detected_licenses
listlengths 0
47
| license_type
stringclasses 2
values | text
stringlengths 12
5.32M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
751a2fd63de8d440cbb843005df566800b4b1d67
|
Rust
|
Twinklebear/light_arena
|
/src/lib.rs
|
UTF-8
| 9,254 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
//! **Temporarily a simpler memory pool: stack-allocated objects are
//! copied into a shared heap, rather than a true placement-new memory arena.**
//! Unfortunately the path forward for placement new in Rust does not look
//! good right now, so I've reverted this crate to work more like a memory
//! heap where stuff can be put, but not constructed in place. This mimics
//! similar behavior, but allocations are limited to the stack size and
//! must first be made on the stack then copied in.
//!
//! This crate is written to solve a specific problem I have in
//! [tray\_rust](https://github.com/Twinklebear/tray_rust), where I want to
//! store trait objects and f32 arrays in a memory arena which is then reset
//! and reused for each pixel rendered (but not free'd and reallocated!).
//! The key feature enabling this is the reuse of previously allocated
//! space via the `Allocator` scopes. (True placement new would let us
//! construct objects in place instead of copying from a stack temporary,
//! but that nightly feature is not currently viable — see Blockers below.)
//! If you have a similar problem, this might be the right crate for you!
//! ## Examples
//!
//! Allocations in a `MemoryArena` are made through an `Allocator`. The
//! `Allocator` grants exclusive access to the arena while it's in scope,
//! allowing allocations to be made. Once the `Allocator` is dropped, the
//! space used is marked available again for subsequent allocations.
//! Note that **Drop is never called** on objects allocated in the arena,
//! and thus the restriction that `T: Sized + Copy`.
//!
//! The arena is untyped and can store anything which is `Sized + Copy`.
//!
//! ```rust
//!
//! trait Foo {
//! fn speak(&self);
//! }
//!
//! #[derive(Copy, Clone)]
//! struct Bar(i32);
//! impl Foo for Bar {
//! fn speak(&self) {
//! println!("Bar! val = {}", self.0);
//! }
//! }
//!
//! #[derive(Copy, Clone)]
//! struct Baz;
//! impl Foo for Baz {
//! fn speak(&self) {
//! println!("Baz!");
//! }
//! }
//!
//! let mut arena = light_arena::MemoryArena::new(2);
//! let allocator = arena.allocator();
//! let a: &Foo = allocator.alloc(Baz);
//! let b: &Foo = allocator.alloc(Bar(10));
//! let c: &Foo = allocator.alloc(Bar(14));
//! a.speak();
//! b.speak();
//! c.speak();
//! // Storing 0-sized types can give some interesting results
//! println!("a = {:p}", a as *const Foo);
//! println!("b = {:p}", b as *const Foo);
//! println!("c = {:p}", c as *const Foo);
//! ```
//!
//! ## Blockers
//!
//! - placement\_in\_syntax and placement\_new\_protocol are required,
//! see https://github.com/rust-lang/rust/issues/27779
use std::cell::RefCell;
use std::{cmp, mem, ptr};
/// A block of bytes used to back allocations requested from the `MemoryArena`.
struct Block {
    // Backing storage. Reserved bytes live in the Vec's spare capacity;
    // `buffer.len()` stays 0 and only `capacity()` is consulted.
    buffer: Vec<u8>,
    // Number of bytes currently reserved (in use) in this block.
    size: usize,
}
impl Block {
    /// Create a new block with capacity for `size` bytes. The block starts
    /// empty; `self.size` tracks how many bytes have been reserved so far.
    fn new(size: usize) -> Block {
        Block {
            buffer: Vec::with_capacity(size),
            size: 0,
        }
    }
    /// Reserve `size` bytes at alignment `align`. Returns null if the block
    /// doesn't have enough room.
    ///
    /// # Safety
    /// The returned pointer refers to uninitialized bytes inside the
    /// block's buffer; the caller must write before reading through it.
    unsafe fn reserve(&mut self, size: usize, align: usize) -> *mut u8 {
        if self.has_room(size, align) {
            // Padding needed to bring the next free byte up to `align`.
            let align_offset = align_address(self.buffer.as_ptr().add(self.size), align);
            let ptr = self.buffer.as_mut_ptr().add(self.size + align_offset);
            self.size += size + align_offset;
            ptr
        } else {
            ptr::null_mut()
        }
    }
    /// Check if this block has `size` bytes available at alignment `align`.
    fn has_room(&self, size: usize, align: usize) -> bool {
        // SAFETY: `self.size` never exceeds the buffer's capacity, so this
        // offset stays within the allocation that backs `buffer`.
        let ptr = unsafe { self.buffer.as_ptr().add(self.size) };
        let align_offset = align_address(ptr, align);
        self.buffer.capacity() - self.size >= size + align_offset
    }
}
/// Compute the number of bytes we need to offset the `ptr` by to align
/// it to the desired alignment.
fn align_address(ptr: *const u8, align: usize) -> usize {
    let addr = ptr as usize;
    // `(align - addr % align) % align` is 0 when the address is already a
    // multiple of `align`, and otherwise the padding needed to reach the
    // next multiple.
    (align - addr % align) % align
}
/// Provides the backing storage to serve allocations requested by an `Allocator`.
///
/// The `MemoryArena` allocates blocks of fixed size on demand as its existing
/// blocks get filled by allocation requests. To make allocations in the
/// arena use the `Allocator` returned by `allocator`. Only one `Allocator`
/// can be active for an arena at a time, after the allocator is dropped
/// the space used by its allocations is made available again.
pub struct MemoryArena {
    // Blocks are scanned in order for free space; new blocks are appended
    // on demand when no existing block can serve a request.
    blocks: Vec<Block>,
    // Default capacity, in bytes, for newly allocated blocks.
    block_size: usize,
}
impl MemoryArena {
    /// Create a new `MemoryArena` with the requested block size (in MB).
    /// The arena will allocate one initial block on creation, and further
    /// blocks of `block_size_mb` size, or larger if needed to meet a large
    /// allocation, on demand as allocations are made.
    pub fn new(block_size_mb: usize) -> MemoryArena {
        let block_size = block_size_mb * 1024 * 1024;
        MemoryArena {
            blocks: vec![Block::new(block_size)],
            block_size: block_size,
        }
    }
    /// Get an allocator for the arena. Only a single `Allocator` can be
    /// active for an arena at a time. Upon destruction of the `Allocator`
    /// its allocated data is marked available again.
    pub fn allocator(&mut self) -> Allocator {
        Allocator {
            arena: RefCell::new(self),
        }
    }
    /// Reserve a chunk of bytes in some block of the memory arena,
    /// allocating a new block if no existing block has room.
    unsafe fn reserve(&mut self, size: usize, align: usize) -> *mut u8 {
        for b in &mut self.blocks {
            if b.has_room(size, align) {
                return b.reserve(size, align);
            }
        }
        // No free blocks with enough room, we have to allocate. We also make
        // sure we've got align bytes of padding available as we don't assume
        // anything about the alignment of the underlying buffer.
        let new_block_size = cmp::max(self.block_size, size + align);
        self.blocks.push(Block::new(new_block_size));
        // The block was just created with enough room, so this cannot
        // return null. (Was previously `&mut self.blocks.last_mut()...`,
        // a redundant `&mut &mut Block`.)
        let b = self.blocks.last_mut().unwrap();
        b.reserve(size, align)
    }
}
/// The allocator provides exclusive access to the memory arena, allowing
/// for allocation of objects in the arena.
///
/// Objects allocated by an allocated cannot outlive it, upon destruction
/// of the allocator the memory space it requested will be made available
/// again. **Drops of allocated objects are not called**, only
/// types which are `Sized + Copy` can be safely stored.
pub struct Allocator<'a> {
    // RefCell lets `alloc`/`alloc_slice` take `&self` while still mutating
    // the arena's blocks through the exclusive borrow held here.
    arena: RefCell<&'a mut MemoryArena>,
}
impl<'a> Allocator<'a> {
    /// Get a dynamically sized slice of data from the allocator. The
    /// contents of the slice will be uninitialized.
    ///
    /// NOTE(review): the returned slice refers to uninitialized bytes;
    /// callers must fully overwrite it before reading. Confirm this
    /// contract is acceptable for all `T: Copy` used with this API.
    pub fn alloc_slice<T: Sized + Copy>(&self, len: usize) -> &mut [T] {
        let mut arena = self.arena.borrow_mut();
        let size = len * mem::size_of::<T>();
        unsafe {
            let ptr = arena.reserve(size, mem::align_of::<T>()) as *mut T;
            std::slice::from_raw_parts_mut(ptr, len)
        }
    }
    /// Copy `object` into the arena and return a reference to the copy.
    pub fn alloc<T: Sized + Copy>(&self, object: T) -> &mut T {
        // Drop is never run on arena contents; `T: Copy` already implies
        // `!needs_drop`, so this assert documents (and enforces) the
        // invariant rather than guarding a reachable case.
        assert!(!mem::needs_drop::<T>());
        let mut arena = self.arena.borrow_mut();
        unsafe {
            let ptr = arena.reserve(mem::size_of::<T>(), mem::align_of::<T>());
            ptr::write(ptr as *mut T, object);
            &mut *(ptr as *mut T)
        }
    }
}
impl<'a> Drop for Allocator<'a> {
    /// Dropping the allocator resets every block in the arena to empty,
    /// "releasing" all allocations made through it. No destructors are run
    /// on the stored objects.
    fn drop(&mut self) {
        let mut arena = self.arena.borrow_mut();
        for block in arena.blocks.iter_mut() {
            block.size = 0;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Padding computation: aligned address, unaligned address, and the
    // trivial byte-alignment case.
    #[test]
    fn aligner() {
        assert_eq!(align_address(4 as *const u8, 4), 0);
        assert_eq!(align_address(5 as *const u8, 4), 3);
        assert_eq!(align_address(17 as *const u8, 1), 0);
    }
    // Block reservation: alignment padding, used-byte accounting, and the
    // null return when a request doesn't fit.
    #[test]
    fn block() {
        let mut b = Block::new(16);
        assert!(b.has_room(16, 1));
        let a = unsafe { b.reserve(3, 1) };
        let c = unsafe { b.reserve(4, 4) };
        // 3 bytes reserved + 1 byte padding puts `c` 4 bytes past `a`.
        assert_eq!(c as usize - a as usize, 4);
        // This check is kind of assuming that the block's buffer
        // is at least 4-byte aligned which is probably a safe assumption.
        assert_eq!(b.size, 8);
        assert!(!b.has_room(32, 4));
        let d = unsafe { b.reserve(32, 4) };
        assert_eq!(d, ptr::null_mut());
    }
    // An oversized request must force the arena to allocate a second block
    // sized for the request plus alignment padding (size + align).
    #[test]
    fn memory_arena() {
        let mut arena = MemoryArena::new(1);
        let a = unsafe { arena.reserve(1024, 4) };
        assert_eq!(align_address(a, 4), 0);
        let two_mb = 2 * 1024 * 1024;
        let b = unsafe { arena.reserve(two_mb, 32) };
        assert_eq!(align_address(b, 32), 0);
        assert_eq!(arena.blocks.len(), 2);
        assert_eq!(arena.blocks[1].buffer.capacity(), two_mb + 32);
    }
}
| true |
1dbcc26a6d8426a596e8138f0e0a47529252b843
|
Rust
|
kbknapp/usbwatch-rs
|
/src/state.rs
|
UTF-8
| 5,912 | 2.921875 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
use std::{
collections::HashMap,
fs::{self, File},
path::Path,
};
use tracing::{self, debug, info, span, Level};
use yaml_rust::YamlLoader;
use crate::{
rule::{Rule, Rules},
usb::{UsbDevice, UsbDevices, UsbPort, UsbPorts},
};
#[derive(Default)]
pub struct State {
    // All ports ever seen; referenced elsewhere by 0-based index into this Vec.
    ports: Vec<UsbPort>,
    // All devices ever seen; referenced elsewhere by 0-based index into this Vec.
    devices: Vec<UsbDevice>,
    // Indices into `devices` for devices currently plugged in.
    active_devices: Vec<usize>,
    // Port->Device
    slot_map: HashMap<usize, Option<usize>>,
    // Device->Port
    rev_slot_map: HashMap<usize, usize>,
    pub rules: Vec<Rule>,
}
impl State {
    /// Create an empty `State`.
    pub fn new() -> Self { Self::default() }
    /// Load known devices from a YAML file and add each to the device list.
    /// Panics if the file cannot be opened or parsed.
    pub fn devices_from_file<P: AsRef<Path>>(&mut self, path: P) {
        let span = span!(Level::TRACE, "fn devices_from_file", file = ?path.as_ref());
        let _enter = span.enter();
        let file = File::open(path).unwrap();
        let devices: UsbDevices = serde_yaml::from_reader(file).unwrap();
        info!(num_devs= %devices.devices.len(), "Found Devices");
        for device in devices.devices.into_iter() {
            debug!(device = %device, "Adding Device");
            self.add_device(device);
        }
    }
    /// Load known ports from a YAML file and add each to the port list.
    /// Panics if the file cannot be opened or parsed.
    pub fn ports_from_file<P: AsRef<Path>>(&mut self, path: P) {
        let span = span!(Level::TRACE, "fn ports_from_file", file = ?path.as_ref());
        let _enter = span.enter();
        let file = File::open(path).unwrap();
        let ports: UsbPorts = serde_yaml::from_reader(file).unwrap();
        info!(num_ports= %ports.ports.len(), "Found Ports");
        for port in ports.ports.into_iter() {
            debug!(port = %port, "Adding Port");
            self.add_port(port);
        }
    }
    /// Load rules from a YAML file and append them to `self.rules`.
    /// Panics if the file cannot be read or parsed.
    pub fn rules_from_file<P: AsRef<Path>>(&mut self, path: P) {
        let span = span!(Level::TRACE, "fn rules_from_file", file = ?path.as_ref());
        let _enter = span.enter();
        let buf = fs::read_to_string(path).unwrap();
        let rules = Rules::from(&YamlLoader::load_from_str(&buf).unwrap()[0]);
        info!(num_rules= %rules.rules.len(), "Found Rules");
        for rule in rules.rules.into_iter() {
            // Log field was previously misspelled as `ruel`.
            debug!(rule = ?rule.name, "Adding Rule");
            self.rules.push(rule);
        }
    }
    /// Add `port` to the known-port list (no-op if already present) and
    /// ensure it has an (empty) entry in the slot map.
    pub fn add_port(&mut self, port: UsbPort) {
        let span = span!(Level::TRACE, "fn add_port", port = %port);
        let _enter = span.enter();
        if self.ports.contains(&port) {
            debug!("Port already exists; returning");
            return;
        }
        self.ports.push(port);
        // Key the slot map by the port's 0-based index (`len() - 1` after the
        // push). Previously this used `self.ports.len()`, which left a stray
        // entry one past the index that `add_and_slot_device` and
        // `rm_and_unslot_device` actually use.
        let idx = self.ports.len() - 1;
        debug!(key = idx, "Slotting empty port");
        self.slot_map.entry(idx).or_insert(None);
    }
    /// Add `device` to the known-device list (no-op if already present).
    pub fn add_device(&mut self, device: UsbDevice) {
        let span = span!(Level::TRACE, "fn add_device", device = %device);
        let _enter = span.enter();
        if self.devices.contains(&device) {
            debug!("Device already exists; returning");
            return;
        }
        self.devices.push(device);
    }
    /// Record that `device` is plugged into `port`, adding both to the known
    /// lists first if needed, and mark the device active.
    pub fn add_and_slot_device(&mut self, device: UsbDevice, port: UsbPort) {
        let span = span!(Level::TRACE, "fn add_and_slot_device", device = %device, port = %port);
        let _enter = span.enter();
        self.add_port(port.clone());
        self.add_device(device.clone());
        for (i, p) in self.ports.iter().enumerate() {
            debug!(i=i, port = %p, "Iter ports");
            if p == &port {
                debug!("Matched Port");
                for (j, d) in self.devices.iter().enumerate() {
                    debug!(j=j, device = %d, "Iter devices");
                    if d == &device {
                        debug!("Matched device");
                        debug!(i = i, j = j, "Setting port slot {} to device index {}", i, j);
                        // Plain inserts: we want to overwrite any previous
                        // mapping unconditionally.
                        self.slot_map.insert(i, Some(j));
                        debug!(i = i, j = j, "Setting reverse slot map device index {} to slot {}", j, i);
                        self.rev_slot_map.insert(j, i);
                        debug!("Activating device index {}", j);
                        self.active_devices.push(j);
                        debug!("Returning");
                        // Ports and devices are deduplicated on insert, so
                        // the first match is the only one; return instead of
                        // continuing to scan the remaining ports.
                        return;
                    }
                }
            }
        }
    }
    /// Mark `device` as unplugged: clear its port's slot and remove it from
    /// the active list. The device itself stays in `devices`.
    pub fn rm_and_unslot_device(&mut self, device: UsbDevice) {
        let span = span!(Level::TRACE, "fn rm_and_unslot_device", device = %device);
        let _enter = span.enter();
        for (i, d) in self.devices.iter().enumerate() {
            debug!(i=i, device = %d, "Iter devices");
            if d == &device {
                debug!("Matched device");
                if let Some(p) = self.rev_slot_map.get_mut(&i) {
                    debug!(
                        "Found port index {} via device reverse slot map index {}",
                        p, i
                    );
                    debug!("Setting slot map {} to None", p);
                    *self.slot_map.entry(*p).or_insert(None) = None;
                }
                // NOTE(review): the rev_slot_map entry for this device is
                // left in place here and only overwritten by the next
                // add_and_slot_device — confirm stale reverse mappings are
                // acceptable.
                let to_rem = self.active_devices.iter().position(|idx| *idx == i);
                if let Some(idx) = to_rem {
                    debug!("Removing device index {} from active devices", idx);
                    self.active_devices.swap_remove(idx);
                }
                debug!("Returning");
                break;
            }
        }
    }
}
| true |
e942eef8a7b54c5bea48e16e4c8115fb3e7edce2
|
Rust
|
baitcenter/gdlk
|
/api/src/util.rs
|
UTF-8
| 1,566 | 3.078125 | 3 |
[] |
no_license
|
//! General utility functions and types.
#[cfg(test)]
pub use tests::*;
use diesel::{r2d2::ConnectionManager, PgConnection};
/// Type aliases for DB connections
/// A connection pool over Postgres connections.
pub type Pool = r2d2::Pool<ConnectionManager<PgConnection>>;
/// A single Postgres connection checked out of a `Pool`.
pub type PooledConnection =
    r2d2::PooledConnection<ConnectionManager<PgConnection>>;
#[cfg(test)]
mod tests {
    use super::*;
    use diesel::Connection;
    /// Helper to create a database connection for testing. The returned
    /// connection has a test transaction started on it, so nothing done
    /// through it is ever committed to the DB.
    pub fn test_db_conn() -> PooledConnection {
        let db_url = std::env::var("DATABASE_URL").unwrap();
        // Build a pool so we can hand out owned, pooled connections to
        // APIs that expect them; the pool also takes care of closing the
        // connections automatically.
        let manager = diesel::r2d2::ConnectionManager::new(&db_url);
        let pool = r2d2::Pool::builder().max_size(5).build(manager).unwrap();
        let conn = pool.get().unwrap();
        (&conn as &PgConnection).begin_test_transaction().unwrap();
        conn
    }
    /// Assert that the first value is an Err whose string form equals the
    /// second argument.
    #[macro_export]
    macro_rules! assert_err {
        ($res:expr, $msg:tt $(,)?) => {
            match $res {
                Ok(_) => panic!("Expected Err, got Ok"),
                Err(e) => assert_eq!(format!("{}", e), $msg),
            }
        };
    }
}
| true |
69b8a13cedffdf5f63a897310fe5a57949ab4a19
|
Rust
|
chromium/chromium
|
/third_party/rust/semver/v1/crate/src/lib.rs
|
UTF-8
| 19,858 | 2.828125 | 3 |
[
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! [![github]](https://github.com/dtolnay/semver) [![crates-io]](https://crates.io/crates/semver) [![docs-rs]](https://docs.rs/semver)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K
//!
//! <br>
//!
//! A parser and evaluator for Cargo's flavor of Semantic Versioning.
//!
//! Semantic Versioning (see <https://semver.org>) is a guideline for how
//! version numbers are assigned and incremented. It is widely followed within
//! the Cargo/crates.io ecosystem for Rust.
//!
//! <br>
//!
//! # Example
//!
//! ```
//! use semver::{BuildMetadata, Prerelease, Version, VersionReq};
//!
//! fn main() {
//! let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
//!
//! // Check whether this requirement matches version 1.2.3-alpha.1 (no)
//! let version = Version {
//! major: 1,
//! minor: 2,
//! patch: 3,
//! pre: Prerelease::new("alpha.1").unwrap(),
//! build: BuildMetadata::EMPTY,
//! };
//! assert!(!req.matches(&version));
//!
//! // Check whether it matches 1.3.0 (yes it does)
//! let version = Version::parse("1.3.0").unwrap();
//! assert!(req.matches(&version));
//! }
//! ```
//!
//! <br><br>
//!
//! # Scope of this crate
//!
//! Besides Cargo, several other package ecosystems and package managers for
//! other languages also use SemVer: RubyGems/Bundler for Ruby, npm for
//! JavaScript, Composer for PHP, CocoaPods for Objective-C...
//!
//! The `semver` crate is specifically intended to implement Cargo's
//! interpretation of Semantic Versioning.
//!
//! Where the various tools differ in their interpretation or implementation of
//! the spec, this crate follows the implementation choices made by Cargo. If
//! you are operating on version numbers from some other package ecosystem, you
//! will want to use a different semver library which is appropriate to that
//! ecosystem.
//!
//! The extent of Cargo's SemVer support is documented in the *[Specifying
//! Dependencies]* chapter of the Cargo reference.
//!
//! [Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
#![doc(html_root_url = "https://docs.rs/semver/1.0.4")]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(all(not(feature = "std"), not(no_alloc_crate)), no_std)]
#![cfg_attr(not(no_unsafe_op_in_unsafe_fn_lint), deny(unsafe_op_in_unsafe_fn))]
#![cfg_attr(no_unsafe_op_in_unsafe_fn_lint, allow(unused_unsafe))]
#![cfg_attr(no_str_strip_prefix, allow(unstable_name_collisions))]
#![allow(
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::doc_markdown,
clippy::items_after_statements,
clippy::match_bool,
clippy::missing_errors_doc,
clippy::must_use_candidate,
clippy::needless_doctest_main,
clippy::option_if_let_else,
clippy::ptr_as_ptr,
clippy::redundant_else,
clippy::semicolon_if_nothing_returned, // https://github.com/rust-lang/rust-clippy/issues/7324
clippy::similar_names,
clippy::unnested_or_patterns,
clippy::unseparated_literal_suffix,
clippy::wildcard_imports
)]
#[cfg(not(no_alloc_crate))]
extern crate alloc;
mod backport;
mod display;
mod error;
mod eval;
mod identifier;
mod impls;
mod parse;
#[cfg(feature = "serde")]
mod serde;
use crate::alloc::vec::Vec;
use crate::identifier::Identifier;
use core::str::FromStr;
#[allow(unused_imports)]
use crate::backport::*;
pub use crate::parse::Error;
/// **SemVer version** as defined by <https://semver.org>.
///
/// # Syntax
///
/// - The major, minor, and patch numbers may be any integer 0 through u64::MAX.
/// When representing a SemVer version as a string, each number is written as
/// a base 10 integer. For example, `1.0.119`.
///
/// - Leading zeros are forbidden in those positions. For example `1.01.00` is
/// invalid as a SemVer version.
///
/// - The pre-release identifier, if present, must conform to the syntax
/// documented for [`Prerelease`].
///
/// - The build metadata, if present, must conform to the syntax documented for
/// [`BuildMetadata`].
///
/// - Whitespace is not allowed anywhere in the version.
///
/// # Total ordering
///
/// Given any two SemVer versions, one is less than, greater than, or equal to
/// the other. Versions may be compared against one another using Rust's usual
/// comparison operators.
///
/// - The major, minor, and patch number are compared numerically from left to
/// right, lexicographically ordered as a 3-tuple of integers. So for example
/// version `1.5.0` is less than version `1.19.0`, despite the fact that
/// "1.19.0" < "1.5.0" as ASCIIbetically compared strings and 1.19 < 1.5
/// as real numbers.
///
/// - When major, minor, and patch are equal, a pre-release version is
/// considered less than the ordinary release: version `1.0.0-alpha.1` is
/// less than version `1.0.0`.
///
/// - Two pre-releases of the same major, minor, patch are compared by
/// lexicographic ordering of dot-separated components of the pre-release
/// string.
///
/// - Identifiers consisting of only digits are compared
/// numerically: `1.0.0-pre.8` is less than `1.0.0-pre.12`.
///
/// - Identifiers that contain a letter or hyphen are compared in ASCII sort
/// order: `1.0.0-pre12` is less than `1.0.0-pre8`.
///
/// - Any numeric identifier is always less than any non-numeric
/// identifier: `1.0.0-pre.1` is less than `1.0.0-pre.x`.
///
/// Example: `1.0.0-alpha` < `1.0.0-alpha.1` < `1.0.0-alpha.beta` < `1.0.0-beta` < `1.0.0-beta.2` < `1.0.0-beta.11` < `1.0.0-rc.1` < `1.0.0`
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Version {
    pub major: u64,
    pub minor: u64,
    pub patch: u64,
    /// Optional pre-release component; empty when absent. See [`Prerelease`].
    pub pre: Prerelease,
    /// Optional build metadata; empty when absent. See [`BuildMetadata`].
    pub build: BuildMetadata,
}
/// **SemVer version requirement** describing the intersection of some version
/// comparators, such as `>=1.2.3, <1.8`.
///
/// # Syntax
///
/// - Either `*` (meaning "any"), or one or more comma-separated comparators.
///
/// - A [`Comparator`] is an operator ([`Op`]) and a partial version, separated
/// by optional whitespace. For example `>=1.0.0` or `>=1.0`.
///
/// - Build metadata is syntactically permitted on the partial versions, but is
/// completely ignored, as it's never relevant to whether any comparator
/// matches a particular version.
///
/// - Whitespace is permitted around commas and around operators. Whitespace is
/// not permitted within a partial version, i.e. anywhere between the major
/// version number and its minor, patch, pre-release, or build metadata.
#[derive(Default, Clone, Eq, PartialEq, Hash, Debug)]
pub struct VersionReq {
    /// The intersected comparators. An empty list corresponds to the `*`
    /// requirement (see `VersionReq::STAR`).
    pub comparators: Vec<Comparator>,
}
/// A pair of comparison operator and partial version, such as `>=1.2`. Forms
/// one piece of a VersionReq.
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
pub struct Comparator {
    /// The comparison operator, e.g. `>=` in `>=1.2`.
    pub op: Op,
    pub major: u64,
    /// None when the comparator gives only a major version, e.g. `^1`.
    pub minor: Option<u64>,
    /// Patch is only allowed if minor is Some.
    pub patch: Option<u64>,
    /// Non-empty pre-release is only allowed if patch is Some.
    pub pre: Prerelease,
}
/// SemVer comparison operator: `=`, `>`, `>=`, `<`, `<=`, `~`, `^`, `*`.
///
/// # Op::Exact
/// -  **`=I.J.K`** — exactly the version I.J.K
/// -  **`=I.J`** — equivalent to `>=I.J.0, <I.(J+1).0`
/// -  **`=I`** — equivalent to `>=I.0.0, <(I+1).0.0`
///
/// # Op::Greater
/// -  **`>I.J.K`**
/// -  **`>I.J`** — equivalent to `>=I.(J+1).0`
/// -  **`>I`** — equivalent to `>=(I+1).0.0`
///
/// # Op::GreaterEq
/// -  **`>=I.J.K`**
/// -  **`>=I.J`** — equivalent to `>=I.J.0`
/// -  **`>=I`** — equivalent to `>=I.0.0`
///
/// # Op::Less
/// -  **`<I.J.K`**
/// -  **`<I.J`** — equivalent to `<I.J.0`
/// -  **`<I`** — equivalent to `<I.0.0`
///
/// # Op::LessEq
/// -  **`<=I.J.K`**
/// -  **`<=I.J`** — equivalent to `<I.(J+1).0`
/// -  **`<=I`** — equivalent to `<(I+1).0.0`
///
/// # Op::Tilde ("patch" updates)
/// *Tilde requirements allow the **patch** part of the semver version (the third number) to increase.*
/// -  **`~I.J.K`** — equivalent to `>=I.J.K, <I.(J+1).0`
/// -  **`~I.J`** — equivalent to `=I.J`
/// -  **`~I`** — equivalent to `=I`
///
/// # Op::Caret ("compatible" updates)
/// *Caret requirements allow parts that are **right of the first nonzero** part of the semver version to increase.*
/// -  **`^I.J.K`** (for I\>0) — equivalent to `>=I.J.K, <(I+1).0.0`
/// -  **`^0.J.K`** (for J\>0) — equivalent to `>=0.J.K, <0.(J+1).0`
/// -  **`^0.0.K`** — equivalent to `=0.0.K`
/// -  **`^I.J`** (for I\>0 or J\>0) — equivalent to `^I.J.0`
/// -  **`^0.0`** — equivalent to `=0.0`
/// -  **`^I`** — equivalent to `=I`
///
/// # Op::Wildcard
/// -  **`I.J.*`** — equivalent to `=I.J`
/// -  **`I.*`** or **`I.*.*`** — equivalent to `=I`
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
#[cfg_attr(not(no_non_exhaustive), non_exhaustive)]
pub enum Op {
    /// `=`
    Exact,
    /// `>`
    Greater,
    /// `>=`
    GreaterEq,
    /// `<`
    Less,
    /// `<=`
    LessEq,
    /// `~` — allows patch-level updates.
    Tilde,
    /// `^` — allows updates right of the first nonzero component.
    Caret,
    /// `*`, `I.*`, or `I.J.*`.
    Wildcard,
    // Hidden stand-in variant for `#[non_exhaustive]` on old compilers.
    #[cfg(no_non_exhaustive)] // rustc <1.40
    #[doc(hidden)]
    __NonExhaustive,
}
/// Optional pre-release identifier on a version string. This comes after `-` in
/// a SemVer version, like `1.0.0-alpha.1`
///
/// # Examples
///
/// Some real world pre-release idioms drawn from crates.io:
///
/// - **[mio]** <code>0.7.0-<b>alpha.1</b></code> — the most common style
/// for numbering pre-releases.
///
/// - **[pest]** <code>1.0.0-<b>beta.8</b></code>, <code>1.0.0-<b>rc.0</b></code>
/// — this crate makes a distinction between betas and release
/// candidates.
///
/// - **[sassers]** <code>0.11.0-<b>shitshow</b></code> — ???.
///
/// - **[atomic-utils]** <code>0.0.0-<b>reserved</b></code> — a squatted
/// crate name.
///
/// [mio]: https://crates.io/crates/mio
/// [pest]: https://crates.io/crates/pest
/// [atomic-utils]: https://crates.io/crates/atomic-utils
/// [sassers]: https://crates.io/crates/sassers
///
/// *Tip:* Be aware that if you are planning to number your own pre-releases,
/// you should prefer to separate the numeric part from any non-numeric
/// identifiers by using a dot in between. That is, prefer pre-releases
/// `alpha.1`, `alpha.2`, etc rather than `alpha1`, `alpha2` etc. The SemVer
/// spec's rule for pre-release precedence has special treatment of numeric
/// components in the pre-release string, but only if there are no non-digit
/// characters in the same dot-separated component. So you'd have `alpha.2` <
/// `alpha.11` as intended, but `alpha11` < `alpha2`.
///
/// # Syntax
///
/// Pre-release strings are a series of dot separated identifiers immediately
/// following the patch version. Identifiers must comprise only ASCII
/// alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must not be
/// empty. Numeric identifiers must not include leading zeros.
///
/// # Total ordering
///
/// Pre-releases have a total order defined by the SemVer spec. It uses
/// lexicographic ordering of dot-separated components. Identifiers consisting
/// of only digits are compared numerically. Otherwise, identifiers are compared
/// in ASCII sort order. Any numeric identifier is always less than any
/// non-numeric identifier.
///
/// Example: `alpha` < `alpha.85` < `alpha.90` < `alpha.200` < `alpha.0a` < `alpha.1a0` < `alpha.a` < `beta`
#[derive(Default, Clone, Eq, PartialEq, Hash)]
pub struct Prerelease {
    // Validated identifier storage; exposed via `as_str`/`is_empty`.
    identifier: Identifier,
}
/// Optional build metadata identifier. This comes after `+` in a SemVer
/// version, as in `0.8.1+zstd.1.5.0`.
///
/// # Examples
///
/// Some real world build metadata idioms drawn from crates.io:
///
/// - **[libgit2-sys]** <code>0.12.20+<b>1.1.0</b></code> — for this
/// crate, the build metadata indicates the version of the C libgit2 library
/// that the Rust crate is built against.
///
/// - **[mashup]** <code>0.1.13+<b>deprecated</b></code> — just the word
/// "deprecated" for a crate that has been superseded by another. Eventually
/// people will take notice of this in Cargo's build output where it lists the
/// crates being compiled.
///
/// - **[google-bigquery2]** <code>2.0.4+<b>20210327</b></code> — this
/// library is automatically generated from an official API schema, and the
/// build metadata indicates the date on which that schema was last captured.
///
/// - **[fbthrift-git]** <code>0.0.6+<b>c7fcc0e</b></code> — this crate is
/// published from snapshots of a big company monorepo. In monorepo
/// development, there is no concept of versions, and all downstream code is
/// just updated atomically in the same commit that breaking changes to a
/// library are landed. Therefore for crates.io purposes, every published
/// version must be assumed to be incompatible with the previous. The build
/// metadata provides the source control hash of the snapshotted code.
///
/// [libgit2-sys]: https://crates.io/crates/libgit2-sys
/// [mashup]: https://crates.io/crates/mashup
/// [google-bigquery2]: https://crates.io/crates/google-bigquery2
/// [fbthrift-git]: https://crates.io/crates/fbthrift-git
///
/// # Syntax
///
/// Build metadata is a series of dot separated identifiers immediately
/// following the patch or pre-release version. Identifiers must comprise only
/// ASCII alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must
/// not be empty. Leading zeros *are* allowed, unlike any other place in the
/// SemVer grammar.
///
/// # Total ordering
///
/// Build metadata is ignored in evaluating `VersionReq`; it plays no role in
/// whether a `Version` matches any one of the comparison operators.
///
/// However for comparing build metadatas among one another, they do have a
/// total order which is determined by lexicographic ordering of dot-separated
/// components. Identifiers consisting of only digits are compared numerically.
/// Otherwise, identifiers are compared in ASCII sort order. Any numeric
/// identifier is always less than any non-numeric identifier.
///
/// Example: `demo` < `demo.85` < `demo.90` < `demo.090` < `demo.200` < `demo.1a0` < `demo.a` < `memo`
#[derive(Default, Clone, Eq, PartialEq, Hash)]
pub struct BuildMetadata {
    // Validated identifier storage; exposed via `as_str`/`is_empty`.
    identifier: Identifier,
}
impl Version {
/// Create `Version` with an empty pre-release and build metadata.
///
/// Equivalent to:
///
/// ```
/// # use semver::{BuildMetadata, Prerelease, Version};
/// #
/// # const fn new(major: u64, minor: u64, patch: u64) -> Version {
/// Version {
/// major,
/// minor,
/// patch,
/// pre: Prerelease::EMPTY,
/// build: BuildMetadata::EMPTY,
/// }
/// # }
/// ```
pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
Version {
major,
minor,
patch,
pre: Prerelease::EMPTY,
build: BuildMetadata::EMPTY,
}
}
/// Create `Version` by parsing from string representation.
///
/// # Errors
///
/// Possible reasons for the parse to fail include:
///
/// - `1.0` — too few numeric components. A SemVer version must have
/// exactly three. If you are looking at something that has fewer than
/// three numbers in it, it's possible it is a `VersionReq` instead (with
/// an implicit default `^` comparison operator).
///
/// - `1.0.01` — a numeric component has a leading zero.
///
/// - `1.0.unknown` — unexpected character in one of the components.
///
/// - `1.0.0-` or `1.0.0+` — the pre-release or build metadata are
/// indicated present but empty.
///
/// - `1.0.0-alpha_123` — pre-release or build metadata have something
/// outside the allowed characters, which are `0-9`, `A-Z`, `a-z`, `-`,
/// and `.` (dot).
///
/// - `23456789999999999999.0.0` — overflow of a u64.
pub fn parse(text: &str) -> Result<Self, Error> {
Version::from_str(text)
}
}
impl VersionReq {
    /// A `VersionReq` with no constraint on the version numbers it matches.
    /// Equivalent to `VersionReq::parse("*").unwrap()`.
    ///
    /// In terms of comparators this is equivalent to `>=0.0.0`.
    ///
    /// Counterintuitively a `*` VersionReq does not match every possible
    /// version number. In particular, in order for *any* `VersionReq` to match
    /// a pre-release version, the `VersionReq` must contain at least one
    /// `Comparator` that has an explicit major, minor, and patch version
    /// identical to the pre-release being matched, and that has a nonempty
    /// pre-release component. Since `*` is not written with an explicit major,
    /// minor, and patch version, and does not contain a nonempty pre-release
    /// component, it does not match any pre-release versions.
    // An empty comparator list means "no constraint"; `Vec::new` is const
    // only on toolchains where the cfg below holds.
    #[cfg(not(no_const_vec_new))] // rustc <1.39
    pub const STAR: Self = VersionReq {
        comparators: Vec::new(),
    };
    /// Create `VersionReq` by parsing from string representation.
    ///
    /// # Errors
    ///
    /// Possible reasons for the parse to fail include:
    ///
    /// - `>a.b` — unexpected characters in the partial version.
    ///
    /// - `@1.0.0` — unrecognized comparison operator.
    ///
    /// - `^1.0.0, ` — unexpected end of input.
    ///
    /// - `>=1.0 <2.0` — missing comma between comparators.
    ///
    /// - `*.*` — unsupported wildcard syntax.
    pub fn parse(text: &str) -> Result<Self, Error> {
        // Delegates to the `FromStr` implementation.
        VersionReq::from_str(text)
    }
    /// Evaluate whether the given `Version` satisfies the version requirement
    /// described by `self`.
    pub fn matches(&self, version: &Version) -> bool {
        eval::matches_req(self, version)
    }
}
impl Comparator {
pub fn parse(text: &str) -> Result<Self, Error> {
Comparator::from_str(text)
}
pub fn matches(&self, version: &Version) -> bool {
eval::matches_comparator(self, version)
}
}
impl Prerelease {
pub const EMPTY: Self = Prerelease {
identifier: Identifier::empty(),
};
pub fn new(text: &str) -> Result<Self, Error> {
Prerelease::from_str(text)
}
pub fn as_str(&self) -> &str {
self.identifier.as_str()
}
pub fn is_empty(&self) -> bool {
self.identifier.is_empty()
}
}
impl BuildMetadata {
pub const EMPTY: Self = BuildMetadata {
identifier: Identifier::empty(),
};
pub fn new(text: &str) -> Result<Self, Error> {
BuildMetadata::from_str(text)
}
pub fn as_str(&self) -> &str {
self.identifier.as_str()
}
pub fn is_empty(&self) -> bool {
self.identifier.is_empty()
}
}
| true |
48d11db5ccb940b2f1803cbd9b0a3329d5c2043c
|
Rust
|
vikrem/linkerd2-proxy
|
/linkerd/proxy/detect/src/lib.rs
|
UTF-8
| 4,987 | 2.609375 | 3 |
[
"Apache-2.0"
] |
permissive
|
use linkerd2_error::Error;
use linkerd2_io::{BoxedIo, Peek};
use linkerd2_proxy_core as core;
use pin_project::{pin_project, project};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// A strategy for detecting values out of a client transport.
/// A strategy for detecting values out of a client transport.
pub trait Detect<T>: Clone {
    type Target;

    /// If the target can be determined from `target` alone (i.e. because it's
    /// known to be a server-speaks-first protocol), returns the detected
    /// target; otherwise the original target is returned as an `Err` so the
    /// caller can peek at the stream instead.
    fn detect_before_peek(&self, target: T) -> Result<Self::Target, T>;

    /// If the target could not be determined without peeking, then use the
    /// peeked prefix bytes to determine the protocol.
    fn detect_peeked_prefix(&self, target: T, prefix: &[u8]) -> Self::Target;
}
/// Tower layer that wraps an accept service with protocol detection.
#[derive(Debug, Clone)]
pub struct DetectProtocolLayer<D> {
    detect: D,
    // Maximum number of bytes buffered while peeking at a new stream.
    peek_capacity: usize,
}

/// Accept-style service that runs detection before delegating to `accept`.
#[derive(Debug, Clone)]
pub struct DetectProtocol<D, A> {
    detect: D,
    accept: A,
    peek_capacity: usize,
}
/// Future returned by `DetectProtocol`: either accepting an already-detected
/// connection or still peeking/detecting.
#[pin_project]
pub struct AcceptFuture<T, D, A>
where
    D: Detect<T>,
    A: core::listen::Accept<(D::Target, BoxedIo)>,
{
    #[pin]
    state: State<T, D, A>,
}

/// Internal state machine for `AcceptFuture`.
#[pin_project]
enum State<T, D, A>
where
    D: Detect<T>,
    A: core::listen::Accept<(D::Target, BoxedIo)>,
{
    // Detection finished; driving the inner accept future to completion.
    Accept(#[pin] A::Future),
    // Still detecting: holds the strategy and the accept service to call next.
    Detect {
        detect: D,
        accept: A,
        #[pin]
        inner: PeekAndDetect<T, D>,
    },
}

/// Sub-states of detection-by-peeking.
#[pin_project]
pub enum PeekAndDetect<T, D: Detect<T>> {
    // Waiting for accept to become ready.
    Detected(Option<(D::Target, BoxedIo)>),
    // Waiting for the prefix to be read.
    Peek(Option<T>, #[pin] Peek<BoxedIo>),
}
impl<D> DetectProtocolLayer<D> {
    /// Peek buffer size (in bytes) used when none is configured explicitly.
    const DEFAULT_CAPACITY: usize = 8192;

    /// Build a layer that uses `detect` with the default peek capacity.
    pub fn new(detect: D) -> Self {
        let peek_capacity = Self::DEFAULT_CAPACITY;
        Self { detect, peek_capacity }
    }
}
impl<D: Clone, A> tower::layer::Layer<A> for DetectProtocolLayer<D> {
type Service = DetectProtocol<D, A>;
fn layer(&self, accept: A) -> Self::Service {
Self::Service {
detect: self.detect.clone(),
peek_capacity: self.peek_capacity,
accept,
}
}
}
impl<T, D, A> tower::Service<(T, BoxedIo)> for DetectProtocol<D, A>
where
    D: Detect<T>,
    A: core::listen::Accept<(D::Target, BoxedIo)> + Clone,
    D::Target: std::fmt::Debug,
{
    type Response = A::ConnectionFuture;
    type Error = Error;
    type Future = AcceptFuture<T, D, A>;

    /// Ready whenever the inner accept service is ready.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.accept.poll_ready(cx).map_err(Into::into)
    }

    /// Dispatch a new connection: if the protocol is knowable from the target
    /// alone, start accepting immediately; otherwise begin peeking the stream.
    fn call(&mut self, (target, io): (T, BoxedIo)) -> Self::Future {
        match self.detect.detect_before_peek(target) {
            Ok(detected) => AcceptFuture {
                state: State::Accept(self.accept.accept((detected, io))),
            },
            // Clone the strategy/service into the future so detection can
            // finish asynchronously.
            Err(target) => AcceptFuture {
                state: State::Detect {
                    detect: self.detect.clone(),
                    accept: self.accept.clone(),
                    inner: PeekAndDetect::Peek(
                        Some(target),
                        Peek::with_capacity(self.peek_capacity, io),
                    ),
                },
            },
        }
    }
}
impl<T, D, A> Future for AcceptFuture<T, D, A>
where
    D: Detect<T>,
    A: core::listen::Accept<(D::Target, BoxedIo)>,
    A::Error: Into<Error>,
{
    type Output = Result<A::ConnectionFuture, Error>;

    #[project]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Loop so each state transition is re-polled immediately.
        loop {
            #[project]
            match this.state.as_mut().project() {
                // Detection done: just drive the inner accept future.
                State::Accept(fut) => return fut.poll(cx).map_err(Into::into),
                State::Detect {
                    detect,
                    accept,
                    mut inner,
                } =>
                {
                    #[project]
                    match inner.as_mut().project() {
                        PeekAndDetect::Peek(target, peek) => {
                            // Wait for the peeked prefix, then classify it.
                            let io = futures::ready!(peek.poll(cx))?;
                            let target = detect.detect_peeked_prefix(
                                target.take().expect("polled after complete"),
                                io.prefix().as_ref(),
                            );
                            inner.set(PeekAndDetect::Detected(Some((target, BoxedIo::new(io)))));
                        }
                        PeekAndDetect::Detected(io) => {
                            // Hand the detected connection to the accept
                            // service once it is ready to take it.
                            futures::ready!(accept.poll_ready(cx)).map_err(Into::into)?;
                            let io = io.take().expect("polled after complete");
                            let accept = accept.accept(io);
                            this.state.set(State::Accept(accept));
                        }
                    }
                }
            }
        }
    }
}
| true |
b6d81c360fc9e5655ca5c00740708a5d33511de0
|
Rust
|
aldhsu/fuzzy-matcher
|
/src/skim.rs
|
UTF-8
| 10,511 | 2.8125 | 3 |
[
"MIT"
] |
permissive
|
//! The fuzzy matching algorithm used by skim.
//! It focuses more on path matching.
//!
//! # Example:
//! ```edition2018
//! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};
//!
//! assert_eq!(None, fuzzy_match("abc", "abx"));
//! assert!(fuzzy_match("axbycz", "abc").is_some());
//! assert!(fuzzy_match("axbycz", "xyz").is_some());
//!
//! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
//! assert_eq!(indices, [0, 2, 4]);
//!
//! ```
//!
//! It is modeled after <https://github.com/felipesere/icepick.git>
use std::cmp::max;
use crate::util::*;
// Scoring weights: bonuses reward matched characters and structural hints
// (case match, adjacency, separators, camelCase humps); penalties discourage
// case mismatches, unmatched gaps, and first matches far from the start.
const BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
/// Like [`fuzzy_match`], but also returns the character indices of the
/// matched positions in `choice`.
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
    if pattern.is_empty() {
        return Some((0, Vec::new()));
    }

    let scores = build_graph(choice, pattern)?;
    let last_row = scores.last()?;
    let (mut col, &MatchingStatus { final_score, .. }) = last_row
        .iter()
        .enumerate()
        .max_by_key(|&(_, x)| x.final_score)
        .expect("fuzzy_indices failed to iterate over last_row");

    // Walk the back-references from the last pattern row up to the first,
    // collecting the matched positions in reverse order.
    let mut picked = Vec::with_capacity(scores.len());
    for row in scores.iter().rev() {
        let status = row[col];
        picked.push(status.idx);
        col = status.back_ref;
    }
    picked.reverse();
    Some((final_score, picked))
}
/// One node in the dynamic-programming score graph: a position in `choice`
/// where some pattern character matched.
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
    // Character index of the match in the choice string.
    pub idx: usize,
    // Inline score of this single match.
    pub score: i64,
    // Best cumulative score for a path ending at this match.
    pub final_score: i64,
    // Number of unmatched characters between this match and its predecessor.
    pub adj_num: usize,
    // Column in the previous pattern row used to reach this node.
    pub back_ref: usize,
}

impl Default for MatchingStatus {
    fn default() -> Self {
        MatchingStatus {
            idx: 0,
            score: 0,
            final_score: 0,
            // Non-zero so a default node is not treated as adjacent.
            adj_num: 1,
            back_ref: 0,
        }
    }
}
// Build the DP score matrix: one row per pattern character, one entry per
// position in `choice` where that character may match. Returns `None` when
// the pattern cannot be matched at all.
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
    let mut scores = vec![];
    let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern
    let mut pat_prev_ch = '\0';
    // initialize the match positions and inline scores
    for (pat_idx, pat_ch) in pattern.chars().enumerate() {
        let mut vec = vec![];
        let mut choice_prev_ch = '\0';
        for (idx, ch) in choice.chars().enumerate() {
            if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
                let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
                vec.push(MatchingStatus {
                    idx,
                    score,
                    final_score: score,
                    adj_num: 1,
                    back_ref: 0,
                });
            }
            choice_prev_ch = ch;
        }
        if vec.is_empty() {
            // not matched
            return None;
        }
        // Subsequent pattern characters must match strictly after this row's
        // earliest match position.
        match_start_idx = vec[0].idx + 1;
        scores.push(vec);
        pat_prev_ch = pat_ch;
    }
    // calculate max scores considering adjacent characters
    for pat_idx in 1..scores.len() {
        // Split so we can read the previous row while mutating the current.
        let (first_half, last_half) = scores.split_at_mut(pat_idx);
        let prev_row = &first_half[first_half.len() - 1];
        let cur_row = &mut last_half[0];
        for idx in 0..cur_row.len() {
            let next = cur_row[idx];
            let prev = if idx > 0 {
                cur_row[idx - 1]
            } else {
                MatchingStatus::default()
            };
            // Candidate score when this node continues the best path of the
            // previous node in the *same* row (reusing its back-reference).
            let mut score_before_idx = prev.final_score - prev.score + next.score;
            score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
            score_before_idx -= if prev.adj_num == 0 {
                BONUS_ADJACENCY
            } else {
                0
            };
            // Best predecessor in the previous pattern row whose match index
            // lies in [prev.idx, next.idx).
            let (back_ref, score, adj_num) = prev_row
                .iter()
                .enumerate()
                .take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
                .skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
                .map(|(back_ref, cur)| {
                    let adj_num = next.idx - cur.idx - 1;
                    let mut final_score = cur.final_score + next.score;
                    final_score += if adj_num == 0 {
                        BONUS_ADJACENCY
                    } else {
                        PENALTY_UNMATCHED * adj_num as i64
                    };
                    (back_ref, final_score, adj_num)
                })
                .max_by_key(|&(_, x, _)| x)
                .unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
            // Keep whichever of the two candidates scores higher.
            cur_row[idx] = if idx > 0 && score < score_before_idx {
                MatchingStatus {
                    final_score: score_before_idx,
                    back_ref: prev.back_ref,
                    adj_num,
                    ..next
                }
            } else {
                MatchingStatus {
                    final_score: score,
                    back_ref,
                    adj_num,
                    ..next
                }
            };
        }
    }
    Some(scores)
}
/// Compute the inline score for matching `pat_ch` (at `pat_idx` in the
/// pattern) against `choice_ch` (at `choice_idx` in the choice string).
fn fuzzy_score(
    choice_ch: char,
    choice_idx: usize,
    choice_prev_ch: char,
    pat_ch: char,
    pat_idx: usize,
    _pat_prev_ch: char,
) -> i64 {
    // Base bonus for any (case-insensitive) match.
    let mut score = BONUS_MATCHED;

    // Exact-case matches earn extra; uppercase exact matches earn the most.
    score += if pat_ch == choice_ch {
        if pat_ch.is_uppercase() {
            BONUS_UPPER_MATCH
        } else {
            BONUS_CASE_MATCH
        }
    } else {
        PENALTY_CASE_UNMATCHED
    };

    // Reward the start of a word segment (camelCase hump).
    if char_role(choice_prev_ch, choice_ch) == CharRole::Head {
        score += BONUS_CAMEL;
    }

    // Reward characters that immediately follow a separator character.
    if char_type_of(choice_prev_ch) == CharType::Separ {
        score += BONUS_SEPARATOR;
    }

    // Penalize (up to a capped amount) when the first pattern character
    // matches far from the beginning of the choice.
    if pat_idx == 0 {
        score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
    }

    score
}
#[cfg(test)]
mod tests {
    use super::*;
    // Wrap each matched character in brackets so failures read naturally.
    fn wrap_matches(line: &str, indices: &[usize]) -> String {
        let mut ret = String::new();
        let mut peekable = indices.iter().peekable();
        for (idx, ch) in line.chars().enumerate() {
            let next_id = **peekable.peek().unwrap_or(&&line.len());
            if next_id == idx {
                ret.push_str(format!("[{}]", ch).as_str());
                peekable.next();
            } else {
                ret.push(ch);
            }
        }
        ret
    }
    // Sort `lines` by descending fuzzy score against `pattern`; non-matches
    // sink to the bottom via a very negative sentinel score.
    fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
        let mut lines_with_score: Vec<(i64, &'static str)> = lines
            .into_iter()
            .map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
            .collect();
        lines_with_score.sort_by_key(|(score, _)| -score);
        lines_with_score
            .into_iter()
            .map(|(_, string)| string)
            .collect()
    }
    fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
        let (_score, indices) = fuzzy_indices(line, pattern)?;
        Some(wrap_matches(line, &indices))
    }
    // Assert that `choices` is already ordered from best to worst match.
    fn assert_order(pattern: &str, choices: &[&'static str]) {
        let result = filter_and_sort(pattern, choices);
        if result != choices {
            // debug print
            println!("pattern: {}", pattern);
            for &choice in choices.iter() {
                if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
                    println!("{}: {:?}", score, wrap_matches(choice, &indices));
                } else {
                    println!("NO MATCH for {}", choice);
                }
            }
        }
        assert_eq!(result, choices);
    }
    #[test]
    fn test_match_or_not() {
        assert_eq!(Some(0), fuzzy_match("", ""));
        assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
        assert_eq!(None, fuzzy_match("", "a"));
        assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
        assert_eq!(None, fuzzy_match("abc", "abx"));
        assert!(fuzzy_match("axbycz", "abc").is_some());
        assert!(fuzzy_match("axbycz", "xyz").is_some());
        assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
        assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
        assert_eq!(
            "[H]ello, [世]界",
            &wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
        );
    }
    #[test]
    fn test_match_quality() {
        // case
        // assert_order("monad", &["monad", "Monad", "mONAD"]);
        // initials
        assert_order("ab", &["ab", "aoo_boo", "acb"]);
        assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
        assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
        assert_order(
            "cc",
            &[
                "camel case",
                "camelCase",
                "camelcase",
                "CamelCase",
                "camel ace",
            ],
        );
        assert_order(
            "Da.Te",
            &["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"],
        );
        // prefix
        assert_order("is", &["isIEEE", "inSuf"]);
        // shorter
        assert_order("ma", &["map", "many", "maximum"]);
        assert_order("print", &["printf", "sprintf"]);
        // score(PRINT) = kMinScore
        assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]);
        // score(PRINT) > kMinScore
        assert_order("Int", &["int", "INT", "PRINT"]);
    }
}
| true |
c32c64587b0b9c1ef7b7f2f9c2a046575b6ebf8e
|
Rust
|
lukisko/rust_chap_10
|
/src/main.rs
|
UTF-8
| 1,857 | 3.65625 | 4 |
[] |
no_license
|
/// Demonstrates the generic `Point` type and its `f32`-only method.
fn main() {
    println!("Hello, world!");
    let point = Point { x: 10.0, y: 20.0 };
    println!(
        "x part of point is {} and distance from origin is {}.",
        point.x(),
        point.distance_from_origin()
    );
}
/// Return the largest value in `list`.
///
/// # Panics
///
/// Panics if `list` is empty (the original indexed `list[0]` and panicked
/// there too).
fn largest(list: &[i32]) -> i32 {
    // `copied()` turns the `&i32` items into `i32`; `max` scans once.
    list.iter()
        .copied()
        .max()
        .expect("largest: list must be non-empty")
}
// A 2-D point generic over its coordinate type.
struct Point<T> {
    x: T,
    y: T,
}
impl<T> Point<T> {
    // Borrow the `x` coordinate (available for any coordinate type).
    fn x(&self) -> &T {
        &self.x
    }
}
impl Point<f32> {
    /// Euclidean distance from the origin `(0, 0)`.
    ///
    /// Uses `f32::hypot`, which computes `sqrt(x² + y²)` while avoiding the
    /// intermediate overflow/underflow that explicitly squaring the
    /// coordinates can cause.
    fn distance_from_origin(&self) -> f32 {
        self.x.hypot(self.y)
    }
}
// A trait plays the role of an interface: types opt in to shared behavior.
pub trait Summary{
    // Default implementation: types may override `summarize` or inherit this.
    fn summarize(&self) -> String{
        String::from("Read more...")
    }
}
// A news article; summarized via `Summary`'s default implementation below.
pub struct NewsArticle {
    pub headline: String,
    pub location: String,
    pub author: String,
    pub content: String,
}
// Empty impl body: NewsArticle inherits Summary's default `summarize`.
impl Summary for NewsArticle {}
/// Demonstrates returning `impl Trait`: callers only learn that the value
/// implements `Summary`, not that it is a `Tweet`.
pub fn returns_summarizable() -> impl Summary {
    let content = String::from("of course, as you probably already know, people");
    Tweet {
        username: String::from("horse_ebooks"),
        content,
        reply: false,
        retweet: false,
    }
}
// A tweet; provides its own `Summary` implementation below.
pub struct Tweet {
    pub username: String,
    pub content: String,
    pub reply: bool,
    pub retweet: bool,
}
impl Summary for Tweet {
    // Overrides the default: formats as "username: content".
    fn summarize(&self) -> String {
        format!("{}: {}", self.username, self.content)
    }
}
// Marker trait used below to demonstrate combining trait bounds.
pub trait Sum {}
// `where` clause syntax: each type parameter must satisfy all listed bounds.
pub trait Test {
    fn some_function<T, U>(t: &T, u: &U) -> i32
    where T: Summary + Clone,
          U: Clone + Sum;
}
/// Return whichever of `x` and `y` is longer; ties go to `y`.
///
/// The shared lifetime `'a` ties the returned borrow to the shorter-lived of
/// the two inputs.
fn longest<'a>(x: &'a str, y: &'a str) -> &'a str {
    match x.len() > y.len() {
        true => x,
        false => y,
    }
}
| true |
74c1c06e568a2010de3eb55578c441a9c2c94dc9
|
Rust
|
SnakeSolid/rust-gantt-diagram
|
/src/database/mod.rs
|
UTF-8
| 4,849 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
mod error;
pub use self::error::DatabaseError;
pub use self::error::DatabaseResult;
use fallible_iterator::FallibleIterator;
use postgres::params::ConnectParams;
use postgres::params::Host;
use postgres::Connection;
use postgres::TlsMode;
use time::strptime;
use time::Timespec;
/// Connection settings for a PostgreSQL server.
#[derive(Debug)]
pub struct PostgreSQL {
    server: String,
    port: u16,
    user: String,
    password: String,
}
const DEFAULT_DATABASE: &str = "postgres";
const FETCH_LIMIT: i32 = 1_000;
impl PostgreSQL {
pub fn new(server: &str, port: u16, user: &str, password: &str) -> PostgreSQL {
PostgreSQL {
server: server.into(),
port: port,
user: user.into(),
password: password.into(),
}
}
pub fn database_names(&self) -> DatabaseResult<Vec<String>> {
let connection = self.connect(None)?;
let mut result = Vec::new();
for row in &connection
.query(include_str!("sql/databases.sql"), &[])
.map_err(DatabaseError::query_execution_error)?
{
let name = row
.get_opt(0)
.ok_or_else(DatabaseError::column_not_exists)?;
result.push(
name.map_err(|error| DatabaseError::conversion_error(error, "database name"))?,
);
}
Ok(result)
}
pub fn stage_names(&self, database: &str) -> DatabaseResult<Vec<String>> {
let connection = self.connect(Some(database))?;
let mut result = Vec::new();
for row in &connection
.query(include_str!("sql/stages.sql"), &[])
.map_err(DatabaseError::query_execution_error)?
{
let name = row
.get_opt(0)
.ok_or_else(DatabaseError::column_not_exists)?;
result
.push(name.map_err(|error| DatabaseError::conversion_error(error, "maker name"))?);
}
Ok(result)
}
pub fn data<F, E>(
&self,
database: &str,
stage: &str,
mut callback: F,
) -> DatabaseResult<Result<(), E>>
where
F: FnMut(&str, Timespec, Timespec, &str, &str) -> Result<(), E>,
{
let connection = self.connect(Some(database))?;
let statement = connection
.prepare(include_str!("sql/data.sql"))
.map_err(DatabaseError::prepare_query_error)?;
let transaction = connection
.transaction()
.map_err(DatabaseError::transaction_error)?;
let mut rows = statement
.lazy_query(&transaction, &[&stage], FETCH_LIMIT)
.map_err(DatabaseError::query_execution_error)?;
while let Some(row) = rows.next().map_err(DatabaseError::query_execution_error)? {
let name: String = row
.get_opt(0)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "name"))?;
let start_time_str: String = row
.get_opt(1)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "start tame"))?;
let end_time_str: String = row
.get_opt(2)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "end time"))?;
let group: String = row
.get_opt(3)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "group name"))?;
let thread: String = row
.get_opt(4)
.ok_or_else(DatabaseError::column_not_exists)?
.map_err(|error| DatabaseError::conversion_error(error, "thread name"))?;
let start_time = strptime(&start_time_str, "%Y-%m-%d %H:%M:%S,%f")
.map_err(DatabaseError::time_parse_error)?
.to_timespec();
let end_time = strptime(&end_time_str, "%Y-%m-%d %H:%M:%S,%f")
.map_err(DatabaseError::time_parse_error)?
.to_timespec();
if let Err(err) = callback(&name, start_time, end_time, &group, &thread) {
return Ok(Err(err));
}
}
Ok(Ok(()))
}
fn connect(&self, database: Option<&str>) -> DatabaseResult<Connection> {
let password = Some(self.password.as_str()).filter(|w| !w.is_empty());
let params = ConnectParams::builder()
.port(self.port)
.user(&self.user, password)
.database(database.unwrap_or(DEFAULT_DATABASE))
.build(Host::Tcp(self.server.clone()));
Connection::connect(params, TlsMode::None).map_err(DatabaseError::connection_error)
}
}
| true |
d7165d8f059af78c4249d911b03d72631a456a78
|
Rust
|
adcopeland/peuler
|
/rust/peuler/src/bin/p10.rs
|
UTF-8
| 147 | 2.890625 | 3 |
[] |
no_license
|
/// Project Euler #10: sum of all primes below two million.
fn main() {
    let sum = (1..2000000)
        .filter(|&i| peuler::is_prime(i))
        .map(|i| i as u64)
        .sum::<u64>();
    println!("{}", sum);
}
| true |
a863b5d07a247b004ad140a36da00e116924662e
|
Rust
|
rhysd/Shiba
|
/v2/src/markdown/parser.rs
|
UTF-8
| 36,182 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
use super::sanitizer::{should_rebase_url, Sanitizer, SlashPath};
use crate::renderer::RawMessageWriter;
use aho_corasick::AhoCorasick;
use emojis::Emoji;
use memchr::{memchr_iter, Memchr};
use pulldown_cmark::{
Alignment, CodeBlockKind, CowStr, Event, HeadingLevel, LinkType, MathDisplay, Options, Parser,
Tag,
};
use std::cmp;
use std::collections::HashMap;
use std::io::{Read, Result, Write};
use std::iter::Peekable;
use std::marker::PhantomData;
use std::path::Path;
/// Byte range into the original Markdown source.
pub type Range = std::ops::Range<usize>;

/// Observer invoked for every text event together with its source range.
pub trait TextVisitor: Default {
    fn visit(&mut self, text: &str, range: &Range);
}

// No-op visitor for callers that do not need to collect text.
impl TextVisitor for () {
    fn visit(&mut self, _text: &str, _range: &Range) {}
}
/// Kind of a token produced by a `TextTokenizer`. `Normal` text is emitted
/// as-is; the `Match*` kinds mark search-match highlight spans.
#[derive(Clone, Copy, Debug)]
pub enum TokenKind {
    Normal,
    MatchOther,
    MatchCurrent,
    MatchOtherStart,
    MatchCurrentStart,
}

impl TokenKind {
    /// Render-tree tag name for this kind. `Normal` tokens are emitted as
    /// plain strings, never as tagged nodes, so they have no tag.
    fn tag(self) -> &'static str {
        use TokenKind::*;
        match self {
            MatchOther => "match",
            MatchCurrent => "match-current",
            MatchOtherStart => "match-start",
            MatchCurrentStart => "match-current-start",
            Normal => unreachable!(),
        }
    }
}
/// Splits text into `(kind, substring)` tokens, e.g. for search highlighting.
pub trait TextTokenizer {
    fn tokenize<'t>(&mut self, text: &'t str, range: &Range) -> (TokenKind, &'t str);
}

// Trivial tokenizer: the whole text is one normal token.
impl TextTokenizer for () {
    fn tokenize<'t>(&mut self, text: &'t str, _range: &Range) -> (TokenKind, &'t str) {
        (TokenKind::Normal, text)
    }
}
/// A Markdown document's source text plus the directory used to resolve
/// relative links.
#[derive(Default)]
pub struct MarkdownContent {
    source: String,
    base_dir: SlashPath,
}

impl MarkdownContent {
    /// Wrap `source`, remembering `base_dir` for relative-link resolution
    /// (an empty path when no base directory is given).
    pub fn new(source: String, base_dir: Option<&Path>) -> Self {
        let base_dir =
            if let Some(path) = base_dir { SlashPath::from(path) } else { SlashPath::default() };
        Self { source, base_dir }
    }

    /// Byte offset of the first difference between this content's source and
    /// `new`'s, or `None` when the sources are identical.
    pub fn modified_offset(&self, new: &Self) -> Option<usize> {
        let (prev_source, new_source) = (&self.source, &new.source);
        prev_source
            .as_bytes()
            .iter()
            .zip(new_source.as_bytes().iter())
            .position(|(a, b)| a != b)
            .or_else(|| {
                // No differing byte in the common prefix: when the lengths
                // differ, the change begins where the shorter source ends.
                let (prev_len, new_len) = (prev_source.len(), new_source.len());
                (prev_len != new_len).then_some(cmp::min(prev_len, new_len))
            })
    }

    /// True for the default/empty content.
    pub fn is_empty(&self) -> bool {
        self.source.is_empty() && self.base_dir.is_empty()
    }
}
/// Markdown parser configured for the renderer: a pulldown-cmark `Parser`
/// plus the content's base directory, an optional "last modified" byte
/// offset, and a text tokenizer.
pub struct MarkdownParser<'a, V: TextVisitor, T: TextTokenizer> {
    parser: Parser<'a, 'a>,
    base_dir: &'a SlashPath,
    offset: Option<usize>,
    text_tokenizer: T,
    // The visitor type is only materialized while writing; see `write_to`.
    _phantom: PhantomData<V>,
}

impl<'a, V: TextVisitor, T: TextTokenizer> MarkdownParser<'a, V, T> {
    /// Create a parser over `content` with the Markdown extensions the
    /// renderer understands enabled: strikethrough, footnotes, tables, task
    /// lists, and math.
    pub fn new(content: &'a MarkdownContent, offset: Option<usize>, text_tokenizer: T) -> Self {
        let mut options = Options::empty();
        options.insert(
            Options::ENABLE_STRIKETHROUGH
                | Options::ENABLE_FOOTNOTES
                | Options::ENABLE_TABLES
                | Options::ENABLE_TASKLISTS
                | Options::ENABLE_MATH,
        );
        let parser = Parser::new_ext(&content.source, options);
        let base_dir = &content.base_dir;
        Self { parser, base_dir, offset, text_tokenizer, _phantom: PhantomData }
    }
}
// Note: Build raw JavaScript expression which is evaluated to the render tree encoded as JSON value.
// This expression will be evaluated via `receive(JSON.parse('{"kind":"render_tree",...}'))` by renderer.
impl<'a, V: TextVisitor, T: TextTokenizer> RawMessageWriter for MarkdownParser<'a, V, T> {
    type Output = V;

    /// Encode the parsed document into `writer` and return the text visitor
    /// so callers can inspect whatever text it collected.
    fn write_to(self, writer: impl Write) -> Result<Self::Output> {
        let mut enc =
            RenderTreeEncoder::new(writer, self.base_dir, self.offset, self.text_tokenizer);
        // Wrap the tree in the JS expression the renderer will evaluate.
        enc.out.write_all(br#"JSON.parse('{"kind":"render_tree","tree":"#)?;
        enc.push(self.parser)?;
        enc.out.write_all(b"}')")?;
        Ok(enc.text_visitor)
    }
}
// To know the format of JSON value, see type definitions in web/ipc.ts
// Whether the encoder is currently inside a table header or a body row.
enum TableState {
    Head,
    Row,
}
// Note: Be careful, this function is called in the hot loop on encoding texts
// Escapes one byte so it survives two levels of decoding: the single-quoted
// JS string literal, then `JSON.parse` of the double-quoted JSON string.
#[inline]
#[allow(clippy::just_underscores_and_digits)]
fn encode_string_byte(mut out: impl Write, b: u8) -> Result<()> {
    const BB: u8 = b'b'; // \x08
    const TT: u8 = b't'; // \x09
    const NN: u8 = b'n'; // \x0a
    const FF: u8 = b'f'; // \x0c
    const RR: u8 = b'r'; // \x0d
    const DQ: u8 = b'"'; // \x22
    const SQ: u8 = b'\''; // \x27
    const BS: u8 = b'\\'; // \x5c
    const XX: u8 = 1; // \x00...\x1f non-printable
    const __: u8 = 0;
    // Lookup table mapping each byte to its escape class (0 = pass through).
    #[rustfmt::skip]
    const ESCAPE_TABLE: [u8; 256] = [
    //   0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
        XX, XX, XX, XX, XX, XX, XX, XX, BB, TT, NN, XX, FF, RR, XX, XX, // 0
        XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 1
        __, __, DQ, __, __, __, __, SQ, __, __, __, __, __, __, __, __, // 2
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4
        __, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, XX, // 7
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // E
        __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F
    ];
    match ESCAPE_TABLE[b as usize] {
        __ => out.write_all(&[b]),
        BS => out.write_all(br#"\\\\"#), // Escape twice for JS and JSON (\\\\ → \\ → \)
        SQ => out.write_all(br#"\'"#), // JSON string will be put in '...' JS string. ' needs to be escaped
        XX => write!(out, r#"\\u{:04x}"#, b),
        b => out.write_all(&[b'\\', b'\\', b]), // Escape \ itself: JSON.parse('\\n')
    }
}
// Writer adapter that escapes every byte via `encode_string_byte` so the
// output can be embedded inside the JS/JSON string context.
struct StringContentEncoder<W: Write>(W);

impl<W: Write> Write for StringContentEncoder<W> {
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        for b in buf.iter().copied() {
            encode_string_byte(&mut self.0, b)?;
        }
        // All input bytes are consumed, even though more bytes were written.
        Ok(buf.len())
    }

    fn flush(&mut self) -> Result<()> {
        self.0.flush()
    }
}
// Adapter that exposes a run of consecutive `Html`/`Text` events as one
// continuous byte stream so the sanitizer can process a raw HTML block.
struct RawHtmlReader<'a, I: Iterator<Item = (Event<'a>, Range)>> {
    current: CowStr<'a>,
    index: usize,
    events: Peekable<I>,
    // Open-tag depth; reading stops once it drops back to zero.
    stack: usize,
}

impl<'a, I: Iterator<Item = (Event<'a>, Range)>> RawHtmlReader<'a, I> {
    fn new(current: CowStr<'a>, events: Peekable<I>) -> Self {
        Self { current, index: 0, events, stack: 1 }
    }

    fn read_byte(&mut self) -> Option<u8> {
        // Current event was consumed. Fetch next event otherwise return `None`.
        while self.current.len() <= self.index {
            if !matches!(self.events.peek(), Some((Event::Html(_) | Event::Text(_), _)))
                || self.stack == 0
            {
                return None;
            }
            self.current = match self.events.next().unwrap().0 {
                Event::Html(html) => {
                    // Track nesting depth by tag direction.
                    // NOTE(review): self-closing tags (e.g. `<br/>`) also
                    // increment the depth — confirm upstream events make
                    // this safe.
                    if html.starts_with("</") {
                        self.stack -= 1;
                    } else {
                        self.stack += 1;
                    }
                    html
                }
                Event::Text(text) => text,
                _ => unreachable!(),
            };
            self.index = 0;
        }
        let b = self.current.as_bytes()[self.index];
        self.index += 1;
        Some(b)
    }
}
impl<'a, I: Iterator<Item = (Event<'a>, Range)>> Read for RawHtmlReader<'a, I> {
    /// Fill `buf` from the raw-HTML byte stream; a short count signals that
    /// the HTML run ended.
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let mut filled = 0;
        while filled < buf.len() {
            match self.read_byte() {
                Some(b) => {
                    buf[filled] = b;
                    filled += 1;
                }
                None => break,
            }
        }
        Ok(filled)
    }
}
// Streaming encoder that turns pulldown-cmark events into the renderer's
// JSON render-tree format.
struct RenderTreeEncoder<'a, W: Write, V: TextVisitor, T: TextTokenizer> {
    out: W,
    // Base directory for rebasing relative link destinations.
    base_dir: &'a SlashPath,
    // Whether we are in a table header or body (affects cell tags).
    table: TableState,
    // True until the first element of the current JSON array is written.
    is_start: bool,
    // Footnote name -> numeric id assignment.
    ids: HashMap<CowStr<'a>, usize>,
    // Byte offset of the last modification; cleared once the marker is emitted.
    modified: Option<usize>,
    text_visitor: V,
    text_tokenizer: T,
    autolinker: Autolinker,
    sanitizer: Sanitizer<'a>,
    // Suppresses autolink/emoji handling inside code blocks.
    in_code_block: bool,
}
impl<'a, W: Write, V: TextVisitor, T: TextTokenizer> RenderTreeEncoder<'a, W, V, T> {
    /// Create an encoder writing to `w`, resolving relative links against
    /// `base_dir` and remembering the optional last-modified offset.
    fn new(w: W, base_dir: &'a SlashPath, modified: Option<usize>, text_tokenizer: T) -> Self {
        Self {
            out: w,
            base_dir,
            table: TableState::Head,
            is_start: true,
            ids: HashMap::new(),
            modified,
            text_visitor: V::default(),
            text_tokenizer,
            autolinker: Autolinker::default(),
            sanitizer: Sanitizer::new(base_dir),
            in_code_block: false,
        }
    }
    /// Encode the whole event stream as one JSON array of nodes.
    fn push(&mut self, parser: Parser<'a, 'a>) -> Result<()> {
        self.out.write_all(b"[")?;
        self.events(parser)?;
        // Modified offset was not consumed by any text, it would mean that some non-text parts after any text were
        // modified. As a fallback, set 'modified' marker after the last text.
        if self.modified.is_some() {
            self.tag("modified")?;
            self.out.write_all(b"}")?;
        }
        self.out.write_all(b"]")
    }
fn string_content(&mut self, s: &str) -> Result<()> {
for b in s.as_bytes().iter().copied() {
encode_string_byte(&mut self.out, b)?;
}
Ok(())
}
    /// Write `s` as a double-quoted, fully escaped JSON string.
    fn string(&mut self, s: &str) -> Result<()> {
        self.out.write_all(b"\"")?;
        self.string_content(s)?;
        self.out.write_all(b"\"")
    }
    /// Write a table column alignment as a JSON value (`null` when unset).
    fn alignment(&mut self, a: Alignment) -> Result<()> {
        self.out.write_all(match a {
            Alignment::None => b"null",
            Alignment::Left => br#""left""#,
            Alignment::Center => br#""center""#,
            Alignment::Right => br#""right""#,
        })
    }
    /// Return a stable 1-based numeric id for `name` (used by footnotes),
    /// allocating the next id on first use.
    fn id(&mut self, name: CowStr<'a>) -> usize {
        // Compute the candidate id before `entry` borrows the map mutably.
        let new = self.ids.len() + 1;
        *self.ids.entry(name).or_insert(new)
    }
fn comma(&mut self) -> Result<()> {
if !self.is_start {
self.out.write_all(b",")?;
} else {
self.is_start = false;
}
Ok(())
}
    /// Begin a JSON node object `{"t":"<name>"`; the caller closes it.
    fn tag(&mut self, name: &str) -> Result<()> {
        self.comma()?;
        write!(self.out, r#"{{"t":"{}""#, name)
    }
    /// Emit `input` as a sequence of plain strings and match-highlight
    /// nodes, as classified by the text tokenizer.
    fn text_tokens(&mut self, mut input: &str, mut range: Range) -> Result<()> {
        use TokenKind::*;
        while !input.is_empty() {
            let (token, text) = self.text_tokenizer.tokenize(input, &range);
            match token {
                Normal => {
                    self.comma()?;
                    self.string(text)?;
                }
                MatchOther | MatchCurrent | MatchOtherStart | MatchCurrentStart => {
                    self.tag(token.tag())?;
                    self.children_begin()?;
                    self.string(text)?;
                    self.tag_end()?;
                }
            }
            // Advance past the token just emitted.
            input = &input[text.len()..];
            range.start += text.len();
        }
        Ok(())
    }
    /// Emit a text token, inserting the `modified` marker node once, where
    /// the remembered modified offset falls relative to this text.
    fn text(&mut self, text: &str, range: Range) -> Result<()> {
        self.text_visitor.visit(text, &range);
        let Some(offset) = self.modified else {
            return self.text_tokens(text, range);
        };
        let Range { start, end } = range;
        if end < offset {
            // Modification is after this token; nothing special to do yet.
            return self.text_tokens(text, range);
        }
        // Handle the last modified offset with this text token
        self.modified = None;
        log::debug!("Handling last modified offset: {:?}", offset);
        if offset <= start {
            // Marker first, then the whole text.
            self.tag("modified")?;
            self.out.write_all(b"}")?;
            self.text_tokens(text, range)
        } else if end == offset {
            // Whole text first, marker at the very end.
            self.text_tokens(text, range)?;
            self.tag("modified")?;
            self.out.write_all(b"}")
        } else {
            // Marker falls inside: split the text around the offset.
            // NOTE(review): assumes `offset` lands on a UTF-8 char boundary —
            // confirm upstream guarantees before relying on it.
            let i = offset - start;
            self.text_tokens(&text[..i], range.start..offset)?;
            self.tag("modified")?;
            self.out.write_all(b"}")?;
            self.text_tokens(&text[i..], offset..range.end)
        }
    }
    /// Emit text, replacing `:emoji:` shortcodes with emoji nodes.
    fn emoji_text(&mut self, text: &str, range: Range) -> Result<()> {
        let mut start = range.start;
        for token in EmojiTokenizer::new(text) {
            match token {
                EmojiToken::Text(text) => {
                    if !text.is_empty() {
                        self.text(text, start..start + text.len())?;
                        start += text.len();
                    }
                }
                EmojiToken::Emoji(emoji, len) => {
                    self.tag("emoji")?;
                    self.out.write_all(br#","name":"#)?;
                    self.string(emoji.name())?;
                    self.children_begin()?;
                    self.string(emoji.as_str())?;
                    self.tag_end()?;
                    start += len;
                }
            }
        }
        // Note: When some escaped text is included in input like "&", `start == range.end` invariant is violated here.
        // That's OK because pulldown-cmark tokenizes any escaped text as small as possible to reduce extra heap allocation.
        // For instance "foo & bar" is tokenized into three events Text("foo "), Text("&"), Test(" bar"). It means that
        // any escaped charactor is followed by no text within the token.
        Ok(())
    }
    /// Emit text, converting bare URLs into auto-link anchor nodes; the
    /// remaining text still gets emoji handling.
    fn autolink_text(&mut self, mut text: &str, range: Range) -> Result<()> {
        let Range { mut start, end } = range;
        while let Some((s, e)) = self.autolinker.find_autolink(text) {
            if s > 0 {
                // Text before the URL.
                self.emoji_text(&text[..s], start..start + s)?;
            }
            let url = &text[s..e];
            log::debug!("Auto-linking URL: {}", url);
            // The URL is both the href and the link's visible child text.
            self.tag("a")?;
            self.out.write_all(br#","auto":true,"href":"#)?;
            self.string(url)?;
            self.children_begin()?;
            self.text(url, start + s..start + e)?;
            self.tag_end()?;
            text = &text[e..];
            start += e;
        }
        if !text.is_empty() {
            self.emoji_text(text, start..end)?;
        }
        Ok(())
    }
    /// Drive the pulldown-cmark event stream, emitting one JSON node per
    /// Markdown construct.
    fn events(&mut self, parser: Parser<'a, 'a>) -> Result<()> {
        use Event::*;
        let mut events = parser.into_offset_iter().peekable();
        while let Some((event, range)) = events.next() {
            match event {
                Start(tag) => {
                    // Some start tags need a one-event lookahead.
                    let next_event = events.peek().map(|(e, _)| e);
                    self.start_tag(tag, next_event)?;
                }
                End(tag) => self.end_tag(tag)?,
                // Inside code blocks, skip autolink/emoji processing.
                Text(text) if self.in_code_block => self.text(&text, range)?,
                Text(text) => self.autolink_text(&text, range)?,
                Code(text) => {
                    // Strip the backtick delimiters from the source range.
                    let pad = (range.len() - text.len()) / 2;
                    let inner_range = (range.start + pad)..(range.end - pad);
                    self.tag("code")?;
                    self.children_begin()?;
                    self.text(&text, inner_range)?;
                    self.tag_end()?;
                }
                Html(html) => {
                    // Raw HTML is sanitized and embedded as an escaped string.
                    self.tag("html")?;
                    self.out.write_all(br#","raw":""#)?;
                    let mut dst = StringContentEncoder(&mut self.out);
                    let mut src = RawHtmlReader::new(html, events);
                    self.sanitizer.clean(&mut dst, &mut src)?;
                    // The reader consumed some events; take the iterator back.
                    events = src.events;
                    self.out.write_all(br#""}"#)?;
                }
                SoftBreak => self.text("\n", range)?,
                HardBreak => {
                    self.tag("br")?;
                    self.out.write_all(b"}")?;
                }
                Rule => {
                    self.tag("hr")?;
                    self.out.write_all(b"}")?;
                }
                FootnoteReference(name) => {
                    self.tag("fn-ref")?;
                    let id = self.id(name);
                    write!(self.out, r#","id":{}}}"#, id)?;
                }
                TaskListMarker(checked) => {
                    self.tag("checkbox")?;
                    write!(self.out, r#","checked":{}}}"#, checked)?;
                }
                Math(display, text) => {
                    self.tag("math")?;
                    write!(self.out, r#","inline":{},"expr":"#, display == MathDisplay::Inline)?;
                    self.string(&text)?;
                    self.out.write_all(b"}")?;
                }
            }
        }
        Ok(())
    }
/// Writes `dest` as a JSON string. Relative destinations are rebased onto
/// `self.base_dir`; URLs that must not be rebased (per `should_rebase_url`)
/// pass through unchanged.
fn rebase_link(&mut self, dest: &str) -> Result<()> {
    if !should_rebase_url(dest) {
        return self.string(dest);
    }
    // Rebase 'foo/bar/' with '/path/to/base' as '/path/to/base/foo/bar'
    self.out.write_all(b"\"")?;
    self.string_content(self.base_dir)?;
    if !dest.starts_with('/') {
        // Insert the separator only when `dest` does not already begin with one.
        self.out.write_all(b"/")?;
    }
    self.string_content(dest)?;
    self.out.write_all(b"\"")
}
/// Opens the `"c"` (children) array of the current tag object; subsequent
/// writes become the tag's child nodes until `tag_end` closes the array.
fn children_begin(&mut self) -> Result<()> {
    self.is_start = true;
    self.out.write_all(br#","c":["#)
}
/// Closes the children array and the enclosing tag object opened by
/// `children_begin`.
fn tag_end(&mut self) -> Result<()> {
    self.is_start = false;
    self.out.write_all(b"]}")
}
/// Emits the opening JSON object for `tag` (name plus tag-specific
/// attributes) and leaves its `"c"` children array open; `end_tag` closes it.
/// `next` is the peeked following event, used to special-case list items
/// that begin with a task-list checkbox.
fn start_tag(&mut self, tag: Tag<'a>, next: Option<&Event>) -> Result<()> {
    use Tag::*;
    match tag {
        Paragraph => {
            self.tag("p")?;
        }
        Heading(level, id, _) => {
            self.tag("h")?;
            let level: u8 = match level {
                HeadingLevel::H1 => 1,
                HeadingLevel::H2 => 2,
                HeadingLevel::H3 => 3,
                HeadingLevel::H4 => 4,
                HeadingLevel::H5 => 5,
                HeadingLevel::H6 => 6,
            };
            write!(self.out, r#","level":{}"#, level)?;
            if let Some(id) = id {
                // Explicit `{#id}` heading anchor, when present.
                self.out.write_all(br#","id":"#)?;
                self.string(id)?;
            }
        }
        Table(alignments) => {
            self.tag("table")?;
            // Per-column alignment array, comma-separated without trailing comma.
            self.out.write_all(br#","align":["#)?;
            let mut alignments = alignments.into_iter();
            if let Some(a) = alignments.next() {
                self.alignment(a)?;
            }
            for a in alignments {
                self.out.write_all(b",")?;
                self.alignment(a)?;
            }
            self.out.write_all(b"]")?;
        }
        TableHead => {
            // Track head/row state so TableCell can pick th vs td.
            self.table = TableState::Head;
            self.tag("thead")?;
            self.children_begin()?;
            self.tag("tr")?;
        }
        TableRow => {
            self.table = TableState::Row;
            self.tag("tr")?;
        }
        TableCell => {
            let tag = match self.table {
                TableState::Head => "th",
                TableState::Row => "td",
            };
            self.tag(tag)?;
        }
        BlockQuote => {
            self.tag("blockquote")?;
        }
        CodeBlock(info) => {
            // A code block is rendered as <pre><code lang=...>.
            self.tag("pre")?;
            self.children_begin()?;
            self.tag("code")?;
            if let CodeBlockKind::Fenced(info) = info {
                // The first word of the fence info string is the language.
                if let Some(lang) = info.split(' ').next() {
                    if !lang.is_empty() {
                        self.out.write_all(br#","lang":"#)?;
                        self.string(lang)?;
                    }
                }
            }
            self.in_code_block = true;
        }
        // Start index 1 is the default, so it is omitted from the output.
        List(Some(1)) => self.tag("ol")?,
        List(Some(start)) => {
            self.tag("ol")?;
            write!(self.out, r#","start":{}"#, start)?;
        }
        List(None) => self.tag("ul")?,
        Item => {
            // A list item immediately followed by a task-list marker is a
            // task-list entry rather than a plain <li>.
            if let Some(Event::TaskListMarker(_)) = next {
                self.tag("task-list")?;
            } else {
                self.tag("li")?;
            }
        }
        Emphasis => self.tag("em")?,
        Strong => self.tag("strong")?,
        Strikethrough => self.tag("del")?,
        Link(LinkType::Autolink, _, _) => return Ok(()), // Ignore autolink since it is linked by `Autolinker`
        Link(link_type, dest, title) => {
            self.tag("a")?;
            self.out.write_all(br#","href":"#)?;
            match link_type {
                LinkType::Email => {
                    self.out.write_all(b"\"mailto:")?;
                    self.string_content(&dest)?;
                    self.out.write_all(b"\"")?;
                }
                _ => self.rebase_link(&dest)?,
            }
            if !title.is_empty() {
                self.out.write_all(br#","title":"#)?;
                self.string(&title)?;
            }
        }
        Image(_link_type, dest, title) => {
            self.tag("img")?;
            if !title.is_empty() {
                self.out.write_all(br#","title":"#)?;
                self.string(&title)?;
            }
            self.out.write_all(br#","src":"#)?;
            self.rebase_link(&dest)?;
        }
        FootnoteDefinition(name) => {
            self.tag("fn-def")?;
            if !name.is_empty() {
                self.out.write_all(br#","name":"#)?;
                self.string(&name)?;
            }
            let id = self.id(name);
            write!(self.out, r#","id":{}"#, id)?;
        }
    }
    // Tag element must have its children (maybe empty)
    self.children_begin()
}
/// Closes the JSON object opened by `start_tag`. Most tags close a single
/// object; code blocks and tables close the extra nested wrapper, and a
/// finished table head additionally opens the `tbody` for the rows that follow.
fn end_tag(&mut self, tag: Tag<'a>) -> Result<()> {
    use Tag::*;
    match tag {
        Link(LinkType::Autolink, _, _) => Ok(()), // Ignore autolink since it is linked by `Autolinker`
        Paragraph
        | Heading(_, _, _)
        | TableRow
        | TableCell
        | BlockQuote
        | List(_)
        | Item
        | Emphasis
        | Strong
        | Strikethrough
        | Link(_, _, _)
        | Image(_, _, _)
        | FootnoteDefinition(_) => self.tag_end(),
        CodeBlock(_) => {
            self.in_code_block = false;
            // Close both <code> and the wrapping <pre>.
            self.tag_end()?;
            self.tag_end()
        }
        Table(_) => {
            // Close <tbody> (opened by TableHead's end) and <table>.
            self.tag_end()?;
            self.tag_end()
        }
        TableHead => {
            // Close the header <tr> and <thead>, then open <tbody> for rows.
            self.tag_end()?;
            self.tag_end()?;
            self.tag("tbody")?;
            self.children_begin()
        }
    }
}
}
/// Classification of a character with respect to URL detection.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UrlCharKind {
    /// Cannot appear in a URL at all; scanning stops here.
    Invalid,
    /// May appear anywhere in a URL, including at the end.
    Term,
    /// May appear inside a URL but not terminate it (e.g. trailing `.` or `,`
    /// is treated as punctuation, not part of the link).
    NonTerm,
}

impl UrlCharKind {
    /// Classifies `c` for URL scanning, loosely following the IRI character
    /// rules of RFC 3987 (https://www.rfc-editor.org/rfc/rfc3987).
    fn of(c: char) -> Self {
        const INVALID: &[char] = &[' ', '|', '"', '<', '>', '`', '(', ')', '[', ']'];
        const NON_TERM: &[char] =
            &['?', '!', '.', ',', ':', ';', '*', '&', '\\', '{', '}', '\''];
        if c <= '\u{1F}' || ('\u{7F}'..='\u{9F}').contains(&c) || INVALID.contains(&c) {
            // ASCII/C1 control characters and URL-breaking punctuation.
            Self::Invalid
        } else if NON_TERM.contains(&c) {
            Self::NonTerm
        } else {
            Self::Term
        }
    }
}
/// Finds bare URLs in plain text using an Aho-Corasick automaton over the
/// supported scheme prefixes.
struct Autolinker(AhoCorasick);

impl Default for Autolinker {
    fn default() -> Self {
        // Only http/https are auto-linked.
        Self(AhoCorasick::new(["https://", "http://"]).unwrap())
    }
}
impl Autolinker {
    /// Returns the byte range `(start, end)` of the first URL in `text`, or
    /// `None`. A scheme match immediately preceded by an ASCII letter is
    /// rejected, and the URL extends up to the last `Term` character so that
    /// trailing punctuation is excluded.
    fn find_autolink(&self, text: &str) -> Option<(usize, usize)> {
        for mat in self.0.find_iter(text) {
            let (start, scheme_end) = (mat.start(), mat.end());
            if let Some(c) = text[..start].chars().next_back() {
                if c.is_ascii_alphabetic() {
                    // Note: "foohttp://example.com" is not URL but "123http://example.com" contains URL
                    continue;
                }
            }
            // Scan forward, remembering the last position that may legally
            // end a URL; NonTerm chars are allowed inside but never terminal.
            let mut len = 0;
            for (i, c) in text[scheme_end..].char_indices() {
                match UrlCharKind::of(c) {
                    UrlCharKind::Invalid => break,
                    UrlCharKind::Term => {
                        len = i + c.len_utf8();
                    }
                    UrlCharKind::NonTerm => {}
                }
            }
            // A bare scheme with no body (len == 0) is not a URL.
            if len > 0 {
                return Some((start, scheme_end + len));
            }
        }
        None
    }
}
/// One token produced by `EmojiTokenizer`: either a run of plain text, or a
/// resolved emoji together with the source length of its `:shortcode:` form.
#[derive(Debug)]
enum EmojiToken<'a> {
    Text(&'a str),
    Emoji(&'static Emoji, usize),
}
/// Splits text into plain runs and `:shortcode:` emoji, locating candidate
/// boundaries by scanning for ':' bytes with memchr.
struct EmojiTokenizer<'a> {
    text: &'a str,
    iter: Memchr<'a>,   // positions of ':' bytes in `text`
    start: usize,       // byte offset of the first not-yet-emitted character
}

impl<'a> EmojiTokenizer<'a> {
    fn new(text: &'a str) -> Self {
        Self { iter: memchr_iter(b':', text.as_bytes()), text, start: 0 }
    }

    /// Emits `text[start..end]` and advances the cursor to `end`.
    fn eat(&mut self, end: usize) -> &'a str {
        let text = &self.text[self.start..end];
        self.start = end;
        text
    }
}
impl<'a> Iterator for EmojiTokenizer<'a> {
    type Item = EmojiToken<'a>;
    // Tokenizing example:
    //   "foo :dog: bar :piyo: wow"
    //   -> ":dog: bar :piyo: wow" (text "foo ")
    //   -> " bar :piyo: wow"      (emoji "dog")
    //   -> ":piyo: wow"           (text " bar ")
    //   -> ": wow"                (text ":piyo")
    //   -> ""                     (text ": wow")
    fn next(&mut self) -> Option<Self::Item> {
        if self.start == self.text.len() {
            return None;
        }
        // `end` is the position of the next ':' byte; without one, the rest
        // of the input is a single plain-text token.
        let Some(end) = self.iter.next() else {
            return Some(EmojiToken::Text(self.eat(self.text.len()))); // Eat all of rest
        };
        if self.start == end {
            // Edge case: The initial input text starts with ':'
            return self.next();
        }
        if !self.text[self.start..].starts_with(':') {
            // Current run does not begin with ':', so it is plain text up to
            // the next colon.
            return Some(EmojiToken::Text(self.eat(end)));
        }
        // Note:
        //   text[start..end+1] == ":dog:"
        //   text[start+1..end] == "dog"
        //   text[start..end]   == ":dog"
        let short = &self.text[self.start + 1..end];
        if let Some(emoji) = emojis::get_by_shortcode(short) {
            // Skip past the closing ':'; token length includes both colons.
            self.start = end + 1;
            Some(EmojiToken::Emoji(emoji, short.len() + 2))
        } else {
            // Unknown shortcode: emit ":word" as text; the trailing ':' may
            // still open the next emoji.
            Some(EmojiToken::Text(self.eat(end)))
        }
    }
}
// Snapshot tests for the Markdown-to-JSON renderer, plus unit tests for the
// emoji tokenizer and the autolinker. Snapshot expectations live next to the
// testdata and are managed by `insta`.
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use std::path::PathBuf;

    // Loads `src/markdown/testdata/<name>.md`, panicking with the path on error.
    fn load_data(name: &str) -> String {
        let mut path = PathBuf::from("src");
        path.push("markdown");
        path.push("testdata");
        path.push(format!("{}.md", name));
        match fs::read_to_string(&path) {
            Ok(text) => text,
            Err(err) => panic!("Could not find Markdown test data at {:?}: {}", path, err),
        }
    }

    // Renders the named testdata file (optionally with a modified offset and
    // base dir), checks the output is valid JSON, and snapshots it.
    macro_rules! snapshot_test {
        ($name:ident, $offset:expr, $basedir:expr) => {
            #[test]
            fn $name() {
                let source = load_data(stringify!($name));
                let target = MarkdownContent::new(source, $basedir);
                let parser = MarkdownParser::new(&target, $offset, ());
                let mut buf = Vec::new();
                let () = parser.write_to(&mut buf).unwrap();
                let buf = String::from_utf8(buf).unwrap();
                // Revert extra escape for '...' JavaScript string
                let buf = buf.replace("\\\\", "\\");
                // Remove the `JSON.parse` call to restore JSON value passed to the function
                let buf = buf.strip_prefix("JSON.parse('").unwrap();
                let buf = buf.strip_suffix("')").unwrap();
                // Check if the written output is in the valid JSON format
                let json: serde_json::Value = match serde_json::from_str(buf) {
                    Ok(value) => value,
                    Err(err) => {
                        panic!("Invalid JSON input with error \"{}\": {}", err, buf);
                    }
                };
                insta::assert_json_snapshot!(json);
            }
        };
        ($name:ident) => {
            snapshot_test!($name, None, None);
        };
        ($name:ident, $offset:expr) => {
            snapshot_test!($name, $offset, None);
        };
    }

    snapshot_test!(paragraph);
    snapshot_test!(blockquote);
    snapshot_test!(list);
    snapshot_test!(headings);
    snapshot_test!(codeblock);
    snapshot_test!(link);
    snapshot_test!(html);
    snapshot_test!(sanitized);
    snapshot_test!(inline_code);
    snapshot_test!(emphasis);
    snapshot_test!(image);
    snapshot_test!(autolink);
    snapshot_test!(emoji);
    snapshot_test!(table);
    snapshot_test!(math);
    snapshot_test!(strikethrough);
    snapshot_test!(tasklist);
    snapshot_test!(footnotes);
    snapshot_test!(highlight);
    snapshot_test!(not_link);

    // Offset
    snapshot_test!(offset_block, Some(30));
    snapshot_test!(offset_begin, Some(0));
    snapshot_test!(offset_after_end, Some(10000000));
    snapshot_test!(offset_in_emphasis, Some(10));

    // Relative link resolutions
    #[cfg(target_os = "windows")]
    const BASE_DIR: &str = r#"\a\b\c\d\e"#;
    #[cfg(not(target_os = "windows"))]
    const BASE_DIR: &str = "/a/b/c/d/e";
    snapshot_test!(relative_links, None, Some(Path::new(BASE_DIR)));

    // Tests for the `DisplayText` visitor: checks the collected raw text and
    // that every sourcemap range points back into the original source.
    mod visitor {
        use super::*;
        use crate::markdown::DisplayText;

        macro_rules! snapshot_test {
            ($name:ident) => {
                #[test]
                fn $name() {
                    let source = load_data(stringify!($name));
                    let content = MarkdownContent::new(source, None);
                    let parser = MarkdownParser::new(&content, None, ());
                    let mut buf = Vec::new();
                    let visitor: DisplayText = parser.write_to(&mut buf).unwrap();
                    let text = &visitor.raw_text();
                    let source = &content.source;
                    let mut mapped = vec![];
                    for map in visitor.sourcemap() {
                        let slice = &source[map.clone()];
                        assert!(
                            source.contains(slice),
                            "{:?} does not contain {:?}",
                            source,
                            text,
                        );
                        mapped.push((slice, map.clone()));
                    }
                    insta::assert_debug_snapshot!((text, mapped));
                }
            };
        }

        snapshot_test!(paragraph);
        snapshot_test!(blockquote);
        snapshot_test!(list);
        snapshot_test!(headings);
        snapshot_test!(codeblock);
        snapshot_test!(link);
        snapshot_test!(html);
        snapshot_test!(sanitized);
        snapshot_test!(inline_code);
        snapshot_test!(emphasis);
        snapshot_test!(image);
        snapshot_test!(autolink);
        snapshot_test!(emoji);
        snapshot_test!(table);
        snapshot_test!(math);
        snapshot_test!(strikethrough);
        snapshot_test!(tasklist);
        snapshot_test!(footnotes);
        snapshot_test!(highlight);
        snapshot_test!(not_link);
    }

    // Table-driven check of EmojiTokenizer over edge cases (adjacent emoji,
    // unknown shortcodes, bare colons, empty input).
    #[test]
    fn emoji_tokenizer() {
        #[derive(PartialEq, Eq, Debug)]
        enum Tok {
            T(&'static str),
            E(&'static str, usize),
        }
        for (input, expected) in [
            (":dog:", &[Tok::E("dog face", 5)][..]),
            (":nerd_face:", &[Tok::E("nerd face", 11)][..]),
            (":+1:", &[Tok::E("thumbs up", 4)][..]),
            (":-1:", &[Tok::E("thumbs down", 4)][..]),
            (":dog::cat:", &[Tok::E("dog face", 5), Tok::E("cat face", 5)][..]),
            (":dog: :cat:", &[Tok::E("dog face", 5), Tok::T(" "), Tok::E("cat face", 5)][..]),
            (
                " :dog: :cat: ",
                &[
                    Tok::T(" "),
                    Tok::E("dog face", 5),
                    Tok::T(" "),
                    Tok::E("cat face", 5),
                    Tok::T(" "),
                ][..],
            ),
            (
                "hello :dog: world :cat: nyan",
                &[
                    Tok::T("hello "),
                    Tok::E("dog face", 5),
                    Tok::T(" world "),
                    Tok::E("cat face", 5),
                    Tok::T(" nyan"),
                ][..],
            ),
            ("hello, world", &[Tok::T("hello, world")][..]),
            ("", &[][..]),
            ("dog:", &[Tok::T("dog"), Tok::T(":")][..]),
            (":dog", &[Tok::T(":dog")][..]),
            (":this-is-not-an-emoji:", &[Tok::T(":this-is-not-an-emoji"), Tok::T(":")][..]),
            (
                ":not-emoji:not-emoji:dog:",
                &[Tok::T(":not-emoji"), Tok::T(":not-emoji"), Tok::E("dog face", 5)][..],
            ),
            (
                ":not-emoji:not-emoji:dog:",
                &[Tok::T(":not-emoji"), Tok::T(":not-emoji"), Tok::E("dog face", 5)][..],
            ),
            ("::::", &[Tok::T(":"), Tok::T(":"), Tok::T(":"), Tok::T(":")][..]),
        ] {
            let actual = EmojiTokenizer::new(input)
                .map(|tok| match tok {
                    EmojiToken::Text(text) => Tok::T(text),
                    EmojiToken::Emoji(emoji, len) => Tok::E(emoji.name(), len),
                })
                .collect::<Vec<_>>();
            assert_eq!(expected, actual, "input={:?}", input);
        }
    }

    // Table-driven check of Autolinker: expected URL (or None) per input.
    #[test]
    fn auto_linker() {
        for (input, url) in [
            ("http://example.com", Some("http://example.com")),
            ("https://example.com", Some("https://example.com")),
            ("http://example.com/foo", Some("http://example.com/foo")),
            ("http://example.com/foo/", Some("http://example.com/foo/")),
            ("http://example.com&foo=bar", Some("http://example.com&foo=bar")),
            ("hello http://example.com world", Some("http://example.com")),
            ("[foo](http://example.com)", Some("http://example.com")),
            ("[http://example.com]", Some("http://example.com")),
            ("Nice URL https://example.com!", Some("https://example.com")),
            ("This is URL https://example.com.", Some("https://example.com")),
            ("Is this URL https://example.com?", Some("https://example.com")),
            ("He said 'https://example.com'", Some("https://example.com")),
            ("Open https://example.com, and click button", Some("https://example.com")),
            ("https://example.com&", Some("https://example.com")),
            ("123http://aaa.com", Some("http://aaa.com")),
            ("file:///foo/bar", None),
            ("", None),
            ("hello, world", None),
            ("http:", None),
            ("http://", None),
            ("foohttp://aaa.com", None),
        ] {
            let found = Autolinker::default().find_autolink(input);
            assert_eq!(
                url.is_some(),
                found.is_some(),
                "input={input:?}, found={found:?}, expected={url:?}",
            );
            if let Some(url) = url {
                let (s, e) = found.unwrap();
                assert_eq!(url, &input[s..e]);
            }
        }
    }
}
| true |
ea6f2d2b290899bebb21159217cfdf59f8c8008c
|
Rust
|
fnune/exercises
|
/linked_list/linked_list.rs
|
UTF-8
| 3,121 | 3.734375 | 4 |
[
"MIT"
] |
permissive
|
#![feature(alloc)]
#![feature(shared)]
extern crate alloc;
extern crate core;
use alloc::boxed::{Box};
use core::ptr::{Shared};
/// A single list node: the stored value plus an optional raw link to the next
/// node (`Shared` is a non-owning raw-pointer wrapper from the old nightly API).
struct Node<T> {
    content: T,
    next: Option<Shared<Node<T>>>,
}
impl<T> Node<T> {
    /// Creates a detached node holding `content` with no successor.
    fn new(content: T) -> Self {
        Node {
            next: None,
            content,
        }
    }

    /// Consumes an owned node and returns just its content; the node's
    /// allocation is freed when the `Box` drops.
    fn pluck_content(node: Box<Self>) -> T {
        node.content
    }
}
/// A singly linked list whose nodes are heap-allocated and linked through raw
/// `Shared` pointers; the list logically owns every node reachable from `head`.
struct FLinkedList<T> {
    head: Option<Shared<Node<T>>>,  // first node, None when the list is empty
    len: usize,                     // number of nodes currently linked
}
impl<T> FLinkedList<T> {
pub fn new() -> FLinkedList<T> {
FLinkedList {
head: None,
len: 0,
}
}
pub fn prepend(&mut self, element: T) {
let node = Box::new(Node::new(element));
self.prepend_node(node);
}
pub fn pop_head(&mut self) -> Option<T> {
self.pop_head_node().map(Node::pluck_content)
}
pub fn at(&self, index: usize) -> Option<T> {
self.node_at(index).map(Node::pluck_content)
}
fn prepend_node(&mut self, mut node: Box<Node<T>>) {
unsafe {
node.next = self.head;
self.head = Some(Shared::new(Box::into_raw(node)));
self.len += 1;
}
}
fn pop_head_node(&mut self) -> Option<Box<Node<T>>> {
self.head.map(|node| unsafe {
let node = Box::from_raw(node.as_ptr());
self.head = node.next;
self.len -= 1;
node
})
}
fn node_at(&self, mut index: usize) -> Option<Box<Node<T>>> {
if index >= self.len { None } else {
let mut current = self.head;
while index > 0 {
unsafe {
current = match current {
Some(element) => Box::from_raw(element.as_ptr()).next,
_ => None,
}
}
index -= 1;
}
unsafe {
match current {
Some(element) => Some(Box::from_raw(element.as_ptr())),
_ => None,
}
}
}
}
}
/// Entry point; the actual exercise is exercised by the tests below.
fn main() {
    let banner = "Singly linked list exercise.";
    println!("{}", banner);
}
// Length bookkeeping: two prepends yield len == 2.
#[test]
fn prepend_extends_list_length() {
    let mut my_linked_list: FLinkedList<i32> = FLinkedList::new();
    my_linked_list.prepend(4);
    my_linked_list.prepend(2);
    assert_eq!(my_linked_list.len, 2);
}

// LIFO order of prepend/pop_head, then empty-list behavior.
#[test]
fn prepend_and_pop_head_work() {
    let mut my_linked_list: FLinkedList<&str> = FLinkedList::new();
    my_linked_list.prepend("there");
    my_linked_list.prepend("hello");
    assert_eq!(my_linked_list.pop_head(), Some("hello"));
    assert_eq!(my_linked_list.pop_head(), Some("there"));
    assert_eq!(my_linked_list.pop_head(), None);
}

// Random access via `at`, including an out-of-bounds index.
#[test]
fn node_at_works() {
    let mut my_linked_list: FLinkedList<&str> = FLinkedList::new();
    my_linked_list.prepend("Hello");
    my_linked_list.prepend("World");
    assert_eq!(my_linked_list.at(0), Some("World"));
    // Segfault here - Does not happen if run in the main program (?)
    // NOTE(review): the crash comes from `node_at` building `Box`es from
    // pointers the list still owns, so traversed nodes are freed while live.
    assert_eq!(my_linked_list.at(1), Some("Hello"));
    assert_eq!(my_linked_list.at(2), None);
}
| true |
daf52c17056783913857ced97bf466c4c89609f4
|
Rust
|
lain-dono/klein-rs
|
/glsl_shim.rs
|
UTF-8
| 4,759 | 3.296875 | 3 |
[] |
no_license
|
/// Maps a GLSL swizzle component character to its vector index:
/// `'x' -> 0`, `'y' -> 1`, `'z' -> 2`, `'w' -> 3`.
///
/// Panics via `unimplemented!` for any other character.
pub fn swizzle_index(c: char) -> usize {
    match "xyzw".find(c) {
        Some(index) => index,
        None => unimplemented!(),
    }
}
/*
#define SWIZZLE(a, b, c, d) \
swizzle<swizzle_index(#a[0]), \
swizzle_index(#b[0]), \
swizzle_index(#c[0]), \
swizzle_index(#d[0])> \
a##b##c##d
#define SWIZZLE_3(a, b, c) \
SWIZZLE(a, b, c, x); \
SWIZZLE(a, b, c, y); \
SWIZZLE(a, b, c, z); \
SWIZZLE(a, b, c, w);
#define SWIZZLE_2(a, b) \
SWIZZLE_3(a, b, x); \
SWIZZLE_3(a, b, y); \
SWIZZLE_3(a, b, z); \
SWIZZLE_3(a, b, w);
#define SWIZZLE_1(a) \
SWIZZLE_2(a, x); \
SWIZZLE_2(a, y); \
SWIZZLE_2(a, z); \
SWIZZLE_2(a, w);
#define SWIZZLES \
SWIZZLE_1(x); \
SWIZZLE_1(y); \
SWIZZLE_1(z); \
SWIZZLE_1(w)
// Redefine various glsl types and keywords
#define in
#define out
*/
/// Backing storage for a GLSL-style `vec4`.
///
/// The original declaration had no fields, which does not compile: Rust
/// unions must have at least one field. `data` mirrors the `float data[4]`
/// member of the C++ union this file shims (see the commented-out reference
/// implementation in this file); the swizzle/component views remain to be
/// ported.
#[allow(non_camel_case_types)]
pub union vec4 {
    pub data: [f32; 4],
}
/*
struct vec4
{
template <uint8_t a, uint8_t b, uint8_t c, uint8_t d>
struct swizzle
{
constexpr operator vec4() const noexcept
{
float const* data = reinterpret_cast<float const*>(this);
return {data[a], data[b], data[c], data[d]};
}
template <uint8_t e, uint8_t f, uint8_t g, uint8_t h>
vec4 operator*(swizzle<e, f, g, h> const& other) const noexcept
{
return static_cast<vec4>(*this) * static_cast<vec4>(other);
}
vec4 operator*(vec4 const& other) const noexcept
{
return static_cast<vec4>(*this) * other;
}
};
template <uint8_t i>
struct component
{
operator float() const noexcept
{
return reinterpret_cast<float const*>(this)[i];
}
vec4 operator*(vec4 const& other) const noexcept
{
return other * static_cast<float>(*this);
}
float operator-() const noexcept
{
return -reinterpret_cast<float const*>(this)[i];
}
component& operator=(float other) noexcept
{
reinterpret_cast<float*>(this)[i] = other;
return *this;
}
};
union
{
float data[4];
component<0> x;
component<1> y;
component<2> z;
component<3> w;
SWIZZLES;
};
vec4() = default;
vec4(float a, float b, float c, float d) noexcept
: data{a, b, c, d}
{}
vec4 operator*(float other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] * other;
}
return result;
}
vec4& operator*=(float other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] = data[i] * other;
}
return *this;
}
template <uint8_t j>
vec4 operator*(component<j> const& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] * static_cast<float>(other);
}
return result;
}
template <uint8_t j>
vec4& operator*=(component<j> const& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] = data[i] * static_cast<float>(other);
}
return *this;
}
vec4 operator+(const vec4& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] + other.data[i];
}
return result;
}
vec4 operator*(const vec4& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] * other.data[i];
}
return result;
}
vec4 operator-(const vec4& other) const noexcept
{
vec4 result;
for (size_t i = 0; i != 4; ++i)
{
result.data[i] = data[i] - other.data[i];
}
return result;
}
vec4& operator+=(const vec4& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] += other.data[i];
}
return *this;
}
vec4& operator*=(const vec4& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] *= other.data[i];
}
return *this;
}
vec4& operator-=(const vec4& other) noexcept
{
for (size_t i = 0; i != 4; ++i)
{
data[i] -= other.data[i];
}
return *this;
}
};
float dot(vec4 const& a, vec4 const& b)
{
float result = 0;
for (size_t i = 0; i != 4; ++i)
{
result += a.data[i] * b.data[i];
}
return result;
}
*/
| true |
b6b508f2a95329f89a910c7d75cf8c779b10e643
|
Rust
|
nimiq/core-rs
|
/beserial/src/types.rs
|
UTF-8
| 6,198 | 2.96875 | 3 |
[
"Apache-2.0"
] |
permissive
|
use crate::{Deserialize, ReadBytesExt, Serialize, SerializingError, WriteBytesExt};
use num;
/// Variable-length unsigned integer: a thin wrapper around the `u64` payload
/// whose wire representation is handled by the Serialize/Deserialize impls.
#[allow(non_camel_case_types)]
#[derive(Ord, PartialOrd, Eq, PartialEq, Debug, Copy, Clone)]
pub struct uvar(u64);

impl From<uvar> for u64 {
    /// Unwraps the inner `u64`.
    fn from(u: uvar) -> Self {
        let uvar(value) = u;
        value
    }
}

impl From<u64> for uvar {
    /// Wraps a raw `u64` as a `uvar`.
    fn from(u: u64) -> Self {
        uvar(u)
    }
}
impl num::FromPrimitive for uvar {
    // Negative values are unrepresentable; everything else converts exactly.
    fn from_i64(n: i64) -> Option<Self> { if n < 0 { None } else { Some(uvar(n as u64)) } }
    fn from_u64(n: u64) -> Option<Self> { Some(uvar(n)) }
}

impl num::ToPrimitive for uvar {
    // Values above i64::MAX cannot be expressed as i64.
    fn to_i64(&self) -> Option<i64> { if self.0 > i64::max_value() as u64 { None } else { Some(self.0 as i64) } }
    fn to_u64(&self) -> Option<u64> { Some(self.0) }
}
impl Serialize for uvar {
    /// Writes the prefix-coded variable-length encoding: the run of leading 1
    /// bits in the first byte tells how many extra bytes follow, and before
    /// encoding, the lower bound of each length's range is subtracted so that
    /// every value has exactly one (shortest) encoding. Returns bytes written.
    /// Must stay in sync with `serialized_size` and `Deserialize`.
    fn serialize<W: WriteBytesExt>(&self, writer: &mut W) -> Result<usize, SerializingError> {
        let mut size = 0;
        if self.0 < 0x80 {
            // Just that byte
            size += Serialize::serialize(&(self.0 as u8), writer)?;
        } else if self.0 < 0x4080 {
            // +1 bytes
            let x = self.0 - 0x80;
            size += Serialize::serialize(&((x | 0x8000) as u16), writer)?;
        } else if self.0 < 0x0020_4080 {
            // +2 bytes
            let x = self.0 - 0x4080;
            size += Serialize::serialize(&(((x >> 8) | 0xC000) as u16), writer)?;
            size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
        } else if self.0 < 0x1020_4080 {
            // +3 bytes
            let x = self.0 - 0x0020_4080;
            size += Serialize::serialize(&((x | 0xE000_0000) as u32), writer)?;
        } else if self.0 < 0x0008_1020_4080 {
            // +4 bytes
            let x = self.0 - 0x1020_4080;
            size += Serialize::serialize(&(((x >> 8) | 0xF000_0000) as u32), writer)?;
            size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
        } else if self.0 < 0x0408_1020_4080 {
            // +5 bytes
            let x = self.0 - 0x0008_1020_4080;
            size += Serialize::serialize(&(((x >> 16) | 0xF800_0000) as u32), writer)?;
            size += Serialize::serialize(&((x & 0xFFFF) as u16), writer)?;
        } else if self.0 < 0x0002_0408_1020_4080 {
            // +6 bytes
            let x = self.0 - 0x0408_1020_4080;
            size += Serialize::serialize(&(((x >> 24) | 0xFC00_0000) as u32), writer)?;
            size += Serialize::serialize(&(((x >> 8) & 0xFFFF) as u16), writer)?;
            size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
        } else if self.0 < 0x0102_0408_1020_4080 {
            // +7 bytes
            let x = self.0 - 0x0002_0408_1020_4080;
            size += Serialize::serialize(&((x | 0xFE00_0000_0000_0000) as u64), writer)?;
        } else {
            // +8 bytes
            let x = self.0 - 0x0102_0408_1020_4080;
            size += Serialize::serialize(&(((x >> 8) | 0xFF00_0000_0000_0000) as u64), writer)?;
            size += Serialize::serialize(&((x & 0xFF) as u8), writer)?;
        }
        Ok(size)
    }

    /// Number of bytes `serialize` will write; the range boundaries mirror
    /// the branches of `serialize` exactly.
    fn serialized_size(&self) -> usize {
        if self.0 < 0x80 {
            1
        } else if self.0 < 0x4080 {
            2
        } else if self.0 < 0x0020_4080 {
            3
        } else if self.0 < 0x1020_4080 {
            4
        } else if self.0 < 0x0008_1020_4080 {
            5
        } else if self.0 < 0x0408_1020_4080 {
            6
        } else if self.0 < 0x0002_0408_1020_4080 {
            7
        } else if self.0 < 0x0102_0408_1020_4080 {
            8
        } else { 9 }
    }
}
impl Deserialize for uvar {
    /// Reads one `uvar` back: the leading-ones prefix of the first byte
    /// selects the total length, remaining payload bits are reassembled
    /// big-endian-style, and the same per-length offset that `serialize`
    /// subtracted is added back. Only the 9-byte form can exceed `u64`,
    /// hence the single overflow check there.
    fn deserialize<R: ReadBytesExt>(reader: &mut R) -> Result<Self, SerializingError> {
        // Helper: read a fixed-width integer and widen it to u64.
        fn read<T: num::ToPrimitive + Deserialize, R: ReadBytesExt>(reader: &mut R) -> Result<u64, SerializingError> {
            let n: T = Deserialize::deserialize(reader)?;
            Ok(n.to_u64().unwrap())
        }
        let first_byte: u8 = Deserialize::deserialize(reader)?;
        if first_byte == 0xFF {
            // 8 bytes follow
            let byte_1_8 = read::<u64, R>(reader)?;
            if byte_1_8 > u64::max_value() - 0x0102_0408_1020_4080 {
                return Err(SerializingError::Overflow);
            }
            Ok(uvar(byte_1_8 + 0x0102_0408_1020_4080))
        } else if first_byte == 0xFE {
            // 7 bytes follow
            let byte_1 = read::<u8, R>(reader)?;
            let byte_2_3 = read::<u16, R>(reader)?;
            let byte_4_7 = read::<u32, R>(reader)?;
            Ok(uvar((byte_1 << 48) + (byte_2_3 << 32) + byte_4_7 + 0x0002_0408_1020_4080))
        } else if first_byte & 0xFC == 0xFC {
            // 6 bytes follow
            let byte_1_2 = read::<u16, R>(reader)?;
            let byte_3_6 = read::<u32, R>(reader)?;
            Ok(uvar(((u64::from(first_byte) & 0x01) << 48) + (byte_1_2 << 32) + byte_3_6 + 0x0408_1020_4080))
        } else if first_byte & 0xF8 == 0xF8 {
            // 5 bytes to follow
            let byte_1 = read::<u8, R>(reader)?;
            let byte_2_5 = read::<u32, R>(reader)?;
            Ok(uvar(((u64::from(first_byte) & 0x03) << 40) + (byte_1 << 32) + byte_2_5 + 0x0008_1020_4080))
        } else if first_byte & 0xF0 == 0xF0 {
            // 4 bytes to follow
            let byte_1_4 = read::<u32, R>(reader)?;
            Ok(uvar(((u64::from(first_byte) & 0x07) << 32) + byte_1_4 + 0x1020_4080))
        } else if first_byte & 0xE0 == 0xE0 {
            // 3 bytes to follow
            let byte_1 = read::<u8, R>(reader)?;
            let byte_2_3 = read::<u16, R>(reader)?;
            Ok(uvar(((u64::from(first_byte) & 0x0f) << 24) + (byte_1 << 16) + byte_2_3 + 0x0020_4080))
        } else if first_byte & 0xC0 == 0xC0 {
            // 2 bytes to follow
            let byte_1_2 = read::<u16, R>(reader)?;
            Ok(uvar(((u64::from(first_byte) & 0x1f) << 16) + byte_1_2 + 0x4080))
        } else if first_byte & 0x80 == 0x80 {
            // 1 byte follows
            let byte_1 = read::<u8, R>(reader)?;
            Ok(uvar(((u64::from(first_byte) & 0x3f) << 8) + byte_1 + 0x80))
        } else {
            // Just that byte
            Ok(uvar(u64::from(first_byte)))
        }
    }
}
| true |
4ae52db70e06e474eaa4596b8c39bbea4af6524a
|
Rust
|
rust-lang/rust
|
/library/portable-simd/crates/core_simd/src/alias.rs
|
UTF-8
| 4,097 | 2.859375 | 3 |
[
"Apache-2.0",
"MIT",
"LLVM-exception",
"NCSA",
"BSD-2-Clause",
"LicenseRef-scancode-unicode",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
/// Spells out the lane counts used in the generated alias docs; any count
/// without a spelled-out form falls through to its decimal literal.
macro_rules! number {
    (1) => { "one" };
    (2) => { "two" };
    (4) => { "four" };
    (8) => { "eight" };
    ($x:literal) => { stringify!($x) };
}
/// Empty string for a count of one, "s" otherwise — pluralises "element" in
/// the generated alias docs.
macro_rules! plural {
    (1) => { "" };
    ($x:literal) => { "s" };
}
// Generates a public type alias `$alias = Simd<$element_ty, $num_elements>`
// for every (element type, lane count) pair listed, each with a generated
// doc comment of the form "A SIMD vector with N elements of type [`T`].".
macro_rules! alias {
    {
        $(
            $element_ty:ty = {
                $($alias:ident $num_elements:tt)*
            }
        )*
    } => {
        $(
            $(
                #[doc = concat!("A SIMD vector with ", number!($num_elements), " element", plural!($num_elements), " of type [`", stringify!($element_ty), "`].")]
                #[allow(non_camel_case_types)]
                pub type $alias = $crate::simd::Simd<$element_ty, $num_elements>;
            )*
        )*
    }
}
// Like `alias!`, but for mask types: generates `$alias = Mask<$element_ty,
// $num_elements>` per entry, with docs that also warn that the mask layout
// is unspecified and not equivalent to `[$element_ty; N]`.
macro_rules! mask_alias {
    {
        $(
            $element_ty:ty : $size:literal = {
                $($alias:ident $num_elements:tt)*
            }
        )*
    } => {
        $(
            $(
                #[doc = concat!("A SIMD mask with ", number!($num_elements), " element", plural!($num_elements), " for vectors with ", $size, " element types.")]
                ///
                #[doc = concat!(
                    "The layout of this type is unspecified, and may change between platforms and/or Rust versions, and code should not assume that it is equivalent to `[",
                    stringify!($element_ty), "; ", $num_elements, "]`."
                )]
                #[allow(non_camel_case_types)]
                pub type $alias = $crate::simd::Mask<$element_ty, $num_elements>;
            )*
        )*
    }
}
// Instantiate the vector aliases (i8x1..=f64x64) and mask aliases
// (mask8x1..=masksizex64) for every supported element type and lane count.
alias! {
    i8 = {
        i8x1 1
        i8x2 2
        i8x4 4
        i8x8 8
        i8x16 16
        i8x32 32
        i8x64 64
    }
    i16 = {
        i16x1 1
        i16x2 2
        i16x4 4
        i16x8 8
        i16x16 16
        i16x32 32
        i16x64 64
    }
    i32 = {
        i32x1 1
        i32x2 2
        i32x4 4
        i32x8 8
        i32x16 16
        i32x32 32
        i32x64 64
    }
    i64 = {
        i64x1 1
        i64x2 2
        i64x4 4
        i64x8 8
        i64x16 16
        i64x32 32
        i64x64 64
    }
    isize = {
        isizex1 1
        isizex2 2
        isizex4 4
        isizex8 8
        isizex16 16
        isizex32 32
        isizex64 64
    }
    u8 = {
        u8x1 1
        u8x2 2
        u8x4 4
        u8x8 8
        u8x16 16
        u8x32 32
        u8x64 64
    }
    u16 = {
        u16x1 1
        u16x2 2
        u16x4 4
        u16x8 8
        u16x16 16
        u16x32 32
        u16x64 64
    }
    u32 = {
        u32x1 1
        u32x2 2
        u32x4 4
        u32x8 8
        u32x16 16
        u32x32 32
        u32x64 64
    }
    u64 = {
        u64x1 1
        u64x2 2
        u64x4 4
        u64x8 8
        u64x16 16
        u64x32 32
        u64x64 64
    }
    usize = {
        usizex1 1
        usizex2 2
        usizex4 4
        usizex8 8
        usizex16 16
        usizex32 32
        usizex64 64
    }
    f32 = {
        f32x1 1
        f32x2 2
        f32x4 4
        f32x8 8
        f32x16 16
        f32x32 32
        f32x64 64
    }
    f64 = {
        f64x1 1
        f64x2 2
        f64x4 4
        f64x8 8
        f64x16 16
        f64x32 32
        f64x64 64
    }
}
mask_alias! {
    i8 : "8-bit" = {
        mask8x1 1
        mask8x2 2
        mask8x4 4
        mask8x8 8
        mask8x16 16
        mask8x32 32
        mask8x64 64
    }
    i16 : "16-bit" = {
        mask16x1 1
        mask16x2 2
        mask16x4 4
        mask16x8 8
        mask16x16 16
        mask16x32 32
        mask16x64 64
    }
    i32 : "32-bit" = {
        mask32x1 1
        mask32x2 2
        mask32x4 4
        mask32x8 8
        mask32x16 16
        mask32x32 32
        mask32x64 64
    }
    i64 : "64-bit" = {
        mask64x1 1
        mask64x2 2
        mask64x4 4
        mask64x8 8
        mask64x16 16
        mask64x32 32
        mask64x64 64
    }
    isize : "pointer-sized" = {
        masksizex1 1
        masksizex2 2
        masksizex4 4
        masksizex8 8
        masksizex16 16
        masksizex32 32
        masksizex64 64
    }
}
| true |
e776a4de65e77bd610a53df3de60d70b76f4baed
|
Rust
|
cGuille/adventofcode
|
/src/bin/2020-day4-part1.rs
|
UTF-8
| 759 | 3.015625 | 3 |
[] |
no_license
|
use std::collections::HashSet;
/// Counts passports in the compiled-in batch input that carry every required
/// field, and prints the total.
fn main() {
    let batch = include_str!("../../input/2020-day4.txt");
    // Passports are separated by blank lines.
    let mut valid_passport_count = 0;
    for passport_str in batch.split("\n\n") {
        if has_required_attributes(passport_str) {
            valid_passport_count += 1;
        }
    }
    println!("{}", valid_passport_count);
}
/// Returns `true` when the passport string contains every required key
/// (`cid` is the only optional field and is deliberately not checked).
fn has_required_attributes(passport_str: &str) -> bool {
    // Collect the key of every "key:value" pair.
    let attr_set: HashSet<&str> = passport_str
        .split_whitespace()
        .filter_map(|attr_str| attr_str.split(':').next())
        .collect();
    ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
        .iter()
        .all(|key| attr_set.contains(key))
}
| true |
6df5798a8f6e64debc86bb99cb243cd84ebb9111
|
Rust
|
guillaumebreton/ruin
|
/src/main.rs
|
UTF-8
| 3,674 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
use chrono::NaiveDate;
use clap::{Parser, Subcommand};
use diesel::prelude::*;
use crossterm::{
event::{DisableMouseCapture, EnableMouseCapture},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use std::io;
use std::io::Error;
use tui::{backend::CrosstermBackend, Terminal};
use model::Service;
pub mod model;
pub mod ofx;
pub mod schema;
pub mod ui;
// Top-level CLI arguments. NOTE(review): the doc comments below double as
// clap help text, so they are left untouched here; however "Author Name"
// looks like a leftover placeholder, and several help strings appear
// copy-pasted (see notes) — worth fixing in a behavior-affecting change.
#[derive(Parser, Debug)]
#[clap(author = "Author Name", version, about)]
struct Arguments {
    // NOTE(review): help text says "the file to explore" but this is the
    // SQLite database path.
    #[clap(short, long, default_value_t = String::from("ruin.db"),forbid_empty_values = true)]
    /// the file to explore
    db_path: String,
    #[clap(subcommand)]
    cmd: SubCommand,
}

#[derive(Subcommand, Debug)]
enum SubCommand {
    // NOTE(review): this help text is copy-pasted from elsewhere; Import
    // actually loads an OFX file into the database.
    /// Count how many times the package is used
    Import {
        #[clap(short, long, default_value_t = String::from("data.ofx"),forbid_empty_values = true)]
        /// the file to explore
        file_path: String,
    },
    // Launches the terminal UI over the database.
    View {},
}

// This macro from `diesel_migrations` defines an `embedded_migrations` module
// containing a function named `run`. This allows the example to be run and
// tested without any outside setup of the database.
embed_migrations!();
/// Parses CLI arguments, opens (or creates) the SQLite database at the given
/// path, applies the embedded migrations, then dispatches to the selected
/// subcommand.
fn main() {
    let args = Arguments::parse();
    let connection = SqliteConnection::establish(&args.db_path)
        .unwrap_or_else(|_| panic!("Error connecting to db"));
    embedded_migrations::run_with_output(&connection, &mut std::io::stdout()).unwrap();
    match args.cmd {
        SubCommand::Import { file_path } => import(&connection, &file_path),
        SubCommand::View {} => view(&connection).unwrap(),
    }
}
/// Runs the terminal UI: switches the terminal to raw/alternate-screen mode,
/// runs the app over a `Service` wrapping `connection`, then restores the
/// terminal before reporting any app error.
fn view(connection: &SqliteConnection) -> Result<(), Error> {
    let service = Service {
        connection: connection,
    };
    // setup terminal
    enable_raw_mode()?;
    let mut stdout = io::stdout();
    execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    // create app and run it
    let res = ui::run_app(&mut terminal, service);
    // restore terminal
    // (done before inspecting `res` so the terminal is usable even on error)
    disable_raw_mode()?;
    execute!(
        terminal.backend_mut(),
        LeaveAlternateScreen,
        DisableMouseCapture
    )?;
    terminal.show_cursor()?;
    if let Err(err) = res {
        println!("{:?}", err)
    }
    Ok(())
}
/// Imports an OFX file into the database: upserts the account with its
/// available balance, then upserts every transaction in the file. Monetary
/// amounts are stored as integer cents.
fn import(connection: &SqliteConnection, file_path: &str) {
    let service = Service {
        connection: connection,
    };
    let data = ofx::load(&file_path).unwrap();
    let account_data = data.message.response.aggregate.account;
    let balance = data
        .message
        .response
        .aggregate
        .available_balance
        .amount
        .parse::<f32>()
        .unwrap();
    // `.round()` before casting: a bare `as i32` truncates toward zero, so
    // f32 amounts like 0.29 (stored as 28.999...) would lose a cent.
    let account = service
        .upsert_account(
            "",
            &account_data.account_type,
            &account_data.account_number,
            (balance * 100.0).round() as i32,
        )
        .unwrap();
    for tx in data
        .message
        .response
        .aggregate
        .transaction_list
        .transactions
    {
        let date_posted = NaiveDate::parse_from_str(&tx.date_posted, "%Y%m%d").unwrap();
        // TODO change the parse to use international format.
        let amount = tx.amount.replace(",", ".").parse::<f32>().unwrap();
        service
            .upsert_transaction(
                &tx.description,
                date_posted,
                &tx.id,
                (amount * 100.0).round() as i32,
                account.id,
            )
            .unwrap();
    }
}
| true |
7cd4f120aa60dc79f2e38c804d383a2851c17fb5
|
Rust
|
juneym/rust-lang-tutorial
|
/src/cli.rs
|
UTF-8
| 344 | 3.34375 | 3 |
[] |
no_license
|
// sample cli's use of arguments
use std::env;
/// Demonstrates reading CLI arguments: prints them all, then greets when the
/// first real argument is "hello".
pub fn run() {
    let args: Vec<String> = env::args().collect();
    // `args[0]` is the program name; the command is the first real argument.
    // Use `get` + `cloned` so a missing argument yields an empty command
    // instead of an out-of-bounds panic. (The clone is needed because indexing
    // a Vec only yields a borrow of the String.)
    let command = args.get(1).cloned().unwrap_or_default();
    println!("\nargs: {:?}", args);
    println!("\ncommand: {}", command);
    if command == "hello" {
        println!("Hey there. Hello!");
    }
}
| true |
1a8de1b6017c58016b2e6bb96b4df43836dc20c0
|
Rust
|
akshayknarayan/simulator
|
/src/node/switch/drop_tail_queue.rs
|
UTF-8
| 3,712 | 3.015625 | 3 |
[] |
no_license
|
use std::collections::VecDeque;
use node::Link;
use node::switch::Queue;
use packet::Packet;
/// A fixed-capacity FIFO packet queue that drops arriving packets when full
/// ("drop-tail" policy).
#[derive(Debug)]
pub struct DropTailQueue{
    /// Maximum total bytes the queue may hold.
    limit_bytes: u32,
    /// The link this queue feeds.
    link: Link,
    /// Queued packets, oldest first.
    pkts: VecDeque<Packet>,
    /// A packet staged to transmit ahead of everything in `pkts`.
    forced_next: Option<Packet>,
    /// True while the queue has something to transmit.
    active: bool,
    /// True while transmission is paused (see `set_paused`).
    paused: bool,
}
impl DropTailQueue {
pub fn new(limit_bytes: u32, link: Link) -> Self {
DropTailQueue{
limit_bytes,
link,
pkts: VecDeque::new(),
forced_next: None,
active: false,
paused: false,
}
}
fn occupancy_bytes(&self) -> u32 {
self.pkts.iter().map(|p| p.get_size_bytes()).sum()
}
}
/// Drop-tail `Queue` implementation: packets that would push occupancy past
/// `limit_bytes` are dropped on arrival; a `forced_next` packet, when staged,
/// is always dequeued before anything in `pkts`.
impl Queue for DropTailQueue {
    fn link(&self) -> Link {
        self.link
    }

    /// Bytes of capacity still available (forced_next is not counted).
    fn headroom(&self) -> u32 {
        self.limit_bytes - self.occupancy_bytes()
    }

    /// Append `p`, or return `None` (drop it) if it would exceed the byte limit.
    fn enqueue(&mut self, p: Packet) -> Option<()> {
        let occupancy_bytes = self.occupancy_bytes();
        if occupancy_bytes + p.get_size_bytes() > self.limit_bytes {
            // we have to drop this packet
            return None;
        }
        self.pkts.push_back(p);
        self.set_active(true);
        Some(())
    }

    /// Stage `p` to transmit ahead of everything already queued.
    fn force_tx_next(&mut self, p: Packet) -> Option<()> {
        self.forced_next = Some(p);
        self.set_active(true);
        Some(())
    }

    /// Pop the forced packet if one is staged, otherwise the oldest packet.
    fn dequeue(&mut self) -> Option<Packet> {
        if let None = self.forced_next {
            // Taking the last queued packet deactivates the queue.
            if self.pkts.len() == 1 {
                self.set_active(false);
            }
            self.pkts.pop_front()
        } else {
            self.forced_next.take()
        }
    }

    /// Remove every queued packet matching `should_discard`; returns the count.
    fn discard_matching(&mut self, mut should_discard: Box<FnMut(Packet) -> bool>) -> usize {
        let pkts = &mut self.pkts;
        // Rebuilds the queue (relies on Packet: Copy for the `&&p` pattern).
        let after_pkts = pkts.iter().filter(|&&p| !should_discard(p)).map(|p| p.clone()).collect::<VecDeque<Packet>>();
        let dropped = pkts.len() - after_pkts.len();
        *pkts = after_pkts;
        dropped
    }

    /// Count queued packets matching `counter`.
    fn count_matching(&self, mut counter: Box<FnMut(Packet) -> bool>) -> usize {
        self.pkts.iter().filter(|&&p| counter(p)).count()
    }

    /// A paused queue reports inactive even when it holds packets.
    fn is_active(&self) -> bool {
        self.active && !self.paused
    }

    fn set_active(&mut self, a: bool) {
        self.active = a;
    }

    fn is_paused(&self) -> bool {
        self.paused
    }

    fn set_paused(&mut self, a: bool) {
        self.paused = a;
    }
}
#[cfg(test)]
mod tests {
use node::{Link, switch::Queue};
use packet::{Packet, PacketHeader};
use super::DropTailQueue;
#[test]
fn check_discard_matching() {
let mut q = DropTailQueue::new(15_000, Link{propagation_delay: 0, bandwidth_bps: 0, pfc_enabled: false, from: 0, to: 1});
let mut pkts = (0..).map(|seq| {
Packet::Data{
hdr: PacketHeader{
flow: 0,
from: 0,
to: 1,
},
seq,
length: 1460,
}
});
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
q.enqueue(pkts.next().unwrap()).unwrap();
assert_eq!(q.headroom(), 1500 * 2);
let dropped = q.discard_matching(Box::new(|p| match p {
Packet::Data{seq, ..} => {
seq > 5
}
_ => unreachable!(),
}));
assert_eq!(dropped, 2);
assert_eq!(q.headroom(), 1500 * 4);
}
}
| true |
132709ffc8d95dd0ef2df42d5535185e7b8d1939
|
Rust
|
nigelgray/rust-audio-analyser
|
/src/wav_helpers.rs
|
UTF-8
| 1,689 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
use std::sync::atomic::{Ordering};
// To find the RMS gain
// - Calculate the RMS value of the generated audio
// - Calculate the RMS value of the recorded audio
// - Calculate the power between the signals, using the generated audio as the reference
// (positive value means amplification, negative means attenuation)
// - We are interested in the voltage gain, not the power gain hence:
// L = 20 × log (voltage ratio V2 / V1) in dB (V1 = Vin is the reference)
// See http://www.sengpielaudio.com/calculator-amplification.htm
/// Compute the RMS gain (in dB) between the generated and recorded audio
/// files and store it, as raw f64 bits, in the shared `RMS_GAIN` atomic.
/// If either file's RMS cannot be computed, the stored value is left untouched.
pub fn calculate_rms() {
    if let Some(generated_rms) = find_rms_value(crate::GENERATE_PATH) {
        if let Some(recorded_rms) = find_rms_value(crate::RECORD_PATH) {
            let ratio = recorded_rms/generated_rms;
            // Voltage gain in dB: L = 20 * log10(V2 / V1).
            let gain = 20.0 * ratio.log10();
            crate::RMS_GAIN.store(f64::to_bits(gain), Ordering::SeqCst);
        }
    }
}
// RMS = Root-Mean-Squared
// - Square each sample
// - Sum them together
// - Work out the mean of the final sum
// - Take the square root
fn find_rms_value(filename: &str) -> Option<f64> {
    // Return None instead of panicking when the file is missing or unreadable;
    // the signature already advertises a fallible result via Option.
    let mut reader = hound::WavReader::open(filename).ok()?;
    let sqr_sum = match reader.spec().sample_format {
        hound::SampleFormat::Int => reader.samples::<i16>().fold(0.0, |sqr_sum, s| {
            let sample = s.unwrap() as f64;
            sqr_sum + sample * sample
        }),
        hound::SampleFormat::Float => reader.samples::<f32>().fold(0.0, |sqr_sum, s| {
            let sample = s.unwrap() as f64;
            sqr_sum + sample * sample
        }),
    };
    // Guard against a file with no samples: 0.0 / 0 would otherwise yield NaN.
    if reader.len() == 0 {
        return None;
    }
    let rms_value = (sqr_sum / reader.len() as f64).sqrt();
    Some(rms_value)
}
| true |
88a2a4ed3d920a6b50652f055c8c530333ae91e5
|
Rust
|
sria91-rlox/cat-lox
|
/src/lexer/core.rs
|
UTF-8
| 5,111 | 3.6875 | 4 |
[
"MIT"
] |
permissive
|
use super::token::*;
/// A hand-written scanner that yields `Token`s from source text on demand.
pub struct Lexer {
    /// The full input, pre-split into chars for O(1) positional access.
    input: Vec<char>,
    /// Index of the next unread character.
    index: usize,
}
impl Iterator for Lexer {
    type Item = Token;

    /// Scan and return the next token; `None` at end of input.
    /// Comments and whitespace are skipped by recursing to `self.next()`.
    fn next(&mut self) -> Option<Token> {
        match self.advance() {
            None => None,
            // Operators
            Some('+') => Some(Token::Plus),
            Some('-') => Some(Token::Minus),
            Some('*') => Some(Token::Asterisk),
            Some('(') => Some(Token::LeftParentheses),
            Some(')') => Some(Token::RightParentheses),
            Some(',') => Some(Token::Comma),
            Some(';') => Some(Token::Semicolon),
            Some('{') => Some(Token::LeftBrace),
            Some('}') => Some(Token::RightBrace),
            Some('.') => Some(Token::Dot),
            // Two-character operators: peek for a trailing '='.
            Some('<') => match self.peek() {
                Some('=') => {
                    self.advance();
                    Some(Token::LessEqual)
                }
                _ => Some(Token::LessThan),
            },
            Some('>') => match self.peek() {
                Some('=') => {
                    self.advance();
                    Some(Token::GreaterEqual)
                }
                _ => Some(Token::GreaterThan),
            },
            Some('=') => match self.peek() {
                Some('=') => {
                    self.advance();
                    Some(Token::Equal)
                }
                _ => Some(Token::Assign),
            },
            Some('!') => match self.peek() {
                Some('=') => {
                    self.advance();
                    Some(Token::NotEqual)
                }
                _ => Some(Token::Bang),
            },
            Some('/') => {
                match self.peek() {
                    // comments: consume to end of line, then scan the next token
                    Some('/') => {
                        self.advance();
                        while let Some(current_char) = self.advance() {
                            if current_char == '\n' {
                                break;
                            }
                        }
                        self.next()
                    }
                    _ => Some(Token::Slash),
                }
            }
            // String literal: consume up to (and including) the closing quote.
            // Note: an unterminated string silently ends at EOF.
            Some('"') => {
                let mut literal = String::new();
                while let Some(current_char) = self.advance() {
                    if current_char == '"' {
                        break;
                    }
                    literal.push(current_char);
                }
                Some(Token::LoxString(literal))
            }
            // Whitespace (must be checked after comments)
            Some(' ') => self.next(),
            Some('\t') => self.next(),
            Some('\r') => self.next(),
            Some('\n') => self.next(),
            // literal, keyword, or number
            Some(current_char) => {
                // Todo: maybe it would be preferable to store a reference to a
                // slice rather than storing a new heap allocated string.
                let mut literal = String::new();
                literal.push(current_char);
                // Accumulate until a delimiter char; '.' only continues the
                // literal when it started as part of a number.
                loop {
                    match self.peek() {
                        Some(next) => {
                            if is_blacklisted(&next) {
                                break;
                            }
                            if !is_part_of_number(current_char) && next == '.' {
                                break;
                            }
                        }
                        None => break,
                    }
                    if let Some(current_char) = self.advance() {
                        literal.push(current_char);
                    }
                }
                // NOTE(review): a literal consisting only of '.' characters
                // passes the all(is_part_of_number) check but fails
                // parse::<f64>, panicking on unwrap — confirm inputs like a
                // bare "." cannot reach this arm.
                if keyword(&literal).is_some() {
                    keyword(&literal)
                } else if literal.chars().all(is_part_of_number) {
                    Some(Token::Number(literal.parse::<f64>().unwrap()))
                } else {
                    Some(Token::Ident(literal))
                }
            }
        }
    }
}
impl Lexer {
    /// Build a lexer over `input`, positioned at the first character.
    pub fn new(input: &str) -> Lexer {
        Lexer {
            input: input.chars().collect(),
            index: 0,
        }
    }

    /// Consume and return the next character, or `None` at end of input.
    fn advance(&mut self) -> Option<char> {
        let next = self.input.get(self.index).copied();
        if next.is_some() {
            self.index += 1;
        }
        next
    }

    /// Look at the next character without consuming it.
    fn peek(&self) -> Option<char> {
        self.input.get(self.index).copied()
    }
}
/// Is this char allowed to be in a literal?
///
/// TODO: if we ever need to add a new state, both this and the next
/// function above need to be changed. That violates the open closed
/// principle, investigate refactoring.
fn is_blacklisted(c: &char) -> bool {
    // A const slice avoids allocating a fresh Vec on every call — this runs
    // once per scanned character, so the original `vec![...]` heap-allocated
    // in the lexer's innermost loop.
    const BLACKLIST: &[char] = &[
        '+', '-', '*', '<', '>', '(', ')', ',', ';', '{', '}', '=', '!', '/', ' ', '\t', '\r',
        '\n',
    ];
    BLACKLIST.contains(c)
}
/// True for characters that may appear in a numeric literal: the ASCII
/// digits and the decimal point.
fn is_part_of_number(c: char) -> bool {
    match c {
        '0'..='9' | '.' => true,
        _ => false,
    }
}
| true |
1e121ec8be3577093268d22579675c828345cb74
|
Rust
|
cloew/KaoBoy
|
/src/cpu/instructions/jump/conditions.rs
|
UTF-8
| 3,242 | 2.828125 | 3 |
[] |
no_license
|
use super::super::utils::{check_half_carry};
use super::super::super::instruction_context::InstructionContext;
/// Branch condition for unconditional jumps: always taken.
pub fn always(_context: &InstructionContext) -> bool {
    true
}

/// True when the carry flag is clear.
pub fn is_carry_flag_off(context: &InstructionContext) -> bool {
    !context.registers().carry_flag.get()
}

/// True when the carry flag is set.
pub fn is_carry_flag_on(context: &InstructionContext) -> bool {
    context.registers().carry_flag.get()
}

/// True when the zero flag is clear.
pub fn is_zero_flag_off(context: &InstructionContext) -> bool {
    !context.registers().zero_flag.get()
}

/// True when the zero flag is set.
pub fn is_zero_flag_on(context: &InstructionContext) -> bool {
    context.registers().zero_flag.get()
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::as_hex;
    use crate::cpu::testing::build_test_instruction_context;

    // Each condition helper is exercised against both flag states.
    #[test]
    fn test_is_carry_flag_off_flag_off_returns_true() {
        let mut context = build_test_instruction_context();
        context.registers_mut().carry_flag.reset();

        let result = is_carry_flag_off(&context);

        assert_eq!(result, true);
    }

    #[test]
    fn test_is_carry_flag_off_flag_on_returns_false() {
        let mut context = build_test_instruction_context();
        context.registers_mut().carry_flag.activate();

        let result = is_carry_flag_off(&context);

        assert_eq!(result, false);
    }

    #[test]
    fn test_is_carry_flag_on_flag_off_returns_false() {
        let mut context = build_test_instruction_context();
        context.registers_mut().carry_flag.reset();

        let result = is_carry_flag_on(&context);

        assert_eq!(result, false);
    }

    #[test]
    fn test_is_carry_flag_on_flag_on_returns_true() {
        let mut context = build_test_instruction_context();
        context.registers_mut().carry_flag.activate();

        let result = is_carry_flag_on(&context);

        assert_eq!(result, true);
    }

    #[test]
    fn test_is_zero_flag_off_flag_off_returns_true() {
        let mut context = build_test_instruction_context();
        context.registers_mut().zero_flag.reset();

        let result = is_zero_flag_off(&context);

        assert_eq!(result, true);
    }

    #[test]
    fn test_is_zero_flag_off_flag_on_returns_false() {
        let mut context = build_test_instruction_context();
        context.registers_mut().zero_flag.activate();

        let result = is_zero_flag_off(&context);

        assert_eq!(result, false);
    }

    #[test]
    fn test_is_zero_flag_on_flag_off_returns_false() {
        let mut context = build_test_instruction_context();
        context.registers_mut().zero_flag.reset();

        let result = is_zero_flag_on(&context);

        assert_eq!(result, false);
    }

    #[test]
    fn test_is_zero_flag_on_flag_on_returns_true() {
        let mut context = build_test_instruction_context();
        context.registers_mut().zero_flag.activate();

        let result = is_zero_flag_on(&context);

        assert_eq!(result, true);
    }
}
| true |
01efae5778d259345ebd37771f1ae6a59908cae6
|
Rust
|
aicacia/rs-lexer
|
/src/token.rs
|
UTF-8
| 714 | 2.9375 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use super::TokenMeta;
/// A lexed token: its source metadata plus a generic value payload.
#[derive(Serialize, Deserialize, Clone, PartialEq, Debug, Eq, PartialOrd, Ord, Hash)]
pub struct Token<T> {
    /// Positional/span information for this token.
    meta: TokenMeta,
    /// The token's value.
    value: T,
}

// NOTE(review): these impls look redundant — Token<T> holds no raw pointers,
// so the compiler's auto traits should already provide Send/Sync under the
// same bounds (assuming TokenMeta is Send + Sync). Confirm before removing
// the `unsafe`.
unsafe impl<T> Send for Token<T> where T: Send {}
unsafe impl<T> Sync for Token<T> where T: Sync {}
impl<T> Token<T> {
    /// Create a token from its metadata and value.
    #[inline(always)]
    pub fn new(meta: TokenMeta, value: T) -> Self {
        Token { meta, value }
    }

    /// Borrow the token's metadata.
    #[inline(always)]
    pub fn meta(&self) -> &TokenMeta {
        &self.meta
    }

    /// Consume the token, yielding its metadata.
    #[inline(always)]
    pub fn into_meta(self) -> TokenMeta {
        self.meta
    }

    /// Borrow the token's value.
    #[inline(always)]
    pub fn value(&self) -> &T {
        &self.value
    }

    /// Consume the token, yielding its value.
    #[inline(always)]
    pub fn into_value(self) -> T {
        self.value
    }
}
| true |
73dafda5b5dd20fc668be9760d08a4cdbd4a9861
|
Rust
|
chromium/chromium
|
/third_party/rust/getrandom/v0_2/crate/src/error.rs
|
UTF-8
| 8,110 | 2.640625 | 3 |
[
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later"
] |
permissive
|
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::{fmt, num::NonZeroU32};
/// A small and `no_std` compatible error type
///
/// The [`Error::raw_os_error()`] will indicate if the error is from the OS, and
/// if so, which error code the OS gave the application. If such an error is
/// encountered, please consult with your system documentation.
///
/// Internally this type is a NonZeroU32, with certain values reserved for
/// certain purposes, see [`Error::INTERNAL_START`] and [`Error::CUSTOM_START`].
///
/// *If this crate's `"std"` Cargo feature is enabled*, then:
/// - [`getrandom::Error`][Error] implements
/// [`std::error::Error`](https://doc.rust-lang.org/std/error/trait.Error.html)
/// - [`std::io::Error`](https://doc.rust-lang.org/std/io/struct.Error.html) implements
/// [`From<getrandom::Error>`](https://doc.rust-lang.org/std/convert/trait.From.html).
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Error(NonZeroU32);

/// Build an internal (non-OS) error whose code is `INTERNAL_START + n`.
const fn internal_error(n: u16) -> Error {
    // SAFETY: code > 0 as INTERNAL_START > 0 and adding n won't overflow a u32.
    let code = Error::INTERNAL_START + (n as u32);
    Error(unsafe { NonZeroU32::new_unchecked(code) })
}
impl Error {
    /// This target/platform is not supported by `getrandom`.
    pub const UNSUPPORTED: Error = internal_error(0);
    /// The platform-specific `errno` returned a non-positive value.
    pub const ERRNO_NOT_POSITIVE: Error = internal_error(1);
    /// Call to iOS [`SecRandomCopyBytes`](https://developer.apple.com/documentation/security/1399291-secrandomcopybytes) failed.
    pub const IOS_SEC_RANDOM: Error = internal_error(3);
    /// Call to Windows [`RtlGenRandom`](https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom) failed.
    pub const WINDOWS_RTL_GEN_RANDOM: Error = internal_error(4);
    /// RDRAND instruction failed due to a hardware issue.
    pub const FAILED_RDRAND: Error = internal_error(5);
    /// RDRAND instruction unsupported on this target.
    pub const NO_RDRAND: Error = internal_error(6);
    /// The environment does not support the Web Crypto API.
    pub const WEB_CRYPTO: Error = internal_error(7);
    /// Calling Web Crypto API `crypto.getRandomValues` failed.
    pub const WEB_GET_RANDOM_VALUES: Error = internal_error(8);
    /// On VxWorks, call to `randSecure` failed (random number generator is not yet initialized).
    pub const VXWORKS_RAND_SECURE: Error = internal_error(11);
    /// Node.js does not have the `crypto` CommonJS module.
    pub const NODE_CRYPTO: Error = internal_error(12);
    /// Calling Node.js function `crypto.randomFillSync` failed.
    pub const NODE_RANDOM_FILL_SYNC: Error = internal_error(13);
    /// Called from an ES module on Node.js. This is unsupported, see:
    /// <https://docs.rs/getrandom#nodejs-es-module-support>.
    pub const NODE_ES_MODULE: Error = internal_error(14);

    /// Codes below this point represent OS Errors (i.e. positive i32 values).
    /// Codes at or above this point, but below [`Error::CUSTOM_START`] are
    /// reserved for use by the `rand` and `getrandom` crates.
    pub const INTERNAL_START: u32 = 1 << 31;

    /// Codes at or above this point can be used by users to define their own
    /// custom errors.
    pub const CUSTOM_START: u32 = (1 << 31) + (1 << 30);

    /// Extract the raw OS error code (if this error came from the OS)
    ///
    /// This method is identical to [`std::io::Error::raw_os_error()`][1], except
    /// that it works in `no_std` contexts. If this method returns `None`, the
    /// error value can still be formatted via the `Display` implementation.
    ///
    /// [1]: https://doc.rust-lang.org/std/io/struct.Error.html#method.raw_os_error
    #[inline]
    pub fn raw_os_error(self) -> Option<i32> {
        if self.0.get() < Self::INTERNAL_START {
            // `match ()` with cfg-gated arms selects exactly one conversion at
            // compile time.
            match () {
                #[cfg(target_os = "solid_asp3")]
                // On SOLID, negate the error code again to obtain the original
                // error code.
                () => Some(-(self.0.get() as i32)),
                #[cfg(not(target_os = "solid_asp3"))]
                () => Some(self.0.get() as i32),
            }
        } else {
            None
        }
    }

    /// Extract the bare error code.
    ///
    /// This code can either come from the underlying OS, or be a custom error.
    /// Use [`Error::raw_os_error()`] to disambiguate.
    #[inline]
    pub const fn code(self) -> NonZeroU32 {
        self.0
    }
}
// Render an OS `errno` into `buf` as a human-readable message where the
// platform supports it (Unix `strerror_r`); on other platforms there is no
// message to fetch.
cfg_if! {
    if #[cfg(unix)] {
        fn os_err(errno: i32, buf: &mut [u8]) -> Option<&str> {
            let buf_ptr = buf.as_mut_ptr() as *mut libc::c_char;
            if unsafe { libc::strerror_r(errno, buf_ptr, buf.len()) } != 0 {
                return None;
            }

            // Take up to trailing null byte
            let n = buf.len();
            let idx = buf.iter().position(|&b| b == 0).unwrap_or(n);
            core::str::from_utf8(&buf[..idx]).ok()
        }
    } else {
        fn os_err(_errno: i32, _buf: &mut [u8]) -> Option<&str> {
            None
        }
    }
}
impl fmt::Debug for Error {
    // Prefer the OS message, then the internal description, then the raw code.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Error");
        if let Some(errno) = self.raw_os_error() {
            dbg.field("os_error", &errno);
            let mut buf = [0u8; 128];
            if let Some(err) = os_err(errno, &mut buf) {
                dbg.field("description", &err);
            }
        } else if let Some(desc) = internal_desc(*self) {
            dbg.field("internal_code", &self.0.get());
            dbg.field("description", &desc);
        } else {
            dbg.field("unknown_code", &self.0.get());
        }
        dbg.finish()
    }
}

impl fmt::Display for Error {
    // Same precedence as Debug, but renders a bare message rather than fields.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(errno) = self.raw_os_error() {
            let mut buf = [0u8; 128];
            match os_err(errno, &mut buf) {
                Some(err) => err.fmt(f),
                None => write!(f, "OS Error: {}", errno),
            }
        } else if let Some(desc) = internal_desc(*self) {
            f.write_str(desc)
        } else {
            write!(f, "Unknown Error: {}", self.0.get())
        }
    }
}

impl From<NonZeroU32> for Error {
    fn from(code: NonZeroU32) -> Self {
        Self(code)
    }
}
/// Human-readable description for the crate's internal error codes;
/// `None` for OS, custom, and unknown codes.
fn internal_desc(error: Error) -> Option<&'static str> {
    match error {
        Error::UNSUPPORTED => Some("getrandom: this target is not supported"),
        Error::ERRNO_NOT_POSITIVE => Some("errno: did not return a positive value"),
        Error::IOS_SEC_RANDOM => Some("SecRandomCopyBytes: iOS Security framework failure"),
        Error::WINDOWS_RTL_GEN_RANDOM => Some("RtlGenRandom: Windows system function failure"),
        Error::FAILED_RDRAND => Some("RDRAND: failed multiple times: CPU issue likely"),
        Error::NO_RDRAND => Some("RDRAND: instruction not supported"),
        Error::WEB_CRYPTO => Some("Web Crypto API is unavailable"),
        Error::WEB_GET_RANDOM_VALUES => Some("Calling Web API crypto.getRandomValues failed"),
        Error::VXWORKS_RAND_SECURE => Some("randSecure: VxWorks RNG module is not initialized"),
        Error::NODE_CRYPTO => Some("Node.js crypto CommonJS module is unavailable"),
        Error::NODE_RANDOM_FILL_SYNC => Some("Calling Node.js API crypto.randomFillSync failed"),
        Error::NODE_ES_MODULE => Some("Node.js ES modules are not directly supported, see https://docs.rs/getrandom#nodejs-es-module-support"),
        _ => None,
    }
}
#[cfg(test)]
mod tests {
    use super::Error;
    use core::mem::size_of;

    // The NonZeroU32 inside Error provides a niche, so Result<(), Error>
    // should be no larger than Error itself (4 bytes).
    #[test]
    fn test_size() {
        assert_eq!(size_of::<Error>(), 4);
        assert_eq!(size_of::<Result<(), Error>>(), 4);
    }
}
| true |
454b656776ef9ea552c3c6d8a0b47eb502367581
|
Rust
|
vanhtuan0409/aoc
|
/2022/src/bin/day6/main.rs
|
UTF-8
| 595 | 2.5625 | 3 |
[] |
no_license
|
use aoc_2022::get_input_file;
use itertools::Itertools;
use std::fs::File;
use std::io::{self, BufRead};
/// AoC 2022 day 6: for every input line, find the first window of 14
/// pairwise-distinct characters (the start-of-message marker) and print the
/// index just past it.
fn main() {
    // Marker length — named once instead of the magic 14 appearing twice.
    const MARKER_LEN: usize = 14;

    let f: io::Result<File> = get_input_file!("input1.txt");
    let r = io::BufReader::new(f.unwrap());
    r.lines().map(|line| line.unwrap()).for_each(|line| {
        println!("======");
        let chars = line.chars().collect_vec();
        // First window where all MARKER_LEN characters are distinct.
        let (idx, _) = chars
            .windows(MARKER_LEN)
            .enumerate()
            .find(|(_, window)| window.iter().all_unique())
            .expect("no start-of-message marker found in input line");
        println!("original signal: {}. Idx {}", line, idx + MARKER_LEN);
    });
}
| true |
01839d034cd249a1e9dc8685da3c8a47731c3bf0
|
Rust
|
ErisMik/minecator
|
/src/minecraft/chunk.rs
|
UTF-8
| 786 | 2.75 | 3 |
[] |
no_license
|
use byteorder::{BigEndian, ByteOrder};
use nbt;
use std::io::Cursor;
/// A parsed chunk: a caller-supplied timestamp plus its decoded NBT payload.
#[derive(Debug)]
pub struct Chunk {
    /// Timestamp supplied by the caller (presumably from the region header).
    timestamp: u32,
    /// The chunk's decoded NBT data.
    blob: nbt::Blob,
}

impl Chunk {
    /// Parse a chunk from raw bytes: a 4-byte big-endian length, a 1-byte
    /// compression scheme (1 = gzip, 2 = zlib, other = uncompressed), then
    /// the NBT payload.
    pub fn new(timestamp: u32, data: Vec<u8>) -> std::io::Result<Chunk> {
        let chunk_length = BigEndian::read_u32(&data[0..4]) as usize;
        let compression_type = u8::from_be(data[4]);
        // NOTE(review): if the length field counts bytes *after* the 4-byte
        // prefix (as in the Anvil region format), the payload spans
        // 5..4 + chunk_length; `5..chunk_length` would drop the final 4 bytes.
        // Confirm against the format spec before changing.
        let mut data_reader = Cursor::new(&data[5..chunk_length]);
        let nbt_data = match compression_type {
            1 => nbt::Blob::from_gzip_reader(&mut data_reader)?,
            2 => nbt::Blob::from_zlib_reader(&mut data_reader)?,
            _ => nbt::Blob::from_reader(&mut data_reader)?,
        };
        return Ok(Chunk {
            timestamp: timestamp,
            blob: nbt_data,
        });
    }
}
| true |
d1827d3f85c8e79749a0f2c183e4134b86d74ae0
|
Rust
|
femnad/leth
|
/src/main.rs
|
UTF-8
| 2,851 | 2.5625 | 3 |
[] |
no_license
|
extern crate regex;
extern crate skim;
extern crate structopt;
use std::collections::HashMap;
use std::io::Cursor;
use std::io::{self, Read};
use std::process::{Command, Stdio};
use regex::Regex;
use skim::prelude::*;
use structopt::StructOpt;
const LINE_SPLITTER: char = '=';
const URL_REGEX: &str = r"(http(s)?://[a-zA-Z0-9_/?+&.=@%#;~:-]+)";
#[derive(Debug, StructOpt)]
#[structopt(name = "leth", about = "URL extractor intended to be used within mutt")]
struct Opt {}
/// Read email-style text on stdin, extract unique URLs (first re-joining
/// lines soft-wrapped with a trailing '=', as in quoted-printable encoding),
/// let the user multi-select them in skim, and open each choice in firefox.
pub fn main() {
    Opt::from_args();
    let options = SkimOptionsBuilder::default()
        .multi(true)
        .bind(vec!["ctrl-k:kill-line"])
        .build()
        .unwrap();
    let re = Regex::new(URL_REGEX).unwrap();
    let mut buffer = String::new();
    io::stdin().read_to_string(&mut buffer).unwrap();
    let lines = buffer.split("\n");
    // Merge runs of lines ending in LINE_SPLITTER ('=') back into one line.
    let mut split_lines = false;
    let mut split_line_buffer: Vec<&str> = Vec::new();
    let mut merged_lines: Vec<String> = Vec::new();
    for line in lines {
        if line.len() == 0 {
            continue
        }
        if line.ends_with(LINE_SPLITTER) {
            // Buffer the line without its trailing splitter character.
            let mergable = line.get(0..line.len() - 1).unwrap_or("");
            split_line_buffer.push(mergable);
            split_lines = true;
            continue;
        }
        if split_lines {
            split_lines = false;
            split_line_buffer.push(line);
            let merged_line = &split_line_buffer.join("");
            merged_lines.push(merged_line.to_string());
            split_line_buffer = Vec::new();
        } else {
            merged_lines.push(line.to_string());
        }
    }
    // Collect unique URLs, remembering first-seen order via an index.
    // NOTE(review): match_index is a u8 — more than 255 distinct URLs would
    // overflow (panic in debug, wrap in release); confirm inputs stay small.
    let mut matches: HashMap<String, u8> = HashMap::new();
    let mut match_index = 1;
    for line in merged_lines {
        // Undo the quoted-printable escape for '=' before matching.
        let sanitized = line.replace("=3D", "=");
        for capture in re.captures_iter(&sanitized) {
            let url_match = capture.get(1).unwrap().as_str();
            if matches.contains_key(url_match) {
                continue;
            }
            matches.insert(url_match.to_string(), match_index);
            match_index += 1;
        }
    }
    // Present URLs to skim in first-seen order.
    let mut ordered_items: Vec<_> = matches.into_iter().collect();
    ordered_items.sort_by(|a, b| a.1.cmp(&b.1));
    let item_list: Vec<_> = ordered_items.iter().map(|item| item.0.as_str()).collect();
    let items = item_list.join("\n")
    let item_reader = SkimItemReader::default();
    let items = item_reader.of_bufread(Cursor::new(items));
    let output = Skim::run_with(&options, Some(items)).unwrap();
    if output.is_abort {
        return;
    }
    // Fire-and-forget: spawn a browser per selected URL, discarding its output.
    for item in output.selected_items.iter() {
        let url = item.clone();
        Command::new("firefox")
            .arg(url.output().as_ref())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn()
            .unwrap();
    }
}
| true |
45ab8b0a952d1ebd042a38d187027ee3fb551320
|
Rust
|
DanMaycock/fortunes_algorithm_rs
|
/src/boundingbox.rs
|
UTF-8
| 17,186 | 3.359375 | 3 |
[] |
no_license
|
use super::*;
use std::f64;
/// Which edge of the bounding box a point/intersection lies on
/// (`None` = no edge, e.g. a ray component parallel to an axis).
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Side {
    Left,
    Right,
    Top,
    Bottom,
    None,
}

impl Side {
    // Iterates round the sides in an anti clockwise direction:
    // Left -> Bottom -> Right -> Top -> Left. None maps to itself.
    fn next(self) -> Side {
        match self {
            Side::Left => Side::Bottom,
            Side::Top => Side::Left,
            Side::Right => Side::Top,
            Side::Bottom => Side::Right,
            Side::None => Side::None,
        }
    }
}
/// An axis-aligned box used to clip Voronoi diagrams. The `contains` checks
/// show `top` is the smaller y bound and `bottom` the larger.
#[derive(Debug)]
pub struct BoundingBox {
    /// Smallest x contained in the box.
    left: f64,
    /// Largest x contained in the box.
    right: f64,
    /// Smallest y contained in the box.
    top: f64,
    /// Largest y contained in the box.
    bottom: f64,
}
impl BoundingBox {
pub fn new(left: f64, right: f64, top: f64, bottom: f64) -> Self {
BoundingBox {
left,
right,
top,
bottom,
}
}
pub fn contains(&self, point: &cgmath::Point2<f64>) -> bool {
(point.x >= self.left)
&& (point.x <= self.right)
&& (point.y >= self.top)
&& (point.y <= self.bottom)
}
    /// Intersect the line through `origin` (which must be inside the box)
    /// with direction `direction` against the box boundary, returning the hit
    /// point and the side it lies on.
    ///
    /// NOTE(review): with `origin` inside the box, the side chosen for each
    /// axis is the one *opposite* the direction's sign, so `t` comes out
    /// negative and the returned point lies behind `origin` along
    /// `direction`. Confirm callers expect this backwards intersection.
    pub fn get_intersection(&self, origin: &cgmath::Point2<f64>, direction: &cgmath::Vector2<f64>) -> (cgmath::Point2<f64>, Side) {
        assert!(self.contains(origin));
        // Line parameter for the vertical (x) sides, or a sentinel when the
        // direction has no x component.
        let (t1, side1) = if direction.x < 0.0 {
            ((self.right - origin.x) / direction.x, Side::Right)
        } else if direction.x > 0.0 {
            ((self.left - origin.x) / direction.x, Side::Left)
        } else {
            (f64::MIN, Side::None)
        };
        // Likewise for the horizontal (y) sides.
        let (t2, side2) = if direction.y > 0.0 {
            ((self.top - origin.y) / direction.y, Side::Top)
        } else if direction.y < 0.0 {
            ((self.bottom - origin.y) / direction.y, Side::Bottom)
        } else {
            (f64::MAX, Side::MAX)
        };
        // The boundary crossed first is the one with the smaller |t|.
        let (t, side) = if t2.abs() < t1.abs() {
            (t2, side2)
        } else {
            (t1, side1)
        };
        (*origin + (*direction * t), side)
    }
    /// The corner point shared by two adjacent sides (order-insensitive).
    ///
    /// Panics when the sides are not adjacent (including `Side::None`).
    pub fn get_corner(&self, side_1: Side, side_2: Side) -> cgmath::Point2<f64> {
        match (side_1, side_2) {
            (Side::Top, Side::Left) | (Side::Left, Side::Top) => self.get_top_left(),
            (Side::Top, Side::Right) | (Side::Right, Side::Top) => self.get_top_right(),
            (Side::Bottom, Side::Left) | (Side::Left, Side::Bottom) => self.get_bottom_left(),
            (Side::Bottom, Side::Right) | (Side::Right, Side::Bottom) => self.get_bottom_right(),
            _ => panic!("Invalid corner sides"),
        }
    }
    /// Corner at (`left`, `top`).
    pub fn get_top_left(&self) -> cgmath::Point2<f64> {
        cgmath::Point2::new(self.left, self.top)
    }

    /// Corner at (`right`, `top`).
    pub fn get_top_right(&self) -> cgmath::Point2<f64> {
        cgmath::Point2::new(self.right, self.top)
    }

    /// Corner at (`left`, `bottom`).
    pub fn get_bottom_left(&self) -> cgmath::Point2<f64> {
        cgmath::Point2::new(self.left, self.bottom)
    }

    /// Corner at (`right`, `bottom`).
    pub fn get_bottom_right(&self) -> cgmath::Point2<f64> {
        cgmath::Point2::new(self.right, self.bottom)
    }
    /// Intersections of the open segment `origin` -> `destination` with the
    /// box boundary (0, 1, or 2 points), each tagged with the side crossed.
    /// Each side is only tested when at least one endpoint lies beyond it,
    /// and a hit counts only for 0 < t < 1 within that side's span.
    pub fn get_intersections(
        &self,
        origin: &cgmath::Point2<f64>,
        destination: &cgmath::Point2<f64>,
    ) -> Vec<(cgmath::Point2<f64>, Side)> {
        let mut intersections = vec![];
        let direction = *destination - *origin;
        // Left
        if origin.x < self.left || destination.x < self.left {
            let t = (self.left - origin.x) / direction.x;
            if t > 0.0 && t < 1.0 {
                let intersection_pt = *origin + (direction * t);
                if intersection_pt.y >= self.top && intersection_pt.y <= self.bottom {
                    intersections.push((intersection_pt, Side::Left));
                }
            }
        }
        // Right
        if origin.x > self.right || destination.x > self.right {
            let t = (self.right - origin.x) / direction.x;
            if t > 0.0 && t < 1.0 {
                let intersection_pt = *origin + (direction * t);
                if intersection_pt.y >= self.top && intersection_pt.y <= self.bottom {
                    intersections.push((intersection_pt, Side::Right));
                }
            }
        }
        // Top
        if origin.y < self.top || destination.y < self.top {
            let t = (self.top - origin.y) / direction.y;
            if t > 0.0 && t < 1.0 {
                let intersection_pt = *origin + (direction * t);
                if intersection_pt.x <= self.right && intersection_pt.x >= self.left {
                    intersections.push((intersection_pt, Side::Top));
                }
            }
        }
        // Bottom
        if origin.y > self.bottom || destination.y > self.bottom {
            let t = (self.bottom - origin.y) / direction.y;
            if t > 0.0 && t < 1.0 {
                let intersection_pt = *origin + (direction * t);
                if intersection_pt.x <= self.right && intersection_pt.x >= self.left {
                    intersections.push((intersection_pt, Side::Bottom));
                }
            }
        }
        intersections
    }
    /// Clip `voronoi` to this box in place: edges entirely outside are
    /// removed, edges crossing the boundary are truncated at their
    /// intersection points, and each face's boundary is re-closed by walking
    /// new half-edges along the box sides (via `link_vertices`). Twin
    /// half-edges already processed are reused so shared vertices agree.
    pub fn intersect_diagram(&self, voronoi: &mut Diagram) {
        let mut vertices_to_remove = vec![];
        let mut half_edges_to_remove = vec![];
        let mut processed_half_edges = vec![];
        for face in voronoi.get_face_indices() {
            let start_half_edge = voronoi.get_face_outer_component(face).unwrap();
            // Track the last edge leaving / entering the box so the boundary
            // can be stitched closed once both are known.
            let mut outgoing_half_edge: Option<HalfEdgeKey> = None;
            let mut outgoing_side = Side::None;
            let mut incoming_half_edge: Option<HalfEdgeKey> = None;
            let mut incoming_side = Side::None;
            let mut half_edge = start_half_edge;
            loop {
                let origin = voronoi.get_half_edge_origin(half_edge).unwrap()
                let destination = voronoi.get_half_edge_destination(half_edge).unwrap();
                let inside = self.contains(&voronoi.get_vertex_point(origin));
                let next_inside = self.contains(&voronoi.get_vertex_point(destination));
                let next_half_edge = voronoi.get_half_edge_next(half_edge).unwrap();
                if !inside || !next_inside {
                    let intersections = self.get_intersections(
                        &voronoi.get_vertex_point(origin),
                        &voronoi.get_vertex_point(destination),
                    );
                    if !inside && !next_inside {
                        // Both points are outside the box
                        if intersections.is_empty() {
                            // The edge is outside the box
                            vertices_to_remove.push(origin);
                            if Some(half_edge) == voronoi.get_face_outer_component(face) {
                                // Update the outer component before we delete the half_edge
                                voronoi.set_face_outer_component(
                                    face,
                                    voronoi.get_half_edge_next(half_edge),
                                );
                            }
                            half_edges_to_remove.push(half_edge);
                        } else if intersections.len() == 2 {
                            // The edge crosses the bounds of the box twice
                            vertices_to_remove.push(origin);
                            // Reuse the twin's endpoints (reversed) if it was
                            // already clipped, so the two halves stay consistent.
                            let half_edge_twin = voronoi.get_half_edge_twin(half_edge);
                            if half_edge_twin.is_some()
                                && processed_half_edges.contains(&half_edge_twin.unwrap())
                            {
                                voronoi.set_half_edge_origin(
                                    half_edge,
                                    voronoi.get_half_edge_destination(half_edge_twin.unwrap()),
                                );
                                voronoi.set_half_edge_destination(
                                    half_edge,
                                    voronoi.get_half_edge_origin(half_edge_twin.unwrap()),
                                );
                            } else {
                                let origin = voronoi.add_vertex(intersections[0].0);
                                let destination = voronoi.add_vertex(intersections[1].0);
                                voronoi.set_half_edge_origin(half_edge, Some(origin));
                                voronoi.set_half_edge_destination(half_edge, Some(destination));
                            }
                            if outgoing_half_edge.is_some() {
                                self.link_vertices(
                                    voronoi,
                                    outgoing_half_edge.unwrap(),
                                    outgoing_side,
                                    half_edge,
                                    intersections[0].1,
                                )
                            }
                            outgoing_half_edge = Some(half_edge);
                            outgoing_side = intersections[1].1;
                            processed_half_edges.push(half_edge);
                        } else {
                            // NOTE(review): this message describes the
                            // inside->outside case; looks copy-pasted from the
                            // branch below.
                            panic!(
                                "An edge that begins inside the box but ends outside can only have a single intersection, origin {:?}, destination {:?}",
                                &voronoi.get_vertex_point(origin),
                                &voronoi.get_vertex_point(destination)
                            );
                        }
                    } else if inside && !next_inside {
                        // Edge is going outside the box
                        if intersections.len() == 1 {
                            let half_edge_twin = voronoi.get_half_edge_twin(half_edge);
                            if half_edge_twin.is_some()
                                && processed_half_edges.contains(&half_edge_twin.unwrap())
                            {
                                voronoi.set_half_edge_destination(
                                    half_edge,
                                    voronoi.get_half_edge_origin(half_edge_twin.unwrap()),
                                );
                            } else {
                                let destination = voronoi.add_vertex(intersections[0].0);
                                voronoi.set_half_edge_destination(half_edge, Some(destination));
                            }
                            if incoming_half_edge.is_some() {
                                self.link_vertices(
                                    voronoi,
                                    half_edge,
                                    intersections[0].1,
                                    incoming_half_edge.unwrap(),
                                    incoming_side,
                                )
                            }
                            outgoing_half_edge = Some(half_edge);
                            outgoing_side = intersections[0].1;
                            processed_half_edges.push(half_edge);
                        } else {
                            panic!(
                                "An edge that begins inside the box but ends outside can only have a single intersection, origin {:?}, destination {:?}",
                                &voronoi.get_vertex_point(origin),
                                &voronoi.get_vertex_point(destination)
                            );
                        }
                    } else if !inside && next_inside {
                        // Edge is coming into the box
                        if intersections.len() == 1 {
                            vertices_to_remove.push(origin);
                            let half_edge_twin = voronoi.get_half_edge_twin(half_edge);
                            if half_edge_twin.is_some()
                                && processed_half_edges.contains(&half_edge_twin.unwrap())
                            {
                                voronoi.set_half_edge_origin(
                                    half_edge,
                                    voronoi.get_half_edge_destination(half_edge_twin.unwrap()),
                                );
                            } else {
                                let origin = voronoi.add_vertex(intersections[0].0);
                                voronoi.set_half_edge_origin(half_edge, Some(origin));
                            }
                            if outgoing_half_edge.is_some() {
                                self.link_vertices(
                                    voronoi,
                                    outgoing_half_edge.unwrap(),
                                    outgoing_side,
                                    half_edge,
                                    intersections[0].1,
                                )
                            }
                            incoming_half_edge = Some(half_edge);
                            incoming_side = intersections[0].1;
                            processed_half_edges.push(half_edge);
                        } else {
                            panic!(
                                "An edge that begins inside the box but ends outside can only have a single intersection, origin {:?}, destination {:?}",
                                &voronoi.get_vertex_point(origin),
                                &voronoi.get_vertex_point(destination)
                            );
                        }
                    }
                }
                if next_half_edge == start_half_edge {
                    // Back where we started so break out of the loop
                    break;
                }
                half_edge = next_half_edge;
            }
        }
        // Deletions are deferred to here so the traversal above sees a
        // consistent structure.
        for half_edge in half_edges_to_remove {
            voronoi.remove_half_edge(half_edge);
        }
        for vertex in vertices_to_remove {
            voronoi.remove_vertex(vertex);
        }
    }
/// Connects two clipped half-edges along the bounding-box boundary.
///
/// Walks from `start_edge` (which exits the box on `start_side`) around the
/// box, creating one new half-edge per box side crossed, placing each new
/// vertex at the corner shared between consecutive sides, and finally adds a
/// bridging half-edge into `end_edge` (which enters the box on `end_side`).
/// All new half-edges are assigned the same incident face as `start_edge`.
pub fn link_vertices(
    &self,
    voronoi: &mut Diagram,
    start_edge: HalfEdgeKey,
    start_side: Side,
    end_edge: HalfEdgeKey,
    end_side: Side,
) {
    let mut edge = start_edge;
    let mut side = start_side;
    // NOTE(review): assumes `start_edge` always has an incident face —
    // confirm this invariant holds for every caller.
    let incident_face = voronoi.get_half_edge_incident_face(edge).unwrap();
    // Add one boundary half-edge per box side between start_side and
    // end_side; its destination is the corner shared with the next side.
    while side != end_side {
        let new_edge = voronoi.add_half_edge(incident_face);
        voronoi.link_half_edges(edge, new_edge);
        voronoi.set_half_edge_origin(new_edge, voronoi.get_half_edge_destination(edge));
        let destination = voronoi.add_vertex(self.get_corner(side, side.next()));
        voronoi.set_half_edge_destination(new_edge, Some(destination));
        side = side.next();
        edge = new_edge;
    }
    // Final half-edge bridges from the last corner (or start_edge's
    // destination) to the origin of `end_edge`, closing the boundary chain.
    let new_edge = voronoi.add_half_edge(incident_face);
    voronoi.link_half_edges(edge, new_edge);
    voronoi.link_half_edges(new_edge, end_edge);
    voronoi.set_half_edge_origin(new_edge, voronoi.get_half_edge_destination(edge));
    voronoi.set_half_edge_destination(new_edge, voronoi.get_half_edge_origin(end_edge));
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Points inside the unit box are contained; points beyond any of the
    /// four sides are not.
    #[test]
    fn contains_test() {
        let bbox = BoundingBox::new(0.0, 1.0, 0.0, 1.0);
        assert!(bbox.contains(&cgmath::Point2::new(0.5, 0.5)));
        // One point past each side of the box.
        assert!(!bbox.contains(&cgmath::Point2::new(1.5, 0.5)));
        assert!(!bbox.contains(&cgmath::Point2::new(-0.5, 0.5)));
        assert!(!bbox.contains(&cgmath::Point2::new(0.5, 1.5)));
        assert!(!bbox.contains(&cgmath::Point2::new(0.5, -0.5)));
    }

    /// Segments with exactly one endpoint inside the box intersect the
    /// boundary once; segments crossing the whole box intersect it twice.
    #[test]
    fn intersections_test() {
        let bbox = BoundingBox::new(0.0, 1.0, 0.0, 1.0);
        let inside = cgmath::Point2::new(0.5, 0.5);
        // One origin beyond each side, all ending at the box center.
        for origin in &[
            cgmath::Point2::new(1.5, 0.5),
            cgmath::Point2::new(0.5, 1.5),
            cgmath::Point2::new(0.5, -0.5),
            cgmath::Point2::new(-0.5, 0.5),
        ] {
            assert_eq!(bbox.get_intersections(origin, &inside).len(), 1);
        }
        // Horizontal segment spanning the entire box.
        let origin = cgmath::Point2::new(-0.5, 0.5);
        let destination = cgmath::Point2::new(1.5, 0.5);
        assert_eq!(bbox.get_intersections(&origin, &destination).len(), 2);
        // Vertical segment spanning the entire box.
        let origin = cgmath::Point2::new(0.5, -0.5);
        let destination = cgmath::Point2::new(0.5, 1.5);
        assert_eq!(bbox.get_intersections(&origin, &destination).len(), 2);
    }
}
| true |
13aae36a5299459fc447b9505026e6a796e511ad
|
Rust
|
Xudong-Huang/radiotap
|
/src/lib.rs
|
UTF-8
| 12,446 | 3.234375 | 3 |
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! A parser for the [Radiotap](http://www.radiotap.org/) capture format.
//!
//! # Usage
//!
//! The `Radiotap::from_bytes(&capture)` constructor will parse all present
//! fields into a [Radiotap](struct.Radiotap.html) struct:
//!
//! ```
//! use radiotap::Radiotap;
//!
//! fn main() {
//! let capture = [
//! 0, 0, 56, 0, 107, 8, 52, 0, 185, 31, 155, 154, 0, 0, 0, 0, 20, 0, 124, 21, 64, 1, 213,
//! 166, 1, 0, 0, 0, 64, 1, 1, 0, 124, 21, 100, 34, 249, 1, 0, 0, 0, 0, 0, 0, 255, 1, 80,
//! 4, 115, 0, 0, 0, 1, 63, 0, 0,
//! ];
//!
//! let radiotap = Radiotap::from_bytes(&capture).unwrap();
//! println!("{:?}", radiotap.vht);
//! }
//! ```
//!
//! If you just want to parse a few specific fields from the Radiotap capture
//! you can create an iterator using `RadiotapIterator::from_bytes(&capture)`:
//!
//! ```
//! use radiotap::{field, RadiotapIterator};
//!
//! fn main() {
//! let capture = [
//! 0, 0, 56, 0, 107, 8, 52, 0, 185, 31, 155, 154, 0, 0, 0, 0, 20, 0, 124, 21, 64, 1, 213,
//! 166, 1, 0, 0, 0, 64, 1, 1, 0, 124, 21, 100, 34, 249, 1, 0, 0, 0, 0, 0, 0, 255, 1, 80,
//! 4, 115, 0, 0, 0, 1, 63, 0, 0,
//! ];
//!
//! for element in RadiotapIterator::from_bytes(&capture).unwrap() {
//! match element {
//! Ok((field::Kind::VHT, data)) => {
//! let vht: field::VHT = field::from_bytes(data).unwrap();
//! println!("{:?}", vht);
//! }
//! _ => {}
//! }
//! }
//! }
//! ```
pub mod field;
use std::{io::Cursor, result};
use quick_error::quick_error;
use crate::field::*;
quick_error! {
    /// All errors returned and used by the radiotap module.
    ///
    /// The enum and its `Display`/`Error` impls are generated by the
    /// `quick_error!` macro.
    #[derive(Debug)]
    pub enum Error {
        /// The internal cursor on the data returned an IO error.
        ParseError(err: std::io::Error) {
            from()
            source(err)
            // NOTE(review): `std::error::Error::description` is deprecated
            // (since Rust 1.42); consider dropping this arm in favor of the
            // `Display` impl — confirm the crate's MSRV before changing.
            description(err.description())
        }
        /// The given data is not a complete Radiotap capture.
        IncompleteError {
            display("The given data is not a complete Radiotap capture")
        }
        /// The given data is shorter than the amount specified in the Radiotap header.
        InvalidLength {
            display("The given data is shorter than the amount specified in the Radiotap header")
        }
        /// The given data is not a valid Radiotap capture.
        InvalidFormat {
            display("The given data is not a valid Radiotap capture")
        }
        /// Unsupported Radiotap header version.
        UnsupportedVersion {
            display("Unsupported Radiotap header version")
        }
        /// Unsupported Radiotap field.
        UnsupportedField {
            display("Unsupported Radiotap field")
        }
    }
}
/// Convenience alias: every fallible operation in this module uses [`Error`].
type Result<T> = result::Result<T, Error>;
/// A trait to align an offset to a particular word size, usually 1, 2, 4, or 8.
trait Align {
    /// Aligns the offset up to the next multiple of `align`.
    ///
    /// `align` must be a power of two (all Radiotap field alignments are).
    fn align(&mut self, align: u64);
}

impl<T> Align for Cursor<T> {
    /// Aligns the Cursor position up to the next multiple of `align`.
    fn align(&mut self, align: u64) {
        // The bitmask round-up below is only correct for power-of-two
        // alignments; make that precondition explicit in debug builds.
        debug_assert!(align.is_power_of_two());
        let p = self.position();
        self.set_position((p + align - 1) & !(align - 1));
    }
}
/// Represents an unparsed Radiotap capture format, only the header field is
/// parsed.
#[derive(Debug, Clone)]
pub struct RadiotapIterator<'a> {
    /// The parsed Radiotap header (version, length, present flags, size).
    header: Header,
    /// The raw Radiotap section: the first `header.length` bytes of the
    /// capture, including the header itself.
    data: &'a [u8],
}
impl<'a> RadiotapIterator<'a> {
    /// Parses only the Radiotap header of `input` and returns an iterator
    /// over the capture's fields.
    pub fn from_bytes(input: &'a [u8]) -> Result<RadiotapIterator<'a>> {
        let (iterator, _rest) = RadiotapIterator::parse(input)?;
        Ok(iterator)
    }

    /// Parses only the Radiotap header of `input` and returns an iterator
    /// over the capture's fields together with the bytes that follow the
    /// Radiotap section.
    pub fn parse(input: &'a [u8]) -> Result<(RadiotapIterator<'a>, &'a [u8])> {
        let header: Header = from_bytes(input)?;
        // Everything up to `header.length` belongs to the Radiotap section;
        // the remainder is handed back to the caller untouched.
        let (data, rest) = input.split_at(header.length);
        let iterator = RadiotapIterator { header, data };
        Ok((iterator, rest))
    }
}
/// An iterator over Radiotap fields.
#[doc(hidden)]
#[derive(Debug, Clone)]
pub struct RadiotapIteratorIntoIter<'a> {
    /// Remaining field kinds, stored reversed so `pop()` yields them in
    /// present-flag order.
    present: Vec<Kind>,
    /// Cursor over the Radiotap section, positioned at the next field.
    cursor: Cursor<&'a [u8]>,
}
impl<'a> IntoIterator for &'a RadiotapIterator<'a> {
    type IntoIter = RadiotapIteratorIntoIter<'a>;
    type Item = Result<(Kind, &'a [u8])>;

    /// Delegates to the by-value impl to avoid duplicating its body.
    /// Cloning is cheap here: `data` is a borrowed slice and `present` a
    /// small Vec of field kinds.
    fn into_iter(self) -> Self::IntoIter {
        self.clone().into_iter()
    }
}

impl<'a> IntoIterator for RadiotapIterator<'a> {
    type IntoIter = RadiotapIteratorIntoIter<'a>;
    type Item = Result<(Kind, &'a [u8])>;

    fn into_iter(self) -> Self::IntoIter {
        // Reverse the present-flag order so the iterator can `pop()` kinds
        // off the back of the Vec cheaply.
        let present = self.header.present.iter().rev().cloned().collect();
        let mut cursor = Cursor::new(self.data);
        // Skip past the fixed Radiotap header to the first field.
        cursor.set_position(self.header.size as u64);
        RadiotapIteratorIntoIter { present, cursor }
    }
}
impl<'a> Iterator for RadiotapIteratorIntoIter<'a> {
    type Item = Result<(Kind, &'a [u8])>;

    /// Yields the next `(Kind, raw field bytes)` pair, or `None` once every
    /// present field has been consumed.
    fn next(&mut self) -> Option<Self::Item> {
        match self.present.pop() {
            Some(mut kind) => {
                // Align the cursor to the current field's needed alignment.
                self.cursor.align(kind.align());
                let mut start = self.cursor.position() as usize;
                let mut end = start + kind.size();
                // The header lied about how long the body was
                if end > self.cursor.get_ref().len() {
                    Some(Err(Error::IncompleteError))
                } else {
                    // Switching to a vendor namespace, and we don't know how to handle
                    // so we just return the entire vendor namespace section
                    if kind == Kind::VendorNamespace(None) {
                        match VendorNamespace::from_bytes(&self.cursor.get_ref()[start..end]) {
                            Ok(vns) => {
                                // Step past the vendor-namespace header and
                                // widen the range to cover the vendor data.
                                // NOTE(review): the widened [start..end] range
                                // is not re-checked against the buffer length;
                                // a malformed skip_length could panic here —
                                // consider returning IncompleteError instead.
                                start += kind.size();
                                end += vns.skip_length as usize;
                                kind = Kind::VendorNamespace(Some(vns));
                            }
                            Err(e) => return Some(Err(e)),
                        }
                    }
                    let data = &self.cursor.get_ref()[start..end];
                    // Advance past this field for the next iteration.
                    self.cursor.set_position(end as u64);
                    Some(Ok((kind, data)))
                }
            }
            None => None,
        }
    }
}
impl Default for Header {
    /// Returns a minimal Radiotap header: version 0, fixed 8-byte length,
    /// and no present fields.
    fn default() -> Header {
        Header {
            version: 0,
            length: 8,
            present: Vec::new(),
            size: 8,
        }
    }
}
/// Represents a parsed Radiotap capture, including the parsed header and all
/// fields as Option members.
///
/// A field member is `Some` when the corresponding kind appeared in the
/// header's present flags and parsed successfully (see [`Radiotap::parse`]).
#[derive(Clone, Debug, Default, PartialEq)]
pub struct Radiotap {
    pub header: Header,
    // One Option per supported Radiotap field kind; the value types live in
    // the `field` module.
    pub tsft: Option<TSFT>,
    pub flags: Option<Flags>,
    pub rate: Option<Rate>,
    pub channel: Option<Channel>,
    pub fhss: Option<FHSS>,
    pub antenna_signal: Option<AntennaSignal>,
    pub antenna_noise: Option<AntennaNoise>,
    pub lock_quality: Option<LockQuality>,
    pub tx_attenuation: Option<TxAttenuation>,
    pub tx_attenuation_db: Option<TxAttenuationDb>,
    pub tx_power: Option<TxPower>,
    pub antenna: Option<Antenna>,
    pub antenna_signal_db: Option<AntennaSignalDb>,
    pub antenna_noise_db: Option<AntennaNoiseDb>,
    pub rx_flags: Option<RxFlags>,
    pub tx_flags: Option<TxFlags>,
    pub rts_retries: Option<RTSRetries>,
    pub data_retries: Option<DataRetries>,
    pub xchannel: Option<XChannel>,
    pub mcs: Option<MCS>,
    pub ampdu_status: Option<AMPDUStatus>,
    pub vht: Option<VHT>,
    pub timestamp: Option<Timestamp>,
}
impl Radiotap {
    /// Returns the parsed [Radiotap](struct.Radiotap.html) from an input byte
    /// array.
    pub fn from_bytes(input: &[u8]) -> Result<Radiotap> {
        Ok(Radiotap::parse(input)?.0)
    }

    /// Returns the parsed [Radiotap](struct.Radiotap.html) and remaining data
    /// from an input byte array.
    pub fn parse(input: &[u8]) -> Result<(Radiotap, &[u8])> {
        // Split the input into the Radiotap section and the trailing payload.
        let (iterator, rest) = RadiotapIterator::parse(input)?;
        let mut radiotap = Radiotap {
            header: iterator.header.clone(),
            ..Default::default()
        };
        // Walk every present field and store its parsed value in the matching
        // Option member; kinds without a member (e.g. vendor namespaces) are
        // skipped by the `_` arm.
        for result in &iterator {
            let (field_kind, data) = result?;
            match field_kind {
                Kind::TSFT => radiotap.tsft = from_bytes_some(data)?,
                Kind::Flags => radiotap.flags = from_bytes_some(data)?,
                Kind::Rate => radiotap.rate = from_bytes_some(data)?,
                Kind::Channel => radiotap.channel = from_bytes_some(data)?,
                Kind::FHSS => radiotap.fhss = from_bytes_some(data)?,
                Kind::AntennaSignal => radiotap.antenna_signal = from_bytes_some(data)?,
                Kind::AntennaNoise => radiotap.antenna_noise = from_bytes_some(data)?,
                Kind::LockQuality => radiotap.lock_quality = from_bytes_some(data)?,
                Kind::TxAttenuation => radiotap.tx_attenuation = from_bytes_some(data)?,
                Kind::TxAttenuationDb => radiotap.tx_attenuation_db = from_bytes_some(data)?,
                Kind::TxPower => radiotap.tx_power = from_bytes_some(data)?,
                Kind::Antenna => radiotap.antenna = from_bytes_some(data)?,
                Kind::AntennaSignalDb => radiotap.antenna_signal_db = from_bytes_some(data)?,
                Kind::AntennaNoiseDb => radiotap.antenna_noise_db = from_bytes_some(data)?,
                Kind::RxFlags => radiotap.rx_flags = from_bytes_some(data)?,
                Kind::TxFlags => radiotap.tx_flags = from_bytes_some(data)?,
                Kind::RTSRetries => radiotap.rts_retries = from_bytes_some(data)?,
                Kind::DataRetries => radiotap.data_retries = from_bytes_some(data)?,
                Kind::XChannel => radiotap.xchannel = from_bytes_some(data)?,
                Kind::MCS => radiotap.mcs = from_bytes_some(data)?,
                Kind::AMPDUStatus => radiotap.ampdu_status = from_bytes_some(data)?,
                Kind::VHT => radiotap.vht = from_bytes_some(data)?,
                Kind::Timestamp => radiotap.timestamp = from_bytes_some(data)?,
                _ => {}
            }
        }
        Ok((radiotap, rest))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A capture with an unknown vendor namespace still parses the standard
    /// fields that precede it.
    #[test]
    fn good_vendor() {
        let frame = [
            0, 0, 39, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
            160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
        ];
        assert_eq!(
            Radiotap::from_bytes(&frame).unwrap().rate.unwrap(),
            Rate { value: 2.0 }
        );
    }

    /// Only Radiotap version 0 is supported.
    #[test]
    fn bad_version() {
        let frame = [
            1, 0, 39, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
            160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
        ];
        assert!(matches!(
            Radiotap::from_bytes(&frame).unwrap_err(),
            Error::UnsupportedVersion
        ));
    }

    /// The header's declared length must not exceed the input length.
    #[test]
    fn bad_header_length() {
        let frame = [
            0, 0, 40, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
            160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
        ];
        assert!(matches!(
            Radiotap::from_bytes(&frame).unwrap_err(),
            Error::InvalidLength
        ));
    }

    /// A field that runs past the end of the data is an incomplete capture.
    #[test]
    fn bad_actual_length() {
        let frame = [
            0, 0, 39, 0, 47, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
            160, 0, 227, 5, 0, 0, 255, 255, 255, 255, 2, 0, 222, 173, 4,
        ];
        assert!(matches!(
            Radiotap::from_bytes(&frame).unwrap_err(),
            Error::IncompleteError
        ));
    }

    /// A truncated vendor namespace section is an incomplete capture.
    #[test]
    fn bad_vendor() {
        let frame = [
            0, 0, 34, 0, 46, 72, 0, 192, 0, 0, 0, 128, 0, 0, 0, 160, 4, 0, 0, 0, 16, 2, 158, 9,
            160, 0, 227, 5, 0, 0, 255, 255, 255, 255,
        ];
        assert!(matches!(
            Radiotap::from_bytes(&frame).unwrap_err(),
            Error::IncompleteError
        ));
    }
}
| true |
f44f5a594c915e78aba56d4941978cd9c394ea29
|
Rust
|
BurntSushi/rust-analyzer
|
/xtask/src/codegen.rs
|
UTF-8
| 2,791 | 2.71875 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
//! We use code generation heavily in rust-analyzer.
//!
//! Rather than doing it via proc-macros, we use the old-school way of just dumping
//! the source code.
//!
//! This module's submodules define specific bits that we generate.
mod gen_syntax;
mod gen_parser_tests;
mod gen_assists_docs;
use std::{mem, path::Path};
use crate::{not_bash::fs2, Result};
pub use self::{
gen_assists_docs::generate_assists_docs, gen_parser_tests::generate_parser_tests,
gen_syntax::generate_syntax,
};
// Repository-relative paths read and written by the codegen submodules.
// Parser grammar sources and the inline parser-test directories.
const GRAMMAR_DIR: &str = "crates/ra_parser/src/grammar";
const OK_INLINE_TESTS_DIR: &str = "crates/ra_syntax/test_data/parser/inline/ok";
const ERR_INLINE_TESTS_DIR: &str = "crates/ra_syntax/test_data/parser/inline/err";
// Generated syntax-kind and AST definition files.
const SYNTAX_KINDS: &str = "crates/ra_parser/src/syntax_kind/generated.rs";
const AST_NODES: &str = "crates/ra_syntax/src/ast/generated/nodes.rs";
const AST_TOKENS: &str = "crates/ra_syntax/src/ast/generated/tokens.rs";
// Assist handler sources and the docs/tests generated from them.
const ASSISTS_DIR: &str = "crates/ra_assists/src/handlers";
const ASSISTS_TESTS: &str = "crates/ra_assists/src/doc_tests/generated.rs";
const ASSISTS_DOCS: &str = "docs/user/assists.md";
/// Controls whether codegen writes generated files or only checks them.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Mode {
    /// Write the generated contents to disk, replacing what is there.
    Overwrite,
    /// Never write; fail if the on-disk contents are out of date.
    Verify,
}
/// A helper to update a file on disk if its contents have changed.
///
/// Contents are compared with line endings normalized, so a CRLF checkout
/// does not count as a difference. In `Mode::Verify` nothing is written;
/// instead an error is returned when the file is out of date.
fn update(path: &Path, contents: &str, mode: Mode) -> Result<()> {
    // Normalize CRLF so comparisons are stable across platforms.
    fn normalize(s: &str) -> String {
        s.replace("\r\n", "\n")
    }

    if let Ok(old_contents) = fs2::read_to_string(path) {
        if normalize(&old_contents) == normalize(contents) {
            // Already up to date; nothing to do.
            return Ok(());
        }
    }
    if mode == Mode::Verify {
        anyhow::bail!("`{}` is not up-to-date", path.display());
    }
    eprintln!("updating {}", path.display());
    fs2::write(path, contents)?;
    Ok(())
}
/// Extracts blocks of consecutive `// `-prefixed comment lines; a bare `//`
/// line terminates the current block.
fn extract_comment_blocks(text: &str) -> Vec<Vec<String>> {
    do_extract_comment_blocks(text, false)
}

/// Like `extract_comment_blocks`, but a bare `//` line is kept as an empty
/// string inside the current block instead of terminating it.
fn extract_comment_blocks_with_empty_lines(text: &str) -> Vec<Vec<String>> {
    do_extract_comment_blocks(text, true)
}
/// Splits `text` into blocks of consecutive `// `-prefixed comment lines,
/// with the prefix stripped from each line.
///
/// Leading whitespace before `//` is ignored. A non-comment line ends the
/// current block. When `allow_blocks_with_empty_lines` is true, a bare `//`
/// line is recorded as an empty string inside the current block instead of
/// ending it.
fn do_extract_comment_blocks(text: &str, allow_blocks_with_empty_lines: bool) -> Vec<Vec<String>> {
    let mut res = Vec::new();
    let prefix = "// ";
    let lines = text.lines().map(str::trim_start);

    let mut block = vec![];
    for line in lines {
        if line == "//" && allow_blocks_with_empty_lines {
            block.push(String::new());
            continue;
        }
        let is_comment = line.starts_with(prefix);
        if is_comment {
            block.push(line[prefix.len()..].to_string());
        } else if !block.is_empty() {
            // Non-comment line: flush the accumulated block and start fresh.
            res.push(mem::take(&mut block));
        }
    }
    // Flush a block that runs to the end of the input.
    if !block.is_empty() {
        res.push(block);
    }
    res
}
| true |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 100