bytes +x86_64 +linux

The bytes module provides support functions for working with slices of bytes ([]u8).

Index

Types

// Undocumented types:
type tokenizer = struct {
	// string being tokenized
	s: []u8,
	// delimiter
	d: []u8,
	// p < 0 for reverse tokenizers, 0 <= p for forward ones.
	p: i64,
};

Functions

fn contains(haystack: []u8, needles: (u8 | []u8)...) bool;
fn cut(in: []u8, delim: ([]u8 | u8)) ([]u8, []u8);
fn equal(a: []u8, b: []u8) bool;
fn hasprefix(in: []u8, prefix: []u8) bool;
fn hassuffix(in: []u8, suffix: []u8) bool;
fn index(haystack: []u8, needle: (u8 | []u8)) (size | void);
fn ltrim(in: []u8, trim: u8...) []u8;
fn next_token(s: *tokenizer) ([]u8 | void);
fn peek_token(s: *tokenizer) ([]u8 | void);
fn rcut(in: []u8, delim: ([]u8 | u8)) ([]u8, []u8);
fn remaining_tokens(s: *tokenizer) []u8;
fn reverse(b: []u8) void;
fn rindex(haystack: []u8, needle: (u8 | []u8)) (size | void);
fn rtokenize(s: []u8, delim: []u8) tokenizer;
fn rtrim(in: []u8, trim: u8...) []u8;
fn tokenize(s: []u8, delim: []u8) tokenizer;
fn trim(in: []u8, trim: u8...) []u8;
fn zero(buf: []u8) void;

Types

type tokenizer

type tokenizer = struct {
	// string being tokenized
	s: []u8,
	// delimiter
	d: []u8,
	// p < 0 for reverse tokenizers, 0 <= p for forward ones.
	p: i64,
};

Functions

fn contains

fn contains(haystack: []u8, needles: (u8 | []u8)...) bool;

Returns true if a byte slice contains a byte or a sequence of bytes.
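
A minimal usage sketch, assuming the byte slices are prepared with strings::toutf8 from the strings module (not part of this module); each needle below is checked with a separate call:

use bytes;
use strings;

export fn main() void = {
	const data = strings::toutf8("hello world");
	// A single-byte needle (0x6f is ASCII 'o'):
	assert(bytes::contains(data, 0x6f: u8));
	// A sub-slice needle:
	assert(bytes::contains(data, strings::toutf8("wor")));
	assert(!bytes::contains(data, strings::toutf8("xyz")));
};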

fn cut

fn cut(in: []u8, delim: ([]u8 | u8)) ([]u8, []u8);

Returns the input slice "cut" along the first instance of a delimiter, returning everything up to the delimiter, and everything after the delimiter, in a tuple. The contents are borrowed from the input slice.

The caller must ensure that 'delim' is not an empty slice.
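
A short sketch, assuming tuple unpacking and the strings::toutf8/strings::fromutf8 helpers from the strings module:

use bytes;
use fmt;
use strings;

export fn main() void = {
	const line = strings::toutf8("key=value");
	// Split on the first '=' (0x3d); both halves borrow from the input.
	let (key, val) = bytes::cut(line, 0x3d: u8);
	fmt::printfln("{} -> {}", strings::fromutf8(key)!,
		strings::fromutf8(val)!)!;
};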

fn equal

fn equal(a: []u8, b: []u8) bool;

Returns true if the two byte sequences are identical.

This function should NOT be used with sensitive data such as cryptographic hashes. In such a case, the constant-time crypto::compare should be used instead.

fn hasprefix

fn hasprefix(in: []u8, prefix: []u8) bool;

Returns true if "in" has the given prefix, false otherwise

fn hassuffix

fn hassuffix(in: []u8, suffix: []u8) bool;

Returns true if "in" has the given suffix, false otherwise

fn index

fn index(haystack: []u8, needle: (u8 | []u8)) (size | void);

Returns the offset of the first instance of "needle" in a "haystack" of bytes, or void if it is not found.
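
Because the result is a tagged union, callers typically match on it; a minimal sketch, assuming strings::toutf8 for the input:

use bytes;
use fmt;
use strings;

export fn main() void = {
	const data = strings::toutf8("hello world");
	match (bytes::index(data, strings::toutf8("world"))) {
	case let i: size =>
		fmt::printfln("found at offset {}", i)!;
	case void =>
		fmt::println("not found")!;
	};
};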

fn ltrim

fn ltrim(in: []u8, trim: u8...) []u8;

Returns a slice (borrowed from the given input slice) after trimming off the given list of bytes from the start of the input.

fn next_token

fn next_token(s: *tokenizer) ([]u8 | void);

Returns the next slice from a tokenizer, and advances the cursor. Returns void if there are no tokens left, and on all subsequent invocations. If the slice starts with, or ends with, the delimiter, an empty slice is returned at the beginning or end of the sequence, respectively.
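
A typical loop drains the tokenizer until it yields void; a sketch, assuming strings::toutf8 and strings::fromutf8 from the strings module for input and output:

use bytes;
use fmt;
use strings;

export fn main() void = {
	const data = strings::toutf8("a,b,,c");
	let tok = bytes::tokenize(data, strings::toutf8(","));
	for (true) {
		match (bytes::next_token(&tok)) {
		case let t: []u8 =>
			fmt::printfln("token: '{}'", strings::fromutf8(t)!)!;
		case void =>
			break;
		};
	};
	// Yields "a", "b", "" (for the adjacent delimiters), then "c".
};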

fn peek_token

fn peek_token(s: *tokenizer) ([]u8 | void);

Same as next_token, but does not advance the cursor.

fn rcut

fn rcut(in: []u8, delim: ([]u8 | u8)) ([]u8, []u8);

Returns the input slice "cut" along the last instance of a delimiter, returning everything up to the delimiter, and everything after the delimiter, in a tuple. The contents are borrowed from the input slice.

The caller must ensure that 'delim' is not an empty slice.

fn remaining_tokens

fn remaining_tokens(s: *tokenizer) []u8;

Returns the remainder of the slice associated with a tokenizer, without doing any further tokenization.
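
A sketch of the intended pattern (the request line below is purely illustrative, and the helpers from the strings module are assumptions outside this module):

use bytes;
use fmt;
use strings;

export fn main() void = {
	const data = strings::toutf8("GET /index.html HTTP/1.1");
	let tok = bytes::tokenize(data, strings::toutf8(" "));
	// Take the first token, then keep the rest untokenized.
	const method = bytes::next_token(&tok) as []u8;
	const rest = bytes::remaining_tokens(&tok);
	fmt::printfln("{} | {}", strings::fromutf8(method)!,
		strings::fromutf8(rest)!)!;
};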

fn reverse

fn reverse(b: []u8) void;

Reverses a slice of bytes in place.
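
Because the reversal happens in place, the slice must refer to mutable memory; a minimal sketch using a local array:

use bytes;

export fn main() void = {
	// 0x61..0x65 is "abcde"; a local array provides mutable storage.
	let buf: [5]u8 = [0x61, 0x62, 0x63, 0x64, 0x65];
	bytes::reverse(buf[..]);
	const want: [5]u8 = [0x65, 0x64, 0x63, 0x62, 0x61];
	assert(bytes::equal(buf[..], want[..]));
};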

fn rindex

fn rindex(haystack: []u8, needle: (u8 | []u8)) (size | void);

Returns the offset of the last instance of "needle" in a "haystack" of bytes, or void if it is not found.

fn rtokenize

fn rtokenize(s: []u8, delim: []u8) tokenizer;

Returns a tokenizer which yields sub-slices tokenized by a delimiter, starting at the end of the slice and moving backwards with each call to next_token. The caller must ensure that 'delim' is not an empty slice. Can tokenize a slice of length less than types::I64_MAX.
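
A sketch that takes the last component of a path-like byte slice (strings::toutf8 and strings::fromutf8 are assumed helpers from the strings module):

use bytes;
use fmt;
use strings;

export fn main() void = {
	const path = strings::toutf8("/usr/local/bin/hare");
	let tok = bytes::rtokenize(path, strings::toutf8("/"));
	// The first token yielded is the last path component.
	match (bytes::next_token(&tok)) {
	case let t: []u8 =>
		fmt::printfln("basename: {}", strings::fromutf8(t)!)!;
	case void =>
		abort();
	};
};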

fn rtrim

fn rtrim(in: []u8, trim: u8...) []u8;

Returns a slice (borrowed from the given input slice) after trimming off the given list of bytes from the end of the input.

fn tokenize

fn tokenize(s: []u8, delim: []u8) tokenizer;

Returns a tokenizer which yields sub-slices tokenized by a delimiter, starting at the beginning of the slice. The caller must ensure that 'delim' is not an empty slice. Can tokenize a slice of length less than types::I64_MAX.

fn trim

fn trim(in: []u8, trim: u8...) []u8;

Returns a slice (borrowed from the given input slice) after trimming off the given list of bytes from both ends of the input.
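
A sketch that strips ASCII whitespace bytes from both ends (ltrim and rtrim work the same way, but on one end only); strings::toutf8 is assumed from the strings module:

use bytes;
use strings;

export fn main() void = {
	const padded = strings::toutf8("  \thello\t  ");
	// 0x20 is ASCII space, 0x09 is tab.
	const trimmed = bytes::trim(padded, 0x20: u8, 0x09: u8);
	assert(bytes::equal(trimmed, strings::toutf8("hello")));
};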

fn zero

fn zero(buf: []u8) void;

Sets all bytes in a slice to zero. This is suitable for erasing private data from a slice.
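
A sketch of the intended pattern; the key buffer and its contents are hypothetical:

use bytes;

export fn main() void = {
	// Hypothetical buffer holding secret material.
	let key: [32]u8 = [0xab...];
	// ... use the key ...
	bytes::zero(key[..]); // erase it before the buffer goes out of scope
};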