Everywhere: Rename ASSERT => VERIFY

(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well,
let's rename them to VERIFY to prevent confusion, as everyone is
used to assertions being compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug
checks, but I'm doing this wholesale conversion first since we've
accumulated thousands of these already, and it's not immediately
obvious which ones are suitable for ASSERT.
Andreas Kling 2021-02-23 20:42:32 +01:00
parent b33a6a443e
commit 5d180d1f99
725 changed files with 3448 additions and 3448 deletions
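
To make the intended distinction concrete, here is a minimal, hedged sketch of
the semantics this commit is steering toward. It is not the actual
AK/Assertions.h; the DEBUG_CHECKS flag and the definitions below are
assumptions for illustration only.

#include <cstdio>
#include <cstdlib>

// Checked in every build, debug and release alike.
#define VERIFY(expr)                                             \
    do {                                                         \
        if (!(expr)) {                                           \
            std::fprintf(stderr, "VERIFY(%s) failed at %s:%d\n", \
                #expr, __FILE__, __LINE__);                      \
            std::abort();                                        \
        }                                                        \
    } while (0)

#define VERIFY_NOT_REACHED() VERIFY(false)

// A possible future debug-only ASSERT (hypothetical): active in debug
// builds, compiled out entirely when DEBUG_CHECKS is not defined.
#ifdef DEBUG_CHECKS
#    define ASSERT(expr) VERIFY(expr)
#else
#    define ASSERT(expr) ((void)0)
#endif

int main()
{
    int index = 3;
    ASSERT(index < 4); // a cheap sanity check that may vanish in release
    VERIFY(index < 4); // a bounds check that must survive release builds
    return 0;
}

Building with -DDEBUG_CHECKS enables both macros; building without it leaves
only VERIFY active, which matches the naming rationale above: a VERIFY is
never compiled out, so readers cannot mistake it for a traditional assert.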

@ -43,12 +43,12 @@ struct Array {
constexpr const T& at(size_t index) const
{
ASSERT(index < size());
VERIFY(index < size());
return (*this)[index];
}
constexpr T& at(size_t index)
{
ASSERT(index < size());
VERIFY(index < size());
return (*this)[index];
}

@ -31,9 +31,9 @@
#else
# include <assert.h>
# ifndef __serenity__
# define ASSERT assert
# define ASSERT_NOT_REACHED() assert(false)
# define VERIFY assert
# define VERIFY_NOT_REACHED() assert(false)
# define RELEASE_ASSERT assert
# define TODO ASSERT_NOT_REACHED
# define TODO VERIFY_NOT_REACHED
# endif
#endif

@ -85,12 +85,12 @@ public:
size_t size_in_bytes() const { return ceil_div(m_size, static_cast<size_t>(8)); }
bool get(size_t index) const
{
ASSERT(index < m_size);
VERIFY(index < m_size);
return 0 != (m_data[index / 8] & (1u << (index % 8)));
}
void set(size_t index, bool value) const
{
ASSERT(index < m_size);
VERIFY(index < m_size);
if (value)
m_data[index / 8] |= static_cast<u8>((1u << (index % 8)));
else
@ -104,8 +104,8 @@ public:
size_t count_in_range(size_t start, size_t len, bool value) const
{
ASSERT(start < m_size);
ASSERT(start + len <= m_size);
VERIFY(start < m_size);
VERIFY(start + len <= m_size);
if (len == 0)
return 0;
@ -153,8 +153,8 @@ public:
void grow(size_t size, bool default_value)
{
ASSERT(m_owned);
ASSERT(size > m_size);
VERIFY(m_owned);
VERIFY(size > m_size);
auto previous_size_bytes = size_in_bytes();
auto previous_size = m_size;
@ -176,8 +176,8 @@ public:
template<bool VALUE>
void set_range(size_t start, size_t len)
{
ASSERT(start < m_size);
ASSERT(start + len <= m_size);
VERIFY(start < m_size);
VERIFY(start + len <= m_size);
if (len == 0)
return;
@ -228,7 +228,7 @@ public:
template<bool VALUE>
Optional<size_t> find_one_anywhere(size_t hint = 0) const
{
ASSERT(hint < m_size);
VERIFY(hint < m_size);
const u8* end = &m_data[m_size / 8];
for (;;) {
@ -249,7 +249,7 @@ public:
byte = m_data[i];
if constexpr (!VALUE)
byte = ~byte;
ASSERT(byte != 0);
VERIFY(byte != 0);
return i * 8 + __builtin_ffs(byte) - 1;
}
}
@ -264,7 +264,7 @@ public:
u8 byte = VALUE ? 0x00 : 0xff;
size_t i = (const u8*)ptr32 - &m_data[0];
size_t byte_count = m_size / 8;
ASSERT(i <= byte_count);
VERIFY(i <= byte_count);
while (i < byte_count && m_data[i] == byte)
i++;
if (i == byte_count) {
@ -279,7 +279,7 @@ public:
byte = m_data[i];
if constexpr (!VALUE)
byte = ~byte;
ASSERT(byte != 0);
VERIFY(byte != 0);
return i * 8 + __builtin_ffs(byte) - 1;
}
@ -288,7 +288,7 @@ public:
val32 = *ptr32;
if constexpr (!VALUE)
val32 = ~val32;
ASSERT(val32 != 0);
VERIFY(val32 != 0);
return ((const u8*)ptr32 - &m_data[0]) * 8 + __builtin_ffsl(val32) - 1;
}
}
@ -317,7 +317,7 @@ public:
byte = m_data[i];
if constexpr (!VALUE)
byte = ~byte;
ASSERT(byte != 0);
VERIFY(byte != 0);
return i * 8 + __builtin_ffs(byte) - 1;
}
@ -509,7 +509,7 @@ public:
: m_size(size)
, m_owned(true)
{
ASSERT(m_size != 0);
VERIFY(m_size != 0);
m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
fill(default_value);
}

@ -54,12 +54,12 @@ public:
u8& operator[](size_t i)
{
ASSERT(i < m_size);
VERIFY(i < m_size);
return m_data[i];
}
const u8& operator[](size_t i) const
{
ASSERT(i < m_size);
VERIFY(i < m_size);
return m_data[i];
}
bool is_empty() const { return !m_size; }
@ -83,7 +83,7 @@ public:
// NOTE: trim() does not reallocate.
void trim(size_t size)
{
ASSERT(size <= m_size);
VERIFY(size <= m_size);
m_size = size;
}
@ -145,12 +145,12 @@ public:
u8& operator[](size_t i)
{
ASSERT(m_impl);
VERIFY(m_impl);
return (*m_impl)[i];
}
u8 operator[](size_t i) const
{
ASSERT(m_impl);
VERIFY(m_impl);
return (*m_impl)[i];
}
bool is_empty() const { return !m_impl || m_impl->is_empty(); }
@ -215,7 +215,7 @@ public:
return {};
// I cannot hand you a slice I don't have
ASSERT(offset + size <= this->size());
VERIFY(offset + size <= this->size());
return copy(offset_pointer(offset), size);
}
@ -232,7 +232,7 @@ public:
{
if (data_size == 0)
return;
ASSERT(data != nullptr);
VERIFY(data != nullptr);
int old_size = size();
grow(size() + data_size);
__builtin_memcpy(this->data() + old_size, data, data_size);
@ -246,7 +246,7 @@ public:
void overwrite(size_t offset, const void* data, size_t data_size)
{
// make sure we're not told to write past the end
ASSERT(offset + data_size <= size());
VERIFY(offset + data_size <= size());
__builtin_memcpy(this->data() + offset, data, data_size);
}
@ -285,7 +285,7 @@ inline ByteBufferImpl::ByteBufferImpl(const void* data, size_t size)
inline void ByteBufferImpl::grow(size_t size)
{
ASSERT(size > m_size);
VERIFY(size > m_size);
if (size == 0) {
if (m_data)
kfree(m_data);

@ -156,13 +156,13 @@ public:
ALWAYS_INLINE constexpr bool operator!() const
{
ASSERT(!m_overflow);
VERIFY(!m_overflow);
return !m_value;
}
ALWAYS_INLINE constexpr T value() const
{
ASSERT(!m_overflow);
VERIFY(!m_overflow);
return m_value;
}

@ -42,7 +42,7 @@
#ifndef DBGLN_NO_COMPILETIME_FORMAT_CHECK
namespace AK::Format::Detail {
// We have to define a local "purely constexpr" Array that doesn't lead back to us (via e.g. ASSERT)
// We have to define a local "purely constexpr" Array that doesn't lead back to us (via e.g. VERIFY)
template<typename T, size_t Size>
struct Array {
constexpr static size_t size() { return Size; }

@ -50,7 +50,7 @@ public:
T dequeue_end()
{
ASSERT(!this->is_empty());
VERIFY(!this->is_empty());
auto& slot = this->elements()[(this->m_head + this->m_size - 1) % Capacity];
T value = move(slot);
slot.~T();

@ -55,7 +55,7 @@ public:
}
const auto nwritten = write(bytes);
ASSERT(nwritten == bytes.size());
VERIFY(nwritten == bytes.size());
return true;
}
@ -123,7 +123,7 @@ public:
Bytes reserve_contigous_space(size_t count)
{
ASSERT(count <= remaining_contigous_space());
VERIFY(count <= remaining_contigous_space());
Bytes bytes { m_queue.m_storage + (m_queue.head_index() + m_queue.size()) % Capacity, count };

@ -76,7 +76,7 @@ public:
T dequeue()
{
ASSERT(!is_empty());
VERIFY(!is_empty());
auto& slot = elements()[m_head];
T value = move(slot);
slot.~T();

@ -91,22 +91,22 @@ public:
T& first()
{
ASSERT(m_head);
VERIFY(m_head);
return m_head->value;
}
const T& first() const
{
ASSERT(m_head);
VERIFY(m_head);
return m_head->value;
}
T& last()
{
ASSERT(m_head);
VERIFY(m_head);
return m_tail->value;
}
const T& last() const
{
ASSERT(m_head);
VERIFY(m_head);
return m_tail->value;
}
@ -117,13 +117,13 @@ public:
requires { T(value); }, "Conversion operator is missing.");
auto* node = new Node(forward<U>(value));
if (!m_head) {
ASSERT(!m_tail);
VERIFY(!m_tail);
m_head = node;
m_tail = node;
return;
}
ASSERT(m_tail);
ASSERT(!node->next);
VERIFY(m_tail);
VERIFY(!node->next);
m_tail->next = node;
node->prev = m_tail;
m_tail = node;
@ -135,13 +135,13 @@ public:
static_assert(IsSame<T, U>::value);
auto* node = new Node(forward<U>(value));
if (!m_head) {
ASSERT(!m_tail);
VERIFY(!m_tail);
m_head = node;
m_tail = node;
return;
}
ASSERT(m_tail);
ASSERT(!node->prev);
VERIFY(m_tail);
VERIFY(!node->prev);
m_head->prev = node;
node->next = m_head;
m_head = node;
@ -174,20 +174,20 @@ public:
void remove(Iterator it)
{
ASSERT(it.m_node);
VERIFY(it.m_node);
auto* node = it.m_node;
if (node->prev) {
ASSERT(node != m_head);
VERIFY(node != m_head);
node->prev->next = node->next;
} else {
ASSERT(node == m_head);
VERIFY(node == m_head);
m_head = node->next;
}
if (node->next) {
ASSERT(node != m_tail);
VERIFY(node != m_tail);
node->next->prev = node->prev;
} else {
ASSERT(node == m_tail);
VERIFY(node == m_tail);
m_tail = node->prev;
}
delete node;

@ -38,8 +38,8 @@ struct FlyStringImplTraits : public AK::Traits<StringImpl*> {
static unsigned hash(const StringImpl* s) { return s ? s->hash() : 0; }
static bool equals(const StringImpl* a, const StringImpl* b)
{
ASSERT(a);
ASSERT(b);
VERIFY(a);
VERIFY(b);
return *a == *b;
}
};
@ -70,7 +70,7 @@ FlyString::FlyString(const String& string)
string.impl()->set_fly({}, true);
m_impl = string.impl();
} else {
ASSERT((*it)->is_fly());
VERIFY((*it)->is_fly());
m_impl = *it;
}
}

@ -47,7 +47,7 @@ constexpr size_t use_next_index = NumericLimits<size_t>::max();
// 65 bytes. Choosing a larger power of two won't hurt and is a bit of mitigation against out-of-bounds accesses.
inline size_t convert_unsigned_to_string(u64 value, Array<u8, 128>& buffer, u8 base, bool upper_case)
{
ASSERT(base >= 2 && base <= 16);
VERIFY(base >= 2 && base <= 16);
static constexpr const char* lowercase_lookup = "0123456789abcdef";
static constexpr const char* uppercase_lookup = "0123456789ABCDEF";
@ -80,7 +80,7 @@ void vformat_impl(TypeErasedFormatParams& params, FormatBuilder& builder, Format
FormatParser::FormatSpecifier specifier;
if (!parser.consume_specifier(specifier)) {
ASSERT(parser.is_eof());
VERIFY(parser.is_eof());
return;
}
@ -118,9 +118,9 @@ size_t TypeErasedParameter::to_size() const
else if (type == TypeErasedParameter::Type::Int64)
svalue = *reinterpret_cast<const i64*>(value);
else
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
ASSERT(svalue >= 0);
VERIFY(svalue >= 0);
return static_cast<size_t>(svalue);
}
@ -163,7 +163,7 @@ bool FormatParser::consume_number(size_t& value)
}
bool FormatParser::consume_specifier(FormatSpecifier& specifier)
{
ASSERT(!next_is('}'));
VERIFY(!next_is('}'));
if (!consume_specific('{'))
return false;
@ -176,7 +176,7 @@ bool FormatParser::consume_specifier(FormatSpecifier& specifier)
size_t level = 1;
while (level > 0) {
ASSERT(!is_eof());
VERIFY(!is_eof());
if (consume_specific('{')) {
++level;
@ -194,7 +194,7 @@ bool FormatParser::consume_specifier(FormatSpecifier& specifier)
specifier.flags = m_input.substring_view(begin, tell() - begin - 1);
} else {
if (!consume_specific('}'))
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
specifier.flags = "";
}
@ -210,7 +210,7 @@ bool FormatParser::consume_replacement_field(size_t& index)
index = use_next_index;
if (!consume_specific('}'))
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return true;
}
@ -426,7 +426,7 @@ void vformat(const LogStream& stream, StringView fmtstr, TypeErasedFormatParams
void StandardFormatter::parse(TypeErasedFormatParams& params, FormatParser& parser)
{
if (StringView { "<^>" }.contains(parser.peek(1))) {
ASSERT(!parser.next_is(is_any_of("{}")));
VERIFY(!parser.next_is(is_any_of("{}")));
m_fill = parser.consume();
}
@ -498,21 +498,21 @@ void StandardFormatter::parse(TypeErasedFormatParams& params, FormatParser& pars
if (!parser.is_eof())
dbgln("{} did not consume '{}'", __PRETTY_FUNCTION__, parser.remaining());
ASSERT(parser.is_eof());
VERIFY(parser.is_eof());
}
void Formatter<StringView>::format(FormatBuilder& builder, StringView value)
{
if (m_sign_mode != FormatBuilder::SignMode::Default)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_alternative_form)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_zero_pad)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_mode != Mode::Default && m_mode != Mode::String && m_mode != Mode::Character)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_width.has_value() && m_precision.has_value())
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
m_width = m_width.value_or(0);
m_precision = m_precision.value_or(NumericLimits<size_t>::max());
@ -530,7 +530,7 @@ void Formatter<T, typename EnableIf<IsIntegral<T>::value>::Type>::format(FormatB
{
if (m_mode == Mode::Character) {
// FIXME: We just support ASCII for now, in the future maybe unicode?
ASSERT(value >= 0 && value <= 127);
VERIFY(value >= 0 && value <= 127);
m_mode = Mode::String;
@ -539,17 +539,17 @@ void Formatter<T, typename EnableIf<IsIntegral<T>::value>::Type>::format(FormatB
}
if (m_precision.has_value())
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_mode == Mode::Pointer) {
if (m_sign_mode != FormatBuilder::SignMode::Default)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_align != FormatBuilder::Align::Default)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_alternative_form)
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
if (m_width.has_value())
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
m_mode = Mode::Hexadecimal;
m_alternative_form = true;
@ -574,7 +574,7 @@ void Formatter<T, typename EnableIf<IsIntegral<T>::value>::Type>::format(FormatB
base = 16;
upper_case = true;
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
m_width = m_width.value_or(0);
@ -621,7 +621,7 @@ void Formatter<double>::format(FormatBuilder& builder, double value)
base = 16;
upper_case = true;
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
m_width = m_width.value_or(0);
@ -647,7 +647,7 @@ void vout(FILE* file, StringView fmtstr, TypeErasedFormatParams params, bool new
const auto string = builder.string_view();
const auto retval = ::fwrite(string.characters_without_null_termination(), 1, string.length(), file);
ASSERT(static_cast<size_t>(retval) == string.length());
VERIFY(static_cast<size_t>(retval) == string.length());
}
#endif

@ -87,7 +87,7 @@ struct TypeErasedParameter {
return Type::Int64;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
template<typename T>

@ -53,7 +53,7 @@ public:
Out operator()(In... in) const
{
ASSERT(m_callable_wrapper);
VERIFY(m_callable_wrapper);
return m_callable_wrapper->call(forward<In>(in)...);
}

@ -79,14 +79,14 @@ bool GenericLexer::next_is(const char* expected) const
// Go back to the previous character
void GenericLexer::retreat()
{
ASSERT(m_index > 0);
VERIFY(m_index > 0);
m_index--;
}
// Consume a character and advance the parser index
char GenericLexer::consume()
{
ASSERT(!is_eof());
VERIFY(!is_eof());
return m_input[m_index++];
}

@ -157,7 +157,7 @@ public:
void ensure_capacity(size_t capacity)
{
ASSERT(capacity >= size());
VERIFY(capacity >= size());
rehash(capacity * 2);
}
@ -256,11 +256,11 @@ public:
void remove(Iterator iterator)
{
ASSERT(iterator.m_bucket);
VERIFY(iterator.m_bucket);
auto& bucket = *iterator.m_bucket;
ASSERT(bucket.used);
ASSERT(!bucket.end);
ASSERT(!bucket.deleted);
VERIFY(bucket.used);
VERIFY(!bucket.end);
VERIFY(!bucket.deleted);
bucket.slot()->~T();
bucket.used = false;
bucket.deleted = true;

@ -49,7 +49,7 @@ public:
return allocated_id;
}
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void deallocate(int id)

@ -65,7 +65,7 @@ public:
constexpr u8 operator[](int i) const
{
ASSERT(i >= 0 && i < 4);
VERIFY(i >= 0 && i < 4);
return octet(SubnetClass(i));
}

@ -178,7 +178,7 @@ template<typename T>
inline void InlineLinkedList<T>::prepend(T* node)
{
if (!m_head) {
ASSERT(!m_tail);
VERIFY(!m_tail);
m_head = node;
m_tail = node;
node->set_prev(0);
@ -186,7 +186,7 @@ inline void InlineLinkedList<T>::prepend(T* node)
return;
}
ASSERT(m_tail);
VERIFY(m_tail);
m_head->set_prev(node);
node->set_next(m_head);
node->set_prev(0);
@ -197,7 +197,7 @@ template<typename T>
inline void InlineLinkedList<T>::append(T* node)
{
if (!m_tail) {
ASSERT(!m_head);
VERIFY(!m_head);
m_head = node;
m_tail = node;
node->set_prev(0);
@ -205,7 +205,7 @@ inline void InlineLinkedList<T>::append(T* node)
return;
}
ASSERT(m_head);
VERIFY(m_head);
m_tail->set_next(node);
node->set_prev(m_tail);
node->set_next(0);
@ -215,18 +215,18 @@ inline void InlineLinkedList<T>::append(T* node)
template<typename T>
inline void InlineLinkedList<T>::insert_before(T* before_node, T* node)
{
ASSERT(before_node);
ASSERT(node);
ASSERT(before_node != node);
ASSERT(!is_empty());
VERIFY(before_node);
VERIFY(node);
VERIFY(before_node != node);
VERIFY(!is_empty());
if (m_head == before_node) {
ASSERT(!before_node->prev());
VERIFY(!before_node->prev());
m_head = node;
node->set_prev(0);
node->set_next(before_node);
before_node->set_prev(node);
} else {
ASSERT(before_node->prev());
VERIFY(before_node->prev());
node->set_prev(before_node->prev());
before_node->prev()->set_next(node);
node->set_next(before_node);
@ -237,18 +237,18 @@ inline void InlineLinkedList<T>::insert_before(T* before_node, T* node)
template<typename T>
inline void InlineLinkedList<T>::insert_after(T* after_node, T* node)
{
ASSERT(after_node);
ASSERT(node);
ASSERT(after_node != node);
ASSERT(!is_empty());
VERIFY(after_node);
VERIFY(node);
VERIFY(after_node != node);
VERIFY(!is_empty());
if (m_tail == after_node) {
ASSERT(!after_node->next());
VERIFY(!after_node->next());
m_tail = node;
node->set_prev(after_node);
node->set_next(0);
after_node->set_next(node);
} else {
ASSERT(after_node->next());
VERIFY(after_node->next());
node->set_prev(after_node);
node->set_next(after_node->next());
after_node->next()->set_prev(node);
@ -260,18 +260,18 @@ template<typename T>
inline void InlineLinkedList<T>::remove(T* node)
{
if (node->prev()) {
ASSERT(node != m_head);
VERIFY(node != m_head);
node->prev()->set_next(node->next());
} else {
ASSERT(node == m_head);
VERIFY(node == m_head);
m_head = node->next();
}
if (node->next()) {
ASSERT(node != m_tail);
VERIFY(node != m_tail);
node->next()->set_prev(node->prev());
} else {
ASSERT(node == m_tail);
VERIFY(node == m_tail);
m_tail = node->prev();
}
@ -310,15 +310,15 @@ inline void InlineLinkedList<T>::append(InlineLinkedList<T>& other)
return;
}
ASSERT(tail());
ASSERT(other.head());
VERIFY(tail());
VERIFY(other.head());
T* other_head = other.head();
T* other_tail = other.tail();
other.clear();
ASSERT(!m_tail->next());
VERIFY(!m_tail->next());
m_tail->set_next(other_head);
ASSERT(!other_head->prev());
VERIFY(!other_head->prev());
other_head->set_prev(m_tail);
m_tail = other_tail;
}

@ -284,7 +284,7 @@ inline IntrusiveListNode::~IntrusiveListNode()
inline void IntrusiveListNode::remove()
{
ASSERT(m_storage);
VERIFY(m_storage);
if (m_storage->m_first == this)
m_storage->m_first = m_next;
if (m_storage->m_last == this)

@ -92,7 +92,7 @@ public:
void finish()
{
ASSERT(!m_finished);
VERIFY(!m_finished);
m_finished = true;
m_builder.append(']');
}

@ -191,7 +191,7 @@ inline void JsonValue::serialize(Builder& builder) const
builder.append("null");
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

@ -141,7 +141,7 @@ public:
void finish()
{
ASSERT(!m_finished);
VERIFY(!m_finished);
m_finished = true;
m_builder.append('}');
}

@ -45,7 +45,7 @@ JsonValue JsonPath::resolve(const JsonValue& top_root) const
root = JsonValue { root.as_array().at(element.index()) };
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}
return root;

@ -56,13 +56,13 @@ public:
Kind kind() const { return m_kind; }
const String& key() const
{
ASSERT(m_kind == Kind::Key);
VERIFY(m_kind == Kind::Key);
return m_key;
}
size_t index() const
{
ASSERT(m_kind == Kind::Index);
VERIFY(m_kind == Kind::Index);
return m_index;
}

@ -55,7 +55,7 @@ void JsonValue::copy_from(const JsonValue& other)
m_type = other.m_type;
switch (m_type) {
case Type::String:
ASSERT(!m_value.as_string);
VERIFY(!m_value.as_string);
m_value.as_string = other.m_value.as_string;
m_value.as_string->ref();
break;

@ -127,56 +127,56 @@ public:
i32 as_i32() const
{
ASSERT(is_i32());
VERIFY(is_i32());
return m_value.as_i32;
}
u32 as_u32() const
{
ASSERT(is_u32());
VERIFY(is_u32());
return m_value.as_u32;
}
i64 as_i64() const
{
ASSERT(is_i64());
VERIFY(is_i64());
return m_value.as_i64;
}
u64 as_u64() const
{
ASSERT(is_u64());
VERIFY(is_u64());
return m_value.as_u64;
}
int as_bool() const
{
ASSERT(is_bool());
VERIFY(is_bool());
return m_value.as_bool;
}
String as_string() const
{
ASSERT(is_string());
VERIFY(is_string());
return *m_value.as_string;
}
const JsonObject& as_object() const
{
ASSERT(is_object());
VERIFY(is_object());
return *m_value.as_object;
}
const JsonArray& as_array() const
{
ASSERT(is_array());
VERIFY(is_array());
return *m_value.as_array;
}
#if !defined(KERNEL)
double as_double() const
{
ASSERT(is_double());
VERIFY(is_double());
return m_value.as_double;
}
#endif

@ -52,13 +52,13 @@ public:
constexpr const u8& operator[](unsigned i) const
{
ASSERT(i < s_mac_address_length);
VERIFY(i < s_mac_address_length);
return m_data[i];
}
constexpr u8& operator[](unsigned i)
{
ASSERT(i < s_mac_address_length);
VERIFY(i < s_mac_address_length);
return m_data[i];
}

@ -68,7 +68,7 @@ MappedFile::MappedFile(void* ptr, size_t size)
MappedFile::~MappedFile()
{
auto rc = munmap(m_data, m_size);
ASSERT(rc == 0);
VERIFY(rc == 0);
}
}

@ -37,7 +37,7 @@ namespace AK {
namespace {
const static void* bitap_bitwise(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
ASSERT(needle_length < 32);
VERIFY(needle_length < 32);
u64 lookup = 0xfffffffe;

@ -79,7 +79,7 @@ public:
void seek(size_t offset)
{
ASSERT(offset < m_bytes.size());
VERIFY(offset < m_bytes.size());
m_offset = offset;
}
@ -309,7 +309,7 @@ public:
auto buffer = ByteBuffer::create_uninitialized(size());
const auto nread = read_without_consuming(buffer);
ASSERT(nread == buffer.size());
VERIFY(nread == buffer.size());
return buffer;
}

@ -59,13 +59,13 @@ public:
NonnullOwnPtr(NonnullOwnPtr&& other)
: m_ptr(other.leak_ptr())
{
ASSERT(m_ptr);
VERIFY(m_ptr);
}
template<typename U>
NonnullOwnPtr(NonnullOwnPtr<U>&& other)
: m_ptr(other.leak_ptr())
{
ASSERT(m_ptr);
VERIFY(m_ptr);
}
~NonnullOwnPtr()
{
@ -147,7 +147,7 @@ public:
template<typename U>
NonnullOwnPtr<U> release_nonnull()
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return NonnullOwnPtr<U>(NonnullOwnPtr<U>::Adopt, static_cast<U&>(*leak_ptr()));
}

@ -72,42 +72,42 @@ public:
ALWAYS_INLINE NonnullRefPtr(const T& object)
: m_bits((FlatPtr)&object)
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
const_cast<T&>(object).ref();
}
template<typename U>
ALWAYS_INLINE NonnullRefPtr(const U& object)
: m_bits((FlatPtr) static_cast<const T*>(&object))
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
const_cast<T&>(static_cast<const T&>(object)).ref();
}
ALWAYS_INLINE NonnullRefPtr(AdoptTag, T& object)
: m_bits((FlatPtr)&object)
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
}
ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr&& other)
: m_bits((FlatPtr)&other.leak_ref())
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
}
template<typename U>
ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr<U>&& other)
: m_bits((FlatPtr)&other.leak_ref())
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
}
ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr& other)
: m_bits((FlatPtr)other.add_ref())
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
}
template<typename U>
ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr<U>& other)
: m_bits((FlatPtr)other.add_ref())
{
ASSERT(!(m_bits & 1));
VERIFY(!(m_bits & 1));
}
ALWAYS_INLINE ~NonnullRefPtr()
{
@ -170,7 +170,7 @@ public:
[[nodiscard]] ALWAYS_INLINE T& leak_ref()
{
T* ptr = exchange(nullptr);
ASSERT(ptr);
VERIFY(ptr);
return *ptr;
}
@ -253,7 +253,7 @@ private:
ALWAYS_INLINE T* as_nonnull_ptr() const
{
T* ptr = (T*)(m_bits.load(AK::MemoryOrder::memory_order_relaxed) & ~(FlatPtr)1);
ASSERT(ptr);
VERIFY(ptr);
return ptr;
}
@ -273,7 +273,7 @@ private:
Kernel::Processor::wait_check();
#endif
}
ASSERT(!(bits & 1));
VERIFY(!(bits & 1));
f((T*)bits);
m_bits.store(bits, AK::MemoryOrder::memory_order_release);
}
@ -286,7 +286,7 @@ private:
ALWAYS_INLINE T* exchange(T* new_ptr)
{
ASSERT(!((FlatPtr)new_ptr & 1));
VERIFY(!((FlatPtr)new_ptr & 1));
#ifdef KERNEL
// We don't want to be pre-empted while we have the lock bit set
Kernel::ScopedCritical critical;
@ -301,7 +301,7 @@ private:
Kernel::Processor::wait_check();
#endif
}
ASSERT(!(expected & 1));
VERIFY(!(expected & 1));
return (T*)expected;
}

@ -128,19 +128,19 @@ public:
[[nodiscard]] ALWAYS_INLINE T& value()
{
ASSERT(m_has_value);
VERIFY(m_has_value);
return *reinterpret_cast<T*>(&m_storage);
}
[[nodiscard]] ALWAYS_INLINE const T& value() const
{
ASSERT(m_has_value);
VERIFY(m_has_value);
return *reinterpret_cast<const T*>(&m_storage);
}
[[nodiscard]] T release_value()
{
ASSERT(m_has_value);
VERIFY(m_has_value);
T released_value = move(value());
value().~T();
m_has_value = false;

@ -112,7 +112,7 @@ public:
{
OwnPtr ptr(move(other));
swap(ptr);
ASSERT(m_ptr);
VERIFY(m_ptr);
return *this;
}
@ -147,14 +147,14 @@ public:
NonnullOwnPtr<T> release_nonnull()
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return NonnullOwnPtr<T>(NonnullOwnPtr<T>::Adopt, *leak_ptr());
}
template<typename U>
NonnullOwnPtr<U> release_nonnull()
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return NonnullOwnPtr<U>(NonnullOwnPtr<U>::Adopt, static_cast<U&>(*leak_ptr()));
}
@ -163,25 +163,25 @@ public:
T* operator->()
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return m_ptr;
}
const T* operator->() const
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return m_ptr;
}
T& operator*()
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return *m_ptr;
}
const T& operator*() const
{
ASSERT(m_ptr);
VERIFY(m_ptr);
return *m_ptr;
}

@ -52,7 +52,7 @@ public:
T dequeue()
{
ASSERT(!is_empty());
VERIFY(!is_empty());
auto value = move((*m_segments.first())[m_index_into_first++]);
if (m_index_into_first == segment_size) {
m_segments.take_first();
@ -64,7 +64,7 @@ public:
const T& head() const
{
ASSERT(!is_empty());
VERIFY(!is_empty());
return (*m_segments.first())[m_index_into_first];
}

@ -70,8 +70,8 @@ public:
ALWAYS_INLINE void ref() const
{
auto old_ref_count = m_ref_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
ASSERT(old_ref_count > 0);
ASSERT(!Checked<RefCountType>::addition_would_overflow(old_ref_count, 1));
VERIFY(old_ref_count > 0);
VERIFY(!Checked<RefCountType>::addition_would_overflow(old_ref_count, 1));
}
[[nodiscard]] ALWAYS_INLINE bool try_ref() const
@ -80,7 +80,7 @@ public:
for (;;) {
if (expected == 0)
return false;
ASSERT(!Checked<RefCountType>::addition_would_overflow(expected, 1));
VERIFY(!Checked<RefCountType>::addition_would_overflow(expected, 1));
if (m_ref_count.compare_exchange_strong(expected, expected + 1, AK::MemoryOrder::memory_order_acquire))
return true;
}
@ -95,13 +95,13 @@ protected:
RefCountedBase() = default;
ALWAYS_INLINE ~RefCountedBase()
{
ASSERT(m_ref_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
VERIFY(m_ref_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
}
ALWAYS_INLINE RefCountType deref_base() const
{
auto old_ref_count = m_ref_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(old_ref_count > 0);
VERIFY(old_ref_count > 0);
return old_ref_count - 1;
}

@ -50,7 +50,7 @@ struct RefPtrTraits {
ALWAYS_INLINE static FlatPtr as_bits(T* ptr)
{
ASSERT(!((FlatPtr)ptr & 1));
VERIFY(!((FlatPtr)ptr & 1));
return (FlatPtr)ptr;
}
@ -70,7 +70,7 @@ struct RefPtrTraits {
ALWAYS_INLINE static FlatPtr exchange(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
{
// Only exchange when lock is not held
ASSERT(!(new_value & 1));
VERIFY(!(new_value & 1));
FlatPtr expected = atomic_var.load(AK::MemoryOrder::memory_order_relaxed);
for (;;) {
expected &= ~(FlatPtr)1; // only if lock bit is not set
@ -86,7 +86,7 @@ struct RefPtrTraits {
ALWAYS_INLINE static bool exchange_if_null(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
{
// Only exchange when lock is not held
ASSERT(!(new_value & 1));
VERIFY(!(new_value & 1));
for (;;) {
FlatPtr expected = default_null_value; // only if lock bit is not set
if (atomic_var.compare_exchange_strong(expected, new_value, AK::MemoryOrder::memory_order_acq_rel))
@ -116,13 +116,13 @@ struct RefPtrTraits {
Kernel::Processor::wait_check();
#endif
}
ASSERT(!(bits & 1));
VERIFY(!(bits & 1));
return bits;
}
ALWAYS_INLINE static void unlock(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
{
ASSERT(!(new_value & 1));
VERIFY(!(new_value & 1));
atomic_var.store(new_value, AK::MemoryOrder::memory_order_release);
}
@ -153,14 +153,14 @@ public:
: m_bits(PtrTraits::as_bits(const_cast<T*>(&object)))
{
T* ptr = const_cast<T*>(&object);
ASSERT(ptr);
ASSERT(!is_null());
VERIFY(ptr);
VERIFY(!is_null());
ptr->ref();
}
RefPtr(AdoptTag, T& object)
: m_bits(PtrTraits::as_bits(&object))
{
ASSERT(!is_null());
VERIFY(!is_null());
}
RefPtr(RefPtr&& other)
: m_bits(other.leak_ref_raw())
@ -179,7 +179,7 @@ public:
ALWAYS_INLINE RefPtr(NonnullRefPtr<U>&& other)
: m_bits(PtrTraits::as_bits(&other.leak_ref()))
{
ASSERT(!is_null());
VERIFY(!is_null());
}
template<typename U, typename P = RefPtrTraits<U>>
RefPtr(RefPtr<U, P>&& other)
@ -330,7 +330,7 @@ public:
NonnullRefPtr<T> release_nonnull()
{
FlatPtr bits = PtrTraits::exchange(m_bits, PtrTraits::default_null_value);
ASSERT(!PtrTraits::is_null(bits));
VERIFY(!PtrTraits::is_null(bits));
return NonnullRefPtr<T>(NonnullRefPtr<T>::Adopt, *PtrTraits::as_ptr(bits));
}
@ -384,7 +384,7 @@ public:
{
// make sure we are holding a null value
FlatPtr bits = m_bits.load(AK::MemoryOrder::memory_order_relaxed);
ASSERT(PtrTraits::is_null(bits));
VERIFY(PtrTraits::is_null(bits));
return PtrTraits::to_null_value(bits);
}
template<typename U = T, typename EnableIf<IsSame<U, T>::value && !IsNullPointer<typename PtrTraits::NullType>::value>::Type* = nullptr>
@ -392,7 +392,7 @@ public:
{
// make sure that new null value would be interpreted as a null value
FlatPtr bits = PtrTraits::from_null_value(value);
ASSERT(PtrTraits::is_null(bits));
VERIFY(PtrTraits::is_null(bits));
assign_raw(bits);
}
@ -454,7 +454,7 @@ private:
ALWAYS_INLINE T* as_nonnull_ptr(FlatPtr bits) const
{
ASSERT(!PtrTraits::is_null(bits));
VERIFY(!PtrTraits::is_null(bits));
return PtrTraits::as_ptr(bits);
}

@ -83,9 +83,9 @@ public:
}
if constexpr (allow_create) {
// We should always return an instance if we allow creating one
ASSERT(obj != nullptr);
VERIFY(obj != nullptr);
}
ASSERT(obj != (T*)0x1);
VERIFY(obj != (T*)0x1);
}
return obj;
}

@ -104,28 +104,28 @@ public:
T& first()
{
ASSERT(head());
VERIFY(head());
return head()->value;
}
const T& first() const
{
ASSERT(head());
VERIFY(head());
return head()->value;
}
T& last()
{
ASSERT(head());
VERIFY(head());
return tail()->value;
}
const T& last() const
{
ASSERT(head());
VERIFY(head());
return tail()->value;
}
T take_first()
{
ASSERT(m_head);
VERIFY(m_head);
auto* prev_head = m_head;
T value = move(first());
if (m_tail == m_head)
@ -187,7 +187,7 @@ public:
void remove(Iterator iterator)
{
ASSERT(!iterator.is_end());
VERIFY(!iterator.is_end());
if (m_head == iterator.m_node)
m_head = iterator.m_node->next;
if (m_tail == iterator.m_node)

@ -78,11 +78,11 @@ public:
const auto placeholder = consume_until_without_consuming_stop_character(m_closing);
if (!lexer.consume_specific(m_closing))
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
m_builder.append(get(placeholder));
} else {
ASSERT(lexer.is_eof());
VERIFY(lexer.is_eof());
}
}
}

@ -140,12 +140,12 @@ public:
[[nodiscard]] ALWAYS_INLINE constexpr Span slice(size_t start, size_t length) const
{
ASSERT(start + length <= size());
VERIFY(start + length <= size());
return { this->m_values + start, length };
}
[[nodiscard]] ALWAYS_INLINE constexpr Span slice(size_t start) const
{
ASSERT(start <= size());
VERIFY(start <= size());
return { this->m_values + start, size() - start };
}
@ -156,20 +156,20 @@ public:
ALWAYS_INLINE constexpr T* offset(size_t start) const
{
ASSERT(start < this->m_size);
VERIFY(start < this->m_size);
return this->m_values + start;
}
ALWAYS_INLINE constexpr void overwrite(size_t offset, const void* data, size_t data_size)
{
// make sure we're not told to write past the end
ASSERT(offset + data_size <= size());
VERIFY(offset + data_size <= size());
__builtin_memcpy(this->data() + offset, data, data_size);
}
ALWAYS_INLINE constexpr size_t copy_to(Span<typename RemoveConst<T>::Type> other) const
{
ASSERT(other.size() >= size());
VERIFY(other.size() >= size());
return TypedTransfer<typename RemoveConst<T>::Type>::copy(other.data(), data(), size());
}
@ -198,12 +198,12 @@ public:
ALWAYS_INLINE constexpr const T& at(size_t index) const
{
ASSERT(index < this->m_size);
VERIFY(index < this->m_size);
return this->m_values[index];
}
ALWAYS_INLINE constexpr T& at(size_t index)
{
ASSERT(index < this->m_size);
VERIFY(index < this->m_size);
return this->m_values[index];
}

@ -42,17 +42,17 @@ StackInfo::StackInfo()
#ifdef __serenity__
if (get_stack_bounds(&m_base, &m_size) < 0) {
perror("get_stack_bounds");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
#elif __linux__
pthread_attr_t attr = {};
if (int rc = pthread_getattr_np(pthread_self(), &attr) != 0) {
fprintf(stderr, "pthread_getattr_np: %s\n", strerror(-rc));
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
if (int rc = pthread_attr_getstack(&attr, (void**)&m_base, &m_size) != 0) {
fprintf(stderr, "pthread_attr_getstack: %s\n", strerror(-rc));
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
pthread_attr_destroy(&attr);
#elif __APPLE__
@ -73,7 +73,7 @@ StackInfo::StackInfo()
}
m_base = top_of_stack - m_size;
#else
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
#endif
m_top = m_base + m_size;

@ -57,7 +57,7 @@ constexpr T max(const T& a, const T& b)
template<typename T>
constexpr T clamp(const T& value, const T& min, const T& max)
{
ASSERT(max >= min);
VERIFY(max >= min);
if (value > max)
return max;
if (value < min)

@ -37,7 +37,7 @@ namespace AK::Detail {
class Stream {
public:
virtual ~Stream() { ASSERT(!has_any_error()); }
virtual ~Stream() { VERIFY(!has_any_error()); }
virtual bool has_recoverable_error() const { return m_recoverable_error; }
virtual bool has_fatal_error() const { return m_fatal_error; }
@ -45,7 +45,7 @@ public:
virtual bool handle_recoverable_error()
{
ASSERT(!has_fatal_error());
VERIFY(!has_fatal_error());
return exchange(m_recoverable_error, false);
}
virtual bool handle_fatal_error() { return exchange(m_fatal_error, false); }

@ -104,7 +104,7 @@ String String::empty()
bool String::copy_characters_to_buffer(char* buffer, size_t buffer_size) const
{
// We must fit at least the NUL-terminator.
ASSERT(buffer_size > 0);
VERIFY(buffer_size > 0);
size_t characters_to_copy = min(length(), buffer_size - 1);
__builtin_memcpy(buffer, characters(), characters_to_copy);
@ -127,8 +127,8 @@ String String::isolated_copy() const
String String::substring(size_t start) const
{
ASSERT(m_impl);
ASSERT(start <= length());
VERIFY(m_impl);
VERIFY(start <= length());
return { characters() + start, length() - start };
}
@ -136,24 +136,24 @@ String String::substring(size_t start, size_t length) const
{
if (!length)
return "";
ASSERT(m_impl);
ASSERT(start + length <= m_impl->length());
VERIFY(m_impl);
VERIFY(start + length <= m_impl->length());
// FIXME: This needs some input bounds checking.
return { characters() + start, length };
}
StringView String::substring_view(size_t start, size_t length) const
{
ASSERT(m_impl);
ASSERT(start + length <= m_impl->length());
VERIFY(m_impl);
VERIFY(start + length <= m_impl->length());
// FIXME: This needs some input bounds checking.
return { characters() + start, length };
}
StringView String::substring_view(size_t start) const
{
ASSERT(m_impl);
ASSERT(start <= length());
VERIFY(m_impl);
VERIFY(start <= length());
return { characters() + start, length() - start };
}

@ -40,12 +40,12 @@ inline void StringBuilder::will_append(size_t size)
{
Checked<size_t> needed_capacity = m_length;
needed_capacity += size;
ASSERT(!needed_capacity.has_overflow());
VERIFY(!needed_capacity.has_overflow());
if (needed_capacity < inline_capacity)
return;
Checked<size_t> expanded_capacity = needed_capacity;
expanded_capacity *= 2;
ASSERT(!expanded_capacity.has_overflow());
VERIFY(!expanded_capacity.has_overflow());
if (m_buffer.is_null()) {
m_buffer.grow(expanded_capacity.value());
memcpy(m_buffer.data(), m_inline_buffer, m_length);

@ -88,9 +88,9 @@ static inline size_t allocation_size_for_stringimpl(size_t length)
NonnullRefPtr<StringImpl> StringImpl::create_uninitialized(size_t length, char*& buffer)
{
ASSERT(length);
VERIFY(length);
void* slot = kmalloc(allocation_size_for_stringimpl(length));
ASSERT(slot);
VERIFY(slot);
auto new_stringimpl = adopt(*new (slot) StringImpl(ConstructWithInlineBuffer, length));
buffer = const_cast<char*>(new_stringimpl->characters());
buffer[length] = '\0';

@ -66,7 +66,7 @@ public:
const char& operator[](size_t i) const
{
ASSERT(i < m_length);
VERIFY(i < m_length);
return characters()[i];
}

@ -79,7 +79,7 @@ Vector<StringView> StringView::split_view(const char separator, bool keep_empty)
Vector<StringView> StringView::split_view(const StringView& separator, bool keep_empty) const
{
ASSERT(!separator.is_empty());
VERIFY(!separator.is_empty());
if (is_empty())
return {};
@ -197,20 +197,20 @@ bool StringView::equals_ignoring_case(const StringView& other) const
StringView StringView::substring_view(size_t start, size_t length) const
{
ASSERT(start + length <= m_length);
VERIFY(start + length <= m_length);
return { m_characters + start, length };
}
StringView StringView::substring_view(size_t start) const
{
ASSERT(start <= m_length);
VERIFY(start <= m_length);
return { m_characters + start, length() - start };
}
StringView StringView::substring_view_starting_from_substring(const StringView& substring) const
{
const char* remaining_characters = substring.characters_without_null_termination();
ASSERT(remaining_characters >= m_characters);
ASSERT(remaining_characters <= m_characters + m_length);
VERIFY(remaining_characters >= m_characters);
VERIFY(remaining_characters <= m_characters + m_length);
size_t remaining_length = m_length - (remaining_characters - m_characters);
return { remaining_characters, remaining_length };
}
@ -218,8 +218,8 @@ StringView StringView::substring_view_starting_from_substring(const StringView&
StringView StringView::substring_view_starting_after_substring(const StringView& substring) const
{
const char* remaining_characters = substring.characters_without_null_termination() + substring.length();
ASSERT(remaining_characters >= m_characters);
ASSERT(remaining_characters <= m_characters + m_length);
VERIFY(remaining_characters >= m_characters);
VERIFY(remaining_characters <= m_characters + m_length);
size_t remaining_length = m_length - (remaining_characters - m_characters);
return { remaining_characters, remaining_length };
}

@ -42,13 +42,13 @@ public:
: m_characters(characters)
, m_length(length)
{
ASSERT(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
VERIFY(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
}
ALWAYS_INLINE StringView(const unsigned char* characters, size_t length)
: m_characters((const char*)characters)
, m_length(length)
{
ASSERT(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
VERIFY(!Checked<uintptr_t>::addition_would_overflow((uintptr_t)characters, length));
}
ALWAYS_INLINE constexpr StringView(const char* cstring)
: m_characters(cstring)

@ -38,11 +38,11 @@ void warnln(CheckedFormatString<Parameters...>&& fmtstr, const Parameters&...);
using AK::warnln;
#undef ASSERT
#define ASSERT(x) \
#undef VERIFY
#define VERIFY(x) \
do { \
if (!(x)) \
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: ASSERT({}) failed", __FILE__, __LINE__, #x); \
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: VERIFY({}) failed", __FILE__, __LINE__, #x); \
} while (false)
#undef RELEASE_ASSERT
@ -52,10 +52,10 @@ using AK::warnln;
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: RELEASE_ASSERT({}) failed", __FILE__, __LINE__, #x); \
} while (false)
#undef ASSERT_NOT_REACHED
#define ASSERT_NOT_REACHED() \
#undef VERIFY_NOT_REACHED
#define VERIFY_NOT_REACHED() \
do { \
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: ASSERT_NOT_REACHED() called", __FILE__, __LINE__); \
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: VERIFY_NOT_REACHED() called", __FILE__, __LINE__); \
::abort(); \
} while (false)

@ -177,7 +177,7 @@ TEST_CASE(pointers)
EXPECT_EQ(String::formatted("{:p}", ptr), "0x0000000000004000");
EXPECT_EQ(String::formatted("{}", ptr), "0x0000000000004000");
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

@ -36,7 +36,7 @@
TEST_CASE(load_form)
{
FILE* fp = fopen("test.frm", "r");
ASSERT(fp);
VERIFY(fp);
StringBuilder builder;
for (;;) {
@ -69,7 +69,7 @@ TEST_CASE(load_form)
BENCHMARK_CASE(load_4chan_catalog)
{
FILE* fp = fopen("4chan_catalog.json", "r");
ASSERT(fp);
VERIFY(fp);
StringBuilder builder;
for (;;) {

@ -38,7 +38,7 @@ TEST_CASE(decode_ascii)
size_t i = 0;
for (u32 code_point : utf8) {
ASSERT(i < expected_size);
VERIFY(i < expected_size);
EXPECT_EQ(code_point, expected[i]);
i++;
}
@ -57,7 +57,7 @@ TEST_CASE(decode_utf8)
size_t i = 0;
for (u32 code_point : utf8) {
ASSERT(i < expected_size);
VERIFY(i < expected_size);
EXPECT_EQ(code_point, expected[i]);
i++;
}

@ -31,7 +31,7 @@ namespace AK {
int day_of_year(int year, unsigned month, int day)
{
ASSERT(month >= 1 && month <= 12);
VERIFY(month >= 1 && month <= 12);
static const int seek_table[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
int day_of_year = seek_table[month - 1] + day - 1;
@ -44,7 +44,7 @@ int day_of_year(int year, unsigned month, int day)
int days_in_month(int year, unsigned month)
{
ASSERT(month >= 1 && month <= 12);
VERIFY(month >= 1 && month <= 12);
if (month == 2)
return is_leap_year(year) ? 29 : 28;
@ -54,7 +54,7 @@ int days_in_month(int year, unsigned month)
unsigned day_of_week(int year, unsigned month, int day)
{
ASSERT(month >= 1 && month <= 12);
VERIFY(month >= 1 && month <= 12);
static const int seek_table[] = { 0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4 };
if (month < 3)
--year;

@ -51,7 +51,7 @@ template<typename OutputType, typename InputType>
ALWAYS_INLINE CopyConst<InputType, OutputType>* downcast(InputType* input)
{
static_assert(IsBaseOf<InputType, OutputType>::value);
ASSERT(!input || is<OutputType>(*input));
VERIFY(!input || is<OutputType>(*input));
return static_cast<CopyConst<InputType, OutputType>*>(input);
}
@ -59,7 +59,7 @@ template<typename OutputType, typename InputType>
ALWAYS_INLINE CopyConst<InputType, OutputType>& downcast(InputType& input)
{
static_assert(IsBaseOf<InputType, OutputType>::value);
ASSERT(is<OutputType>(input));
VERIFY(is<OutputType>(input));
return static_cast<CopyConst<InputType, OutputType>&>(input);
}

@ -41,7 +41,7 @@ UUID::UUID(Array<u8, 16> uuid_buffer)
void UUID::convert_string_view_to_uuid(const StringView& uuid_string_view)
{
ASSERT(uuid_string_view.length() == 36);
VERIFY(uuid_string_view.length() == 36);
auto first_unit = decode_hex(uuid_string_view.substring_view(0, 8));
auto second_unit = decode_hex(uuid_string_view.substring_view(9, 4));
auto third_unit = decode_hex(uuid_string_view.substring_view(14, 4));

@ -51,7 +51,7 @@ public:
}
Utf32CodepointIterator& operator++()
{
ASSERT(m_length > 0);
VERIFY(m_length > 0);
m_ptr++;
m_length--;
return *this;
@ -62,7 +62,7 @@ public:
}
u32 operator*() const
{
ASSERT(m_length > 0);
VERIFY(m_length > 0);
return *m_ptr;
}
@ -88,7 +88,7 @@ public:
: m_code_points(code_points)
, m_length(length)
{
ASSERT(code_points || length == 0);
VERIFY(code_points || length == 0);
}
Utf32CodepointIterator begin() const
@ -107,8 +107,8 @@ public:
size_t iterator_offset(const Utf32CodepointIterator& it) const
{
ASSERT(it.m_ptr >= m_code_points);
ASSERT(it.m_ptr < m_code_points + m_length);
VERIFY(it.m_ptr >= m_code_points);
VERIFY(it.m_ptr < m_code_points + m_length);
return ((ptrdiff_t)it.m_ptr - (ptrdiff_t)m_code_points) / sizeof(u32);
}
@ -116,9 +116,9 @@ public:
{
if (length == 0)
return {};
ASSERT(offset < m_length);
ASSERT(!Checked<size_t>::addition_would_overflow(offset, length));
ASSERT((offset + length) <= m_length);
VERIFY(offset < m_length);
VERIFY(!Checked<size_t>::addition_would_overflow(offset, length));
VERIFY((offset + length) <= m_length);
return Utf32View(m_code_points + offset, length);
}

@ -67,8 +67,8 @@ Utf8CodepointIterator Utf8View::end() const
size_t Utf8View::byte_offset_of(const Utf8CodepointIterator& it) const
{
ASSERT(it.m_ptr >= begin_ptr());
ASSERT(it.m_ptr <= end_ptr());
VERIFY(it.m_ptr >= begin_ptr());
VERIFY(it.m_ptr <= end_ptr());
return it.m_ptr - begin_ptr();
}
@ -162,15 +162,15 @@ bool Utf8CodepointIterator::operator!=(const Utf8CodepointIterator& other) const
Utf8CodepointIterator& Utf8CodepointIterator::operator++()
{
ASSERT(m_length > 0);
VERIFY(m_length > 0);
size_t code_point_length_in_bytes = 0;
u32 value;
bool first_byte_makes_sense = decode_first_byte(*m_ptr, code_point_length_in_bytes, value);
ASSERT(first_byte_makes_sense);
VERIFY(first_byte_makes_sense);
ASSERT(code_point_length_in_bytes <= m_length);
VERIFY(code_point_length_in_bytes <= m_length);
m_ptr += code_point_length_in_bytes;
m_length -= code_point_length_in_bytes;
@ -179,17 +179,17 @@ Utf8CodepointIterator& Utf8CodepointIterator::operator++()
size_t Utf8CodepointIterator::code_point_length_in_bytes() const
{
ASSERT(m_length > 0);
VERIFY(m_length > 0);
size_t code_point_length_in_bytes = 0;
u32 value;
bool first_byte_makes_sense = decode_first_byte(*m_ptr, code_point_length_in_bytes, value);
ASSERT(first_byte_makes_sense);
VERIFY(first_byte_makes_sense);
return code_point_length_in_bytes;
}
u32 Utf8CodepointIterator::operator*() const
{
ASSERT(m_length > 0);
VERIFY(m_length > 0);
u32 code_point_value_so_far = 0;
size_t code_point_length_in_bytes = 0;
@ -197,13 +197,13 @@ u32 Utf8CodepointIterator::operator*() const
bool first_byte_makes_sense = decode_first_byte(m_ptr[0], code_point_length_in_bytes, code_point_value_so_far);
if (!first_byte_makes_sense)
dbgln("First byte doesn't make sense, bytes: {}", StringView { (const char*)m_ptr, m_length });
ASSERT(first_byte_makes_sense);
VERIFY(first_byte_makes_sense);
if (code_point_length_in_bytes > m_length)
dbgln("Not enough bytes (need {}, have {}), first byte is: {:#02x}, '{}'", code_point_length_in_bytes, m_length, m_ptr[0], (const char*)m_ptr);
ASSERT(code_point_length_in_bytes <= m_length);
VERIFY(code_point_length_in_bytes <= m_length);
for (size_t offset = 1; offset < code_point_length_in_bytes; offset++) {
ASSERT(m_ptr[offset] >> 6 == 2);
VERIFY(m_ptr[offset] >> 6 == 2);
code_point_value_so_far <<= 6;
code_point_value_so_far |= m_ptr[offset] & 63;
}

@ -191,12 +191,12 @@ public:
ALWAYS_INLINE const T& at(size_t i) const
{
ASSERT(i < m_size);
VERIFY(i < m_size);
return data()[i];
}
ALWAYS_INLINE T& at(size_t i)
{
ASSERT(i < m_size);
VERIFY(i < m_size);
return data()[i];
}
@ -211,7 +211,7 @@ public:
T take_last()
{
ASSERT(!is_empty());
VERIFY(!is_empty());
T value = move(last());
last().~T();
--m_size;
@ -220,7 +220,7 @@ public:
T take_first()
{
ASSERT(!is_empty());
VERIFY(!is_empty());
T value = move(first());
remove(0);
return value;
@ -235,14 +235,14 @@ public:
T unstable_take(size_t index)
{
ASSERT(index < m_size);
VERIFY(index < m_size);
swap(at(index), at(m_size - 1));
return take_last();
}
void remove(size_t index)
{
ASSERT(index < m_size);
VERIFY(index < m_size);
if constexpr (Traits<T>::is_trivial()) {
TypedTransfer<T>::copy(slot(index), slot(index + 1), m_size - index - 1);
@ -261,8 +261,8 @@ public:
{
if (count == 0)
return;
ASSERT(index + count > index);
ASSERT(index + count <= m_size);
VERIFY(index + count > index);
VERIFY(index + count <= m_size);
if constexpr (Traits<T>::is_trivial()) {
TypedTransfer<T>::copy(slot(index), slot(index + count), m_size - index - count);
@ -281,7 +281,7 @@ public:
template<typename U = T>
void insert(size_t index, U&& value)
{
ASSERT(index <= size());
VERIFY(index <= size());
if (index == size())
return append(forward<U>(value));
grow_capacity(size() + 1);
@ -403,7 +403,7 @@ public:
template<typename U = T>
ALWAYS_INLINE void unchecked_append(U&& value)
{
ASSERT((size() + 1) <= capacity());
VERIFY((size() + 1) <= capacity());
new (slot(m_size)) T(forward<U>(value));
++m_size;
}
@ -506,7 +506,7 @@ public:
void shrink(size_t new_size, bool keep_capacity = false)
{
ASSERT(new_size <= size());
VERIFY(new_size <= size());
if (new_size == size())
return;

@ -229,7 +229,7 @@ inline WeakPtr<U> Weakable<T>::make_weak_ptr() const
if (static_cast<const T*>(this)->unref()) {
// We just dropped the last reference, which should have called
// revoke_weak_ptrs, which should have invalidated our weak_ptr
ASSERT(!weak_ptr.strong_ref());
VERIFY(!weak_ptr.strong_ref());
return {};
}
}

@ -91,7 +91,7 @@ public:
void revoke()
{
auto current_consumers = m_consumers.fetch_or(1u, AK::MemoryOrder::memory_order_relaxed);
ASSERT(!(current_consumers & 1u));
VERIFY(!(current_consumers & 1u));
// We flagged revokation, now wait until everyone trying to obtain
// a strong reference is done
while (current_consumers > 0) {

@ -40,39 +40,39 @@ UNMAP_AFTER_INIT DynamicParser::DynamicParser(PhysicalAddress rsdp)
void DynamicParser::handle_irq(const RegisterState&)
{
// FIXME: Implement IRQ handling of ACPI signals!
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::enable_aml_interpretation()
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::enable_aml_interpretation(File&)
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::enable_aml_interpretation(u8*, u32)
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::disable_aml_interpretation()
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::try_acpi_shutdown()
{
// FIXME: Implement AML Interpretation to perform ACPI shutdown
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::build_namespace()
{
// FIXME: Implement AML Interpretation to build the ACPI namespace
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

@ -96,7 +96,7 @@ UNMAP_AFTER_INIT void MultiProcessorParser::parse_configuration_table()
entry = (MultiProcessor::EntryHeader*)(FlatPtr)entry + sizeof(MultiProcessor::CompatibilityBusAddressSpaceModifierEntry);
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
--entry_count;
}

@ -47,7 +47,7 @@ Parser* Parser::the()
void Parser::set_the(Parser& parser)
{
ASSERT(!s_acpi_parser);
VERIFY(!s_acpi_parser);
s_acpi_parser = &parser;
}
@ -89,7 +89,7 @@ UNMAP_AFTER_INIT void Parser::init_fadt()
klog() << "ACPI: Searching for the Fixed ACPI Data Table";
m_fadt = find_table("FACP");
ASSERT(!m_fadt.is_null());
VERIFY(!m_fadt.is_null());
auto sdt = map_typed<Structures::FADT>(m_fadt);
@ -148,13 +148,13 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
switch (structure.access_size) {
case (u8)GenericAddressStructure::AccessSize::QWord: {
dbgln("Trying to send QWord to IO port");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
break;
}
case (u8)GenericAddressStructure::AccessSize::Undefined: {
dbgln("ACPI Warning: Unknown access size {}", structure.access_size);
ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);
VERIFY(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
VERIFY(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);
dbgln("ACPI: Bit Width - {} bits", structure.bit_width);
address.out(value, structure.bit_width);
break;
@ -182,7 +182,7 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
break;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return;
}
@ -193,16 +193,16 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
u32 offset_in_pci_address = structure.address & 0xFFFF;
if (structure.access_size == (u8)GenericAddressStructure::AccessSize::QWord) {
dbgln("Trying to send QWord to PCI configuration space");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ASSERT(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
VERIFY(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
PCI::raw_access(pci_address, offset_in_pci_address, (1 << (structure.access_size - 1)), value);
return;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
bool Parser::validate_reset_register()
@ -222,7 +222,7 @@ void Parser::try_acpi_reboot()
dbgln_if(ACPI_DEBUG, "ACPI: Rebooting, Probing FADT ({})", m_fadt);
auto fadt = map_typed<Structures::FADT>(m_fadt);
ASSERT(validate_reset_register());
VERIFY(validate_reset_register());
access_generic_address(fadt->reset_reg, fadt->reset_value);
Processor::halt();
}
@ -255,7 +255,7 @@ UNMAP_AFTER_INIT void Parser::initialize_main_system_description_table()
#if ACPI_DEBUG
dbgln("ACPI: Checking Main SDT Length to choose the correct mapping size");
#endif
ASSERT(!m_main_system_description_table.is_null());
VERIFY(!m_main_system_description_table.is_null());
auto length = get_table_size(m_main_system_description_table);
auto revision = get_table_revision(m_main_system_description_table);
@ -333,7 +333,7 @@ UNMAP_AFTER_INIT Optional<PhysicalAddress> StaticParsing::find_rsdp()
UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_address, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto rsdp = map_typed<Structures::RSDPDescriptor20>(rsdp_address);
@ -345,13 +345,13 @@ UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_
return search_table_in_xsdt(PhysicalAddress(rsdp->xsdt_ptr), signature);
return search_table_in_rsdt(PhysicalAddress(rsdp->base.rsdt_ptr), signature);
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsdt_address, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto xsdt = map_typed<Structures::XSDT>(xsdt_address);
@ -365,7 +365,7 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsd
static bool match_table_signature(PhysicalAddress table_header, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto table = map_typed<Structures::RSDT>(table_header);
return !strncmp(table->h.sig, signature.characters_without_null_termination(), 4);
@ -374,7 +374,7 @@ static bool match_table_signature(PhysicalAddress table_header, const StringView
UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsdt_address, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto rsdt = map_typed<Structures::RSDT>(rsdt_address);
@ -387,22 +387,22 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsd
void Parser::enable_aml_interpretation()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Parser::enable_aml_interpretation(File&)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Parser::enable_aml_interpretation(u8*, u32)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Parser::disable_aml_interpretation()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

@ -319,7 +319,7 @@ void page_fault_handler(TrapFrame* trap)
dbgln("Continuing after resolved page fault");
#endif
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}
@ -390,7 +390,7 @@ static void unimp_trap()
GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
{
ASSERT(s_interrupt_handler[interrupt_number] != nullptr);
VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
return *s_interrupt_handler[interrupt_number];
}
@ -401,14 +401,14 @@ static void revert_to_unused_handler(u8 interrupt_number)
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
ASSERT(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
VERIFY(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
if (s_interrupt_handler[interrupt_number] != nullptr) {
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
s_interrupt_handler[interrupt_number] = &handler;
return;
}
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
return;
}
@ -417,7 +417,7 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
static_cast<SpuriousInterruptHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
return;
}
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
auto& previous_handler = *s_interrupt_handler[interrupt_number];
s_interrupt_handler[interrupt_number] = nullptr;
SharedIRQHandler::initialize(interrupt_number);
@ -425,7 +425,7 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
} else {
s_interrupt_handler[interrupt_number] = &handler;
}
@ -433,13 +433,13 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
ASSERT(s_interrupt_handler[interrupt_number] != nullptr);
VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
dbgln("Trying to unregister unused handler (?)");
return;
}
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->unregister_handler(handler);
if (!static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->sharing_devices_count()) {
revert_to_unused_handler(interrupt_number);
@ -447,11 +447,11 @@ void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptH
return;
}
if (!s_interrupt_handler[interrupt_number]->is_shared_handler()) {
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
revert_to_unused_handler(interrupt_number);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*f)())
@ -692,11 +692,11 @@ void handle_interrupt(TrapFrame* trap)
{
clac();
auto& regs = *trap->regs;
ASSERT(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
VERIFY(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
u8 irq = (u8)(regs.isr_number - 0x50);
s_entropy_source_interrupts.add_random_event(irq);
auto* handler = s_interrupt_handler[irq];
ASSERT(handler);
VERIFY(handler);
handler->increment_invoking_counter();
handler->handle_interrupt(regs);
handler->eoi();
@ -792,7 +792,7 @@ static volatile bool s_smp_enabled;
Vector<Processor*>& Processor::processors()
{
ASSERT(s_processors);
VERIFY(s_processors);
return *s_processors;
}
@ -803,8 +803,8 @@ Processor& Processor::by_id(u32 cpu)
// for all APs to finish, after which this array never gets modified
// again, so it's safe to not protect access to it here
auto& procs = processors();
ASSERT(procs.size() > cpu);
ASSERT(procs[cpu] != nullptr);
VERIFY(procs.size() > cpu);
VERIFY(procs[cpu] != nullptr);
return *procs[cpu];
}
@ -861,7 +861,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
u32 max_extended_leaf = CPUID(0x80000000).eax();
ASSERT(max_extended_leaf >= 0x80000001);
VERIFY(max_extended_leaf >= 0x80000001);
CPUID extended_processor_info(0x80000001);
if (extended_processor_info.edx() & (1 << 20))
set_feature(CPUFeature::NX);
@ -1049,14 +1049,14 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
cpu_setup();
gdt_init();
ASSERT(is_initialized()); // sanity check
ASSERT(&current() == this); // sanity check
VERIFY(is_initialized()); // sanity check
VERIFY(&current() == this); // sanity check
}
UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
{
ASSERT(m_self == this);
ASSERT(&current() == this); // sanity check
VERIFY(m_self == this);
VERIFY(&current() == this); // sanity check
dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
if (!has_feature(CPUFeature::RDRAND))
@ -1069,7 +1069,7 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
flush_idt();
if (cpu == 0) {
ASSERT((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
asm volatile("fninit");
asm volatile("fxsave %0"
: "=m"(s_clean_fpu_state));
@ -1095,7 +1095,7 @@ void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
if (i > m_gdt_length) {
m_gdt_length = i + 1;
ASSERT(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
}
m_gdt[i].low = low;
@ -1178,14 +1178,14 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// reflect the status at the last context switch.
ScopedSpinLock lock(g_scheduler_lock);
if (&thread == Processor::current_thread()) {
ASSERT(thread.state() == Thread::Running);
VERIFY(thread.state() == Thread::Running);
// Leave the scheduler lock. If we trigger page faults we may
// need to be preempted. Since this is our own thread it won't
// cause any problems as the stack won't change below this frame.
lock.unlock();
capture_current_thread();
} else if (thread.is_active()) {
ASSERT(thread.cpu() != Processor::id());
VERIFY(thread.cpu() != Processor::id());
// If this is the case, the thread is currently running
// on another processor. We can't trust the kernel stack as
// it may be changing at any time. We probably need to send
@ -1197,8 +1197,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
[&]() {
dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
ProcessPagingScope paging_scope(thread.process());
ASSERT(&Processor::current() != &proc);
ASSERT(&thread == Processor::current_thread());
VERIFY(&Processor::current() != &proc);
VERIFY(&thread == Processor::current_thread());
// NOTE: Because the other processor is still holding the
// scheduler lock while waiting for this callback to finish,
// the current thread on the target processor cannot change
@ -1212,7 +1212,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
} else {
switch (thread.state()) {
case Thread::Running:
ASSERT_NOT_REACHED(); // should have been handled above
VERIFY_NOT_REACHED(); // should have been handled above
case Thread::Runnable:
case Thread::Stopped:
case Thread::Blocked:
@ -1251,8 +1251,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
ASSERT(to_thread->state() == Thread::Running);
VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
VERIFY(to_thread->state() == Thread::Running);
Processor::set_current_thread(*to_thread);
@ -1287,9 +1287,9 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
ASSERT(!in_irq());
ASSERT(m_in_critical == 1);
ASSERT(is_kernel_mode());
VERIFY(!in_irq());
VERIFY(m_in_critical == 1);
VERIFY(is_kernel_mode());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
from_thread->save_critical(m_in_critical);
@ -1344,12 +1344,12 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
ASSERT(!are_interrupts_enabled());
ASSERT(is_kernel_mode());
VERIFY(!are_interrupts_enabled());
VERIFY(is_kernel_mode());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
ASSERT(to_thread == Thread::current());
VERIFY(to_thread == Thread::current());
Scheduler::enter_current(*from_thread, true);
@ -1388,13 +1388,13 @@ void exit_kernel_thread(void)
u32 Processor::init_context(Thread& thread, bool leave_crit)
{
ASSERT(is_kernel_mode());
ASSERT(g_scheduler_lock.is_locked());
VERIFY(is_kernel_mode());
VERIFY(g_scheduler_lock.is_locked());
if (leave_crit) {
// Leave the critical section we set up in Process::exec,
// but because we still have the scheduler lock we should end up with 1
m_in_critical--; // leave it without triggering anything or restoring flags
ASSERT(in_critical() == 1);
VERIFY(in_critical() == 1);
}
u32 kernel_stack_top = thread.kernel_stack_top();
@ -1405,7 +1405,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
u32 stack_top = kernel_stack_top;
// TODO: handle NT?
ASSERT((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
auto& tss = thread.tss();
bool return_to_user = (tss.cs & 3) != 0;
@ -1503,7 +1503,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
thread->tss().eflags = flags;
return Processor::current().init_context(*thread, true);
}
@ -1536,18 +1536,18 @@ void Processor::assume_context(Thread& thread, u32 flags)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
Scheduler::prepare_after_exec();
// in_critical() should be 2 here. The critical section in Process::exec
// and then the scheduler lock
ASSERT(Processor::current().in_critical() == 2);
VERIFY(Processor::current().in_critical() == 2);
do_assume_context(&thread, flags);
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
{
ASSERT(g_scheduler_lock.own_lock());
VERIFY(g_scheduler_lock.own_lock());
// Because init_finished() will wait on the other APs, we need
// to release the scheduler lock so that the other APs can also get
@ -1567,7 +1567,7 @@ extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
{
ASSERT(initial_thread.process().is_kernel_process());
VERIFY(initial_thread.process().is_kernel_process());
auto& tss = initial_thread.tss();
m_tss = tss;
@ -1605,13 +1605,13 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
);
// clang-format on
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(&Processor::current() == this);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
trap.prev_irq_level = m_in_irq;
if (raise_irq)
m_in_irq++;
@ -1629,9 +1629,9 @@ void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
void Processor::exit_trap(TrapFrame& trap)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(&Processor::current() == this);
ASSERT(m_in_irq >= trap.prev_irq_level);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
VERIFY(m_in_irq >= trap.prev_irq_level);
m_in_irq = trap.prev_irq_level;
smp_process_pending_messages();
@ -1644,7 +1644,7 @@ void Processor::exit_trap(TrapFrame& trap)
auto& current_trap = current_thread->current_trap();
current_trap = trap.next_trap;
if (current_trap) {
ASSERT(current_trap->regs);
VERIFY(current_trap->regs);
// If we have another higher level trap then we probably returned
// from an interrupt or irq handler. The cs register of the
// new/higher level trap tells us what the mode prior to it was
@ -1659,8 +1659,8 @@ void Processor::exit_trap(TrapFrame& trap)
void Processor::check_invoke_scheduler()
{
ASSERT(!m_in_irq);
ASSERT(!m_in_critical);
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
@ -1724,7 +1724,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
}
}
ASSERT(msg != nullptr);
VERIFY(msg != nullptr);
return *msg;
}
@ -1732,15 +1732,15 @@ Atomic<u32> Processor::s_idle_cpu_mask { 0 };
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{
ASSERT(Processor::current().in_critical());
ASSERT(wake_count > 0);
VERIFY(Processor::current().in_critical());
VERIFY(wake_count > 0);
if (!s_smp_enabled)
return 0;
// Wake at most N - 1 processors
if (wake_count >= Processor::count()) {
wake_count = Processor::count() - 1;
ASSERT(wake_count > 0);
VERIFY(wake_count > 0);
}
u32 current_id = Processor::current().id();
@ -1853,7 +1853,7 @@ bool Processor::smp_process_pending_messages()
case ProcessorMessage::FlushTlb:
if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
// We assume that we don't cross into kernel land!
ASSERT(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
// This processor isn't using this page directory right now, we can ignore this request
dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
@ -1866,7 +1866,7 @@ bool Processor::smp_process_pending_messages()
bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(prev_refs != 0);
VERIFY(prev_refs != 0);
if (prev_refs == 1) {
// All processors handled this. If this is an async message,
// we need to clean it up and return it to the pool
@ -1894,7 +1894,7 @@ bool Processor::smp_queue_message(ProcessorMessage& msg)
// the queue at any given time. We rely on the fact that the messages
// are pooled and never get freed!
auto& msg_entry = msg.per_proc_entries[id()];
ASSERT(msg_entry.msg == &msg);
VERIFY(msg_entry.msg == &msg);
ProcessorMessageEntry* next = nullptr;
do {
msg_entry.next = next;
@ -1909,7 +1909,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));
atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
ASSERT(msg.refs > 0);
VERIFY(msg.refs > 0);
bool need_broadcast = false;
for_each(
[&](Processor& proc) -> IterationDecision {
@ -1928,7 +1928,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
{
auto& cur_proc = Processor::current();
ASSERT(!msg.async);
VERIFY(!msg.async);
// If synchronous, we must clean up and return the message
// to the pool. Otherwise, the last processor to complete it will return it
while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
@ -1971,7 +1971,7 @@ void Processor::smp_broadcast(void (*callback)(), bool async)
void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
{
auto& cur_proc = Processor::current();
ASSERT(cpu != cur_proc.get_id());
VERIFY(cpu != cur_proc.get_id());
auto& target_proc = processors()[cpu];
msg.async = async;
@ -2068,8 +2068,8 @@ UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
{
ASSERT(m_in_critical);
ASSERT(!entry->was_allocated);
VERIFY(m_in_critical);
VERIFY(!entry->was_allocated);
entry->next = m_free_deferred_call_pool_entry;
m_free_deferred_call_pool_entry = entry;
@ -2077,13 +2077,13 @@ void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
DeferredCallEntry* Processor::deferred_call_get_free()
{
ASSERT(m_in_critical);
VERIFY(m_in_critical);
if (m_free_deferred_call_pool_entry) {
// Fast path, we have an entry in our pool
auto* entry = m_free_deferred_call_pool_entry;
m_free_deferred_call_pool_entry = entry->next;
ASSERT(!entry->was_allocated);
VERIFY(!entry->was_allocated);
return entry;
}
@ -2094,7 +2094,7 @@ DeferredCallEntry* Processor::deferred_call_get_free()
void Processor::deferred_call_execute_pending()
{
ASSERT(m_in_critical);
VERIFY(m_in_critical);
if (!m_pending_deferred_calls)
return;
@ -2137,7 +2137,7 @@ void Processor::deferred_call_execute_pending()
void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
{
ASSERT(m_in_critical);
VERIFY(m_in_critical);
entry->next = m_pending_deferred_calls;
m_pending_deferred_calls = entry;
}

View File

@ -912,14 +912,14 @@ public:
ALWAYS_INLINE void restore_irq(u32 prev_irq)
{
ASSERT(prev_irq <= m_in_irq);
VERIFY(prev_irq <= m_in_irq);
if (!prev_irq) {
u32 prev_critical = 0;
if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
m_in_irq = prev_irq;
deferred_call_execute_pending();
auto prev_raised = m_in_critical.exchange(prev_critical);
ASSERT(prev_raised == prev_critical + 1);
VERIFY(prev_raised == prev_critical + 1);
check_invoke_scheduler();
} else if (prev_critical == 0) {
check_invoke_scheduler();
@ -949,11 +949,11 @@ public:
ALWAYS_INLINE void leave_critical(u32 prev_flags)
{
cli(); // Need to prevent IRQs from interrupting us here!
ASSERT(m_in_critical > 0);
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (!m_in_irq) {
deferred_call_execute_pending();
ASSERT(m_in_critical == 1);
VERIFY(m_in_critical == 1);
}
m_in_critical--;
if (!m_in_irq)
@ -981,7 +981,7 @@ public:
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
{
m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
ASSERT(!prev_crit || !(prev_flags & 0x200));
VERIFY(!prev_crit || !(prev_flags & 0x200));
if (prev_flags & 0x200)
sti();
else
@ -1105,14 +1105,14 @@ public:
void leave()
{
ASSERT(m_valid);
VERIFY(m_valid);
m_valid = false;
Processor::current().leave_critical(m_prev_flags);
}
void enter()
{
ASSERT(!m_valid);
VERIFY(!m_valid);
m_valid = true;
Processor::current().enter_critical(m_prev_flags);
}

View File

@ -52,7 +52,7 @@ ProcessorInfo::ProcessorInfo(Processor& processor)
m_cpuid = builder.build();
}
{
ASSERT(max_leaf >= 1);
VERIFY(max_leaf >= 1);
CPUID cpuid(1);
m_stepping = cpuid.eax() & 0xf;
u32 model = (cpuid.eax() >> 4) & 0xf;

View File

@ -31,11 +31,11 @@
#ifdef DEBUG
[[noreturn]] void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func);
# define ASSERT(expr) (static_cast<bool>(expr) ? void(0) : __assertion_failed(# expr, __FILE__, __LINE__, __PRETTY_FUNCTION__))
# define ASSERT_NOT_REACHED() ASSERT(false)
# define VERIFY(expr) (static_cast<bool>(expr) ? void(0) : __assertion_failed(# expr, __FILE__, __LINE__, __PRETTY_FUNCTION__))
# define VERIFY_NOT_REACHED() VERIFY(false)
#else
# define ASSERT(expr)
# define ASSERT_NOT_REACHED() CRASH()
# define VERIFY(expr)
# define VERIFY_NOT_REACHED() CRASH()
#endif
#define CRASH() \
do { \
@ -47,6 +47,6 @@
CRASH(); \
} while (0)
#define ASSERT_INTERRUPTS_DISABLED() ASSERT(!(cpu_flags() & 0x200))
#define ASSERT_INTERRUPTS_ENABLED() ASSERT(cpu_flags() & 0x200)
#define TODO ASSERT_NOT_REACHED
#define VERIFY_INTERRUPTS_DISABLED() VERIFY(!(cpu_flags() & 0x200))
#define VERIFY_INTERRUPTS_ENABLED() VERIFY(cpu_flags() & 0x200)
#define TODO VERIFY_NOT_REACHED

View File

@ -45,13 +45,13 @@ UNMAP_AFTER_INIT void CommandLine::early_initialize(const char* cmd_line)
const CommandLine& kernel_command_line()
{
ASSERT(s_the);
VERIFY(s_the);
return *s_the;
}
UNMAP_AFTER_INIT void CommandLine::initialize()
{
ASSERT(!s_the);
VERIFY(!s_the);
s_the = new CommandLine(s_cmd_line);
}

View File

@ -81,7 +81,7 @@ size_t DMIExpose::structure_table_length() const
UNMAP_AFTER_INIT void DMIExpose::initialize_exposer()
{
ASSERT(!(m_entry_point.is_null()));
VERIFY(!(m_entry_point.is_null()));
if (m_using_64bit_entry_point) {
set_64_bit_entry_initialization_values();
} else {

View File

@ -39,8 +39,8 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
{
{
ScopedSpinLock lock(m_lock);
ASSERT(is_completed_result(m_result));
ASSERT(m_sub_requests_pending.is_empty());
VERIFY(is_completed_result(m_result));
VERIFY(m_sub_requests_pending.is_empty());
}
// We should not need any locking here anymore. The destructor should
@ -50,8 +50,8 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
// Which means there should be no more pending sub-requests and the
// entire AsyncDeviceRequest hierarchy should be immutable.
for (auto& sub_request : m_sub_requests_complete) {
ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
ASSERT(sub_request.m_parent_request == this);
VERIFY(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
VERIFY(sub_request.m_parent_request == this);
sub_request.m_parent_request = nullptr;
}
}
@ -70,7 +70,7 @@ void AsyncDeviceRequest::request_finished()
auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
{
ASSERT(!m_parent_request);
VERIFY(!m_parent_request);
auto request_result = get_request_result();
if (is_completed_result(request_result))
return { request_result, Thread::BlockResult::NotBlocked };
@ -87,14 +87,14 @@ auto AsyncDeviceRequest::get_request_result() const -> RequestResult
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
{
// Sub-requests cannot be for the same device
ASSERT(&m_device != &sub_request->m_device);
ASSERT(sub_request->m_parent_request == nullptr);
VERIFY(&m_device != &sub_request->m_device);
VERIFY(sub_request->m_parent_request == nullptr);
sub_request->m_parent_request = this;
bool should_start;
{
ScopedSpinLock lock(m_lock);
ASSERT(!is_completed_result(m_result));
VERIFY(!is_completed_result(m_result));
m_sub_requests_pending.append(sub_request);
should_start = (m_result == Started);
}
@ -107,7 +107,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
bool all_completed;
{
ScopedSpinLock lock(m_lock);
ASSERT(m_result == Started);
VERIFY(m_result == Started);
size_t index;
for (index = 0; index < m_sub_requests_pending.size(); index++) {
if (&m_sub_requests_pending[index] == &sub_request) {
@ -117,7 +117,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
break;
}
}
ASSERT(index < m_sub_requests_pending.size());
VERIFY(index < m_sub_requests_pending.size());
all_completed = m_sub_requests_pending.is_empty();
if (all_completed) {
// Aggregate any errors
@ -126,7 +126,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
for (index = 0; index < m_sub_requests_complete.size(); index++) {
auto& sub_request = m_sub_requests_complete[index];
auto sub_result = sub_request.get_request_result();
ASSERT(is_completed_result(sub_result));
VERIFY(is_completed_result(sub_result));
switch (sub_result) {
case Failure:
any_failures = true;
@ -154,11 +154,11 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
void AsyncDeviceRequest::complete(RequestResult result)
{
ASSERT(result == Success || result == Failure || result == MemoryFault);
VERIFY(result == Success || result == Failure || result == MemoryFault);
ScopedCritical critical;
{
ScopedSpinLock lock(m_lock);
ASSERT(m_result == Started);
VERIFY(m_result == Started);
m_result = result;
}
if (Processor::current().in_irq()) {

View File

@ -87,7 +87,7 @@ public:
void set_private(void* priv)
{
ASSERT(!m_private || !priv);
VERIFY(!m_private || !priv);
m_private = priv;
}
void* get_private() const { return m_private; }

View File

@ -101,7 +101,7 @@ u16 BXVGADevice::get_register(u16 index)
void BXVGADevice::revert_resolution()
{
set_resolution_registers(m_framebuffer_width, m_framebuffer_height);
ASSERT(validate_setup_resolution(m_framebuffer_width, m_framebuffer_height));
VERIFY(validate_setup_resolution(m_framebuffer_width, m_framebuffer_height));
}
void BXVGADevice::set_resolution_registers(size_t width, size_t height)
@ -152,7 +152,7 @@ bool BXVGADevice::validate_setup_resolution(size_t width, size_t height)
void BXVGADevice::set_y_offset(size_t y_offset)
{
ASSERT(y_offset == 0 || y_offset == m_framebuffer_height);
VERIFY(y_offset == 0 || y_offset == m_framebuffer_height);
m_y_offset = y_offset;
set_register(VBE_DISPI_INDEX_Y_OFFSET, (u16)y_offset);
}

View File

@ -64,7 +64,7 @@ bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer)
dbgln("BlockDevice::read_block({}) cancelled", index);
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return false;
}
@ -85,7 +85,7 @@ bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& buffer)
dbgln("BlockDevice::write_block({}) cancelled", index);
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return false;
}

View File

@ -57,7 +57,7 @@ public:
case Write:
return "BlockDeviceRequest (write)";
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

View File

@ -61,7 +61,7 @@ Device::Device(unsigned major, unsigned minor)
if (it != all_devices().end()) {
dbgln("Already registered {},{}: {}", major, minor, it->value->class_name());
}
ASSERT(!all_devices().contains(device_id));
VERIFY(!all_devices().contains(device_id));
all_devices().set(device_id, this);
}
@ -86,8 +86,8 @@ void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncD
{
ScopedSpinLock lock(m_requests_lock);
ASSERT(!m_requests.is_empty());
ASSERT(m_requests.first().ptr() == &completed_request);
VERIFY(!m_requests.is_empty());
VERIFY(m_requests.first().ptr() == &completed_request);
m_requests.remove(m_requests.begin());
if (!m_requests.is_empty())
next_request = m_requests.first().ptr();

View File

@ -41,13 +41,13 @@ UNMAP_AFTER_INIT void I8042Controller::initialize()
I8042Controller& I8042Controller::the()
{
ASSERT(s_the);
VERIFY(s_the);
return *s_the;
}
UNMAP_AFTER_INIT I8042Controller::I8042Controller()
{
ASSERT(!s_the);
VERIFY(!s_the);
s_the = this;
u8 configuration;
@ -148,7 +148,7 @@ UNMAP_AFTER_INIT I8042Controller::I8042Controller()
void I8042Controller::irq_process_input_buffer(Device)
{
ASSERT(Processor::current().in_irq());
VERIFY(Processor::current().in_irq());
u8 status = IO::in8(I8042_STATUS);
if (!(status & I8042_BUFFER_FULL))
@ -171,10 +171,10 @@ void I8042Controller::do_drain()
bool I8042Controller::do_reset_device(Device device)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
if (do_send_command(device, 0xff) != I8042_ACK)
return false;
// Wait until we get the self-test result
@ -183,20 +183,20 @@ bool I8042Controller::do_reset_device(Device device)
u8 I8042Controller::do_send_command(Device device, u8 command)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
return do_write_to_device(device, command);
}
u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
u8 response = do_write_to_device(device, command);
if (response == I8042_ACK)
@ -206,10 +206,10 @@ u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
u8 I8042Controller::do_write_to_device(Device device, u8 data)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
int attempts = 0;
u8 response;
@ -230,7 +230,7 @@ u8 I8042Controller::do_write_to_device(Device device, u8 data)
u8 I8042Controller::do_read_from_device(Device device)
{
ASSERT(device != Device::None);
VERIFY(device != Device::None);
prepare_for_input(device);
return IO::in8(I8042_BUFFER);
@ -238,7 +238,7 @@ u8 I8042Controller::do_read_from_device(Device device)
void I8042Controller::prepare_for_input(Device device)
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
const u8 buffer_type = device == Device::Keyboard ? I8042_KEYBOARD_BUFFER : I8042_MOUSE_BUFFER;
for (;;) {
u8 status = IO::in8(I8042_STATUS);
@ -249,7 +249,7 @@ void I8042Controller::prepare_for_input(Device device)
void I8042Controller::prepare_for_output()
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
for (;;) {
if (!(IO::in8(I8042_STATUS) & 2))
return;
@ -258,14 +258,14 @@ void I8042Controller::prepare_for_output()
void I8042Controller::do_wait_then_write(u8 port, u8 data)
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
prepare_for_output();
IO::out8(port, data);
}
u8 I8042Controller::do_wait_then_read(u8 port)
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
prepare_for_input(Device::None);
return IO::in8(port);
}

View File

@ -113,7 +113,7 @@ private:
static int device_to_deviceinfo_index(Device device)
{
ASSERT(device != Device::None);
VERIFY(device != Device::None);
return (device == Device::Keyboard) ? 0 : 1;
}

View File

@ -450,7 +450,7 @@ KResultOr<size_t> KeyboardDevice::read(FileDescription&, size_t, UserOrKernelBuf
});
if (n < 0)
return KResult((ErrnoCode)-n);
ASSERT((size_t)n == sizeof(Event));
VERIFY((size_t)n == sizeof(Event));
nread += sizeof(Event);
lock.lock();

View File

@ -114,7 +114,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
evaluate_block_conditions();
};
ASSERT(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
VERIFY(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
m_data.bytes[m_data_state] = byte;
switch (m_data_state) {
@ -136,7 +136,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
commit_packet();
break;
case 3:
ASSERT(m_has_wheel);
VERIFY(m_has_wheel);
commit_packet();
break;
}
@ -275,7 +275,7 @@ bool PS2MouseDevice::can_read(const FileDescription&, size_t) const
KResultOr<size_t> PS2MouseDevice::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t size)
{
ASSERT(size > 0);
VERIFY(size > 0);
size_t nread = 0;
size_t remaining_space_in_buffer = static_cast<size_t>(size) - nread;
ScopedSpinLock lock(m_queue_lock);

View File

@ -153,7 +153,7 @@ void SB16::set_irq_register(u8 irq_number)
bitmask = 0b1000;
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
IO::out8(0x224, 0x80);
IO::out8(0x225, bitmask);
@ -258,7 +258,7 @@ KResultOr<size_t> SB16::write(FileDescription&, size_t, const UserOrKernelBuffer
#if SB16_DEBUG
klog() << "SB16: Writing buffer of " << length << " bytes";
#endif
ASSERT(length <= PAGE_SIZE);
VERIFY(length <= PAGE_SIZE);
const int BLOCK_SIZE = 32 * 1024;
if (length > BLOCK_SIZE) {
return ENOSPC;

View File

@ -296,7 +296,7 @@ QueueHead* UHCIController::allocate_queue_head() const
}
}
ASSERT_NOT_REACHED(); // Let's just assert for now, this should never happen
VERIFY_NOT_REACHED(); // Let's just assert for now, this should never happen
return nullptr; // Huh!? We're outta queue heads!
}
@ -312,7 +312,7 @@ TransferDescriptor* UHCIController::allocate_transfer_descriptor() const
}
}
ASSERT_NOT_REACHED(); // Let's just assert for now, this should never happen
VERIFY_NOT_REACHED(); // Let's just assert for now, this should never happen
return nullptr; // Huh?! We're outta TDs!!
}

View File

@ -105,13 +105,13 @@ struct alignas(16) TransferDescriptor final {
void set_in_use(bool in_use) { m_in_use = in_use; }
void set_max_len(u16 max_len)
{
ASSERT(max_len < 0x500 || max_len == 0x7ff);
VERIFY(max_len < 0x500 || max_len == 0x7ff);
m_token |= (max_len << 21);
}
void set_device_address(u8 address)
{
ASSERT(address <= 0x7f);
VERIFY(address <= 0x7f);
m_token |= (address << 8);
}

View File

@ -53,7 +53,7 @@ void DoubleBuffer::flip()
{
if (m_storage.is_null())
return;
ASSERT(m_read_buffer_index == m_read_buffer->size);
VERIFY(m_read_buffer_index == m_read_buffer->size);
swap(m_read_buffer, m_write_buffer);
m_write_buffer->size = 0;
m_read_buffer_index = 0;
@ -64,7 +64,7 @@ ssize_t DoubleBuffer::write(const UserOrKernelBuffer& data, size_t size)
{
if (!size || m_storage.is_null())
return 0;
ASSERT(size > 0);
VERIFY(size > 0);
LOCKER(m_lock);
size_t bytes_to_write = min(size, m_space_for_writing);
u8* write_ptr = m_write_buffer->data + m_write_buffer->size;
@ -81,7 +81,7 @@ ssize_t DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
{
if (!size || m_storage.is_null())
return 0;
ASSERT(size > 0);
VERIFY(size > 0);
LOCKER(m_lock);
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0)
flip();

View File

@ -56,7 +56,7 @@ public:
void set_unblock_callback(Function<void()> callback)
{
ASSERT(!m_unblock_callback);
VERIFY(!m_unblock_callback);
m_unblock_callback = move(callback);
}

View File

@ -78,7 +78,7 @@ public:
{
if (auto it = m_hash.find(block_index); it != m_hash.end()) {
auto& entry = const_cast<CacheEntry&>(*it->value);
ASSERT(entry.block_index == block_index);
VERIFY(entry.block_index == block_index);
return entry;
}
@ -90,7 +90,7 @@ public:
return get(block_index);
}
ASSERT(m_clean_list.last());
VERIFY(m_clean_list.last());
auto& new_entry = *m_clean_list.last();
m_clean_list.prepend(new_entry);
@ -127,7 +127,7 @@ private:
BlockBasedFS::BlockBasedFS(FileDescription& file_description)
: FileBackedFS(file_description)
{
ASSERT(file_description.file().is_seekable());
VERIFY(file_description.file().is_seekable());
}
BlockBasedFS::~BlockBasedFS()
@ -136,8 +136,8 @@ BlockBasedFS::~BlockBasedFS()
KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
{
ASSERT(m_logical_block_size);
ASSERT(offset + count <= block_size());
VERIFY(m_logical_block_size);
VERIFY(offset + count <= block_size());
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);
if (!allow_cache) {
@ -147,7 +147,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
auto nwritten = file_description().write(data, count);
if (nwritten.is_error())
return nwritten.error();
ASSERT(nwritten.value() == count);
VERIFY(nwritten.value() == count);
return KSuccess;
}
@ -171,8 +171,8 @@ bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
u32 base_offset = index.value() * m_logical_block_size;
file_description().seek(base_offset, SEEK_SET);
auto nread = file_description().read(buffer, m_logical_block_size);
ASSERT(!nread.is_error());
ASSERT(nread.value() == m_logical_block_size);
VERIFY(!nread.is_error());
VERIFY(nread.value() == m_logical_block_size);
return true;
}
bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
@ -180,8 +180,8 @@ bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
size_t base_offset = index.value() * m_logical_block_size;
file_description().seek(base_offset, SEEK_SET);
auto nwritten = file_description().write(buffer, m_logical_block_size);
ASSERT(!nwritten.is_error());
ASSERT(nwritten.value() == m_logical_block_size);
VERIFY(!nwritten.is_error());
VERIFY(nwritten.value() == m_logical_block_size);
return true;
}
@ -208,7 +208,7 @@ bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOr
KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
{
ASSERT(m_logical_block_size);
VERIFY(m_logical_block_size);
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_blocks {}, count={}", index, count);
for (unsigned i = 0; i < count; ++i) {
auto result = write_block(BlockIndex { index.value() + i }, data.offset(i * block_size()), block_size(), 0, allow_cache);
@ -220,8 +220,8 @@ KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserO
KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
{
ASSERT(m_logical_block_size);
ASSERT(offset + count <= block_size());
VERIFY(m_logical_block_size);
VERIFY(offset + count <= block_size());
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);
if (!allow_cache) {
@ -231,7 +231,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
auto nread = file_description().read(*buffer, count);
if (nread.is_error())
return nread.error();
ASSERT(nread.value() == count);
VERIFY(nread.value() == count);
return KSuccess;
}
@ -243,7 +243,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
auto nread = file_description().read(entry_data_buffer, block_size());
if (nread.is_error())
return nread.error();
ASSERT(nread.value() == block_size());
VERIFY(nread.value() == block_size());
entry.has_data = true;
}
if (buffer && !buffer->write(entry.data + offset, count))
@ -253,7 +253,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
{
ASSERT(m_logical_block_size);
VERIFY(m_logical_block_size);
if (!count)
return EINVAL;
if (count == 1)

View File

@ -61,7 +61,7 @@ size_t DevFS::allocate_inode_index()
{
LOCKER(m_lock);
m_next_inode_index = m_next_inode_index.value() + 1;
ASSERT(m_next_inode_index > 0);
VERIFY(m_next_inode_index > 0);
return 1 + m_next_inode_index.value();
}
@ -102,17 +102,17 @@ DevFSInode::DevFSInode(DevFS& fs)
}
ssize_t DevFSInode::read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResult DevFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)>) const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
RefPtr<Inode> DevFSInode::lookup(StringView)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DevFSInode::flush_metadata()
@ -121,7 +121,7 @@ void DevFSInode::flush_metadata()
ssize_t DevFSInode::write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResultOr<NonnullRefPtr<Inode>> DevFSInode::create_child(const String&, mode_t, dev_t, uid_t, gid_t)
@ -141,7 +141,7 @@ KResult DevFSInode::remove_child(const StringView&)
KResultOr<size_t> DevFSInode::directory_entry_count() const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResult DevFSInode::chmod(mode_t)
@ -174,8 +174,8 @@ DevFSLinkInode::DevFSLinkInode(DevFS& fs, String name)
ssize_t DevFSLinkInode::read_bytes(off_t offset, ssize_t, UserOrKernelBuffer& buffer, FileDescription*) const
{
LOCKER(m_lock);
ASSERT(offset == 0);
ASSERT(!m_link.is_null());
VERIFY(offset == 0);
VERIFY(!m_link.is_null());
if (!buffer.write(((const u8*)m_link.substring_view(0).characters_without_null_termination()) + offset, m_link.length()))
return -EFAULT;
return m_link.length();
@ -195,8 +195,8 @@ InodeMetadata DevFSLinkInode::metadata() const
ssize_t DevFSLinkInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription*)
{
LOCKER(m_lock);
ASSERT(offset == 0);
ASSERT(buffer.is_kernel_buffer());
VERIFY(offset == 0);
VERIFY(buffer.is_kernel_buffer());
m_link = buffer.copy_into_string(count);
return count;
}
@ -361,7 +361,7 @@ String DevFSDeviceInode::name() const
ssize_t DevFSDeviceInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
LOCKER(m_lock);
ASSERT(!!description);
VERIFY(!!description);
if (!m_attached_device->can_read(*description, offset))
return -EIO;
auto nread = const_cast<Device&>(*m_attached_device).read(*description, offset, buffer, count);
@ -387,7 +387,7 @@ InodeMetadata DevFSDeviceInode::metadata() const
ssize_t DevFSDeviceInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription* description)
{
LOCKER(m_lock);
ASSERT(!!description);
VERIFY(!!description);
if (!m_attached_device->can_write(*description, offset))
return -EIO;
auto nwritten = const_cast<Device&>(*m_attached_device).write(*description, offset, buffer, count);

View File

@ -63,7 +63,7 @@ bool DevPtsFS::initialize()
static unsigned inode_index_to_pty_index(InodeIndex inode_index)
{
ASSERT(inode_index > 1);
VERIFY(inode_index > 1);
return inode_index.value() - 2;
}
@ -84,7 +84,7 @@ RefPtr<Inode> DevPtsFS::get_inode(InodeIdentifier inode_id) const
unsigned pty_index = inode_index_to_pty_index(inode_id.index());
auto* device = Device::get_device(201, pty_index);
ASSERT(device);
VERIFY(device);
auto inode = adopt(*new DevPtsFSInode(const_cast<DevPtsFS&>(*this), inode_id.index(), static_cast<SlavePTY*>(device)));
inode->m_metadata.inode = inode_id;
@ -122,12 +122,12 @@ DevPtsFSInode::~DevPtsFSInode()
ssize_t DevPtsFSInode::read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ssize_t DevPtsFSInode::write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
InodeMetadata DevPtsFSInode::metadata() const
@ -159,14 +159,14 @@ KResult DevPtsFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEn
KResultOr<size_t> DevPtsFSInode::directory_entry_count() const
{
ASSERT(identifier().index() == 1);
VERIFY(identifier().index() == 1);
return 2 + s_ptys->size();
}
RefPtr<Inode> DevPtsFSInode::lookup(StringView name)
{
ASSERT(identifier().index() == 1);
VERIFY(identifier().index() == 1);
if (name == "." || name == "..")
return this;

View File

@ -91,28 +91,28 @@ Ext2FS::~Ext2FS()
bool Ext2FS::flush_super_block()
{
LOCKER(m_lock);
ASSERT((sizeof(ext2_super_block) % logical_block_size()) == 0);
VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
bool success = raw_write_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
ASSERT(success);
VERIFY(success);
return true;
}
const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
{
// FIXME: Should this fail gracefully somehow?
ASSERT(group_index <= m_block_group_count);
ASSERT(group_index > 0);
VERIFY(group_index <= m_block_group_count);
VERIFY(group_index > 0);
return block_group_descriptors()[group_index.value() - 1];
}
bool Ext2FS::initialize()
{
LOCKER(m_lock);
ASSERT((sizeof(ext2_super_block) % logical_block_size()) == 0);
VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
bool success = raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
ASSERT(success);
VERIFY(success);
auto& super_block = this->super_block();
if constexpr (EXT2_DEBUG) {
@ -134,7 +134,7 @@ bool Ext2FS::initialize()
set_block_size(EXT2_BLOCK_SIZE(&super_block));
ASSERT(block_size() <= (int)max_block_size);
VERIFY(block_size() <= (int)max_block_size);
m_block_group_count = ceil_div(super_block.s_blocks_count, super_block.s_blocks_per_group);
@ -227,7 +227,7 @@ Ext2FS::BlockListShape Ext2FS::compute_block_list_shape(unsigned blocks) const
shape.meta_blocks += divide_rounded_up(shape.triply_indirect_blocks, entries_per_block * entries_per_block);
shape.meta_blocks += divide_rounded_up(shape.triply_indirect_blocks, entries_per_block);
blocks_remaining -= shape.triply_indirect_blocks;
ASSERT(blocks_remaining == 0);
VERIFY(blocks_remaining == 0);
return shape;
}
@ -302,7 +302,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
auto block_contents = ByteBuffer::create_uninitialized(block_size());
OutputMemoryStream stream { block_contents };
ASSERT(new_shape.indirect_blocks <= entries_per_block);
VERIFY(new_shape.indirect_blocks <= entries_per_block);
for (unsigned i = 0; i < new_shape.indirect_blocks; ++i) {
stream << blocks[output_block_index++].value();
--remaining_blocks;
@ -355,7 +355,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
}
auto* dind_block_as_pointers = (unsigned*)dind_block_contents.data();
ASSERT(indirect_block_count <= entries_per_block);
VERIFY(indirect_block_count <= entries_per_block);
for (unsigned i = 0; i < indirect_block_count; ++i) {
bool ind_block_dirty = false;
@ -386,7 +386,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
if (entries_to_write > entries_per_block)
entries_to_write = entries_per_block;
ASSERT(entries_to_write <= entries_per_block);
VERIFY(entries_to_write <= entries_per_block);
for (unsigned j = 0; j < entries_to_write; ++j) {
BlockIndex output_block = blocks[output_block_index++];
if (ind_block_as_pointers[j] != output_block) {
@ -405,7 +405,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
if (ind_block_dirty) {
auto buffer = UserOrKernelBuffer::for_kernel_buffer(ind_block_contents.data());
int err = write_block(indirect_block_index, buffer, block_size());
ASSERT(err >= 0);
VERIFY(err >= 0);
}
}
for (unsigned i = indirect_block_count; i < entries_per_block; ++i) {
@ -418,7 +418,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
if (dind_block_dirty) {
auto buffer = UserOrKernelBuffer::for_kernel_buffer(dind_block_contents.data());
int err = write_block(e2inode.i_block[EXT2_DIND_BLOCK], buffer, block_size());
ASSERT(err >= 0);
VERIFY(err >= 0);
}
}
@ -427,7 +427,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
// FIXME: Implement!
dbgln("we don't know how to write tind ext2fs blocks yet!");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode(const ext2_inode& e2inode, bool include_block_list_blocks) const
@ -536,13 +536,13 @@ Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode_impl(const ext2_inode& e
void Ext2FS::free_inode(Ext2FSInode& inode)
{
LOCKER(m_lock);
ASSERT(inode.m_raw_inode.i_links_count == 0);
VERIFY(inode.m_raw_inode.i_links_count == 0);
dbgln_if(EXT2_DEBUG, "Ext2FS: Inode {} has no more links, time to delete!", inode.index());
// Mark all blocks used by this inode as free.
auto block_list = block_list_for_inode(inode.m_raw_inode, true);
for (auto block_index : block_list) {
ASSERT(block_index <= super_block().s_blocks_count);
VERIFY(block_index <= super_block().s_blocks_count);
if (block_index.value())
set_block_allocation_state(block_index, false);
}
@ -674,7 +674,7 @@ void Ext2FSInode::flush_metadata()
RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
{
LOCKER(m_lock);
ASSERT(inode.fsid() == fsid());
VERIFY(inode.fsid() == fsid());
{
auto it = m_inode_cache.find(inode.index());
@ -706,14 +706,14 @@ RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
Locker inode_locker(m_lock);
ASSERT(offset >= 0);
VERIFY(offset >= 0);
if (m_raw_inode.i_size == 0)
return 0;
// Symbolic links shorter than 60 characters are stored inline inside the i_block array.
// This avoids wasting an entire block on short links. (Most links are short.)
if (is_symlink() && size() < max_inline_symlink_length) {
ASSERT(offset == 0);
VERIFY(offset == 0);
ssize_t nread = min((off_t)size() - offset, static_cast<off_t>(count));
if (!buffer.write(((const u8*)m_raw_inode.i_block) + offset, (size_t)nread))
return -EFAULT;
@ -748,7 +748,7 @@ ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer&
for (size_t bi = first_block_logical_index; remaining_count && bi <= last_block_logical_index; ++bi) {
auto block_index = m_block_list[bi];
ASSERT(block_index.value());
VERIFY(block_index.value());
size_t offset_into_block = (bi == first_block_logical_index) ? offset_into_first_block : 0;
size_t num_bytes_to_copy = min(block_size - offset_into_block, remaining_count);
auto buffer_offset = buffer.offset(nread);
@ -827,7 +827,7 @@ KResult Ext2FSInode::resize(u64 new_size)
auto nwritten = write_bytes(clear_from, min(sizeof(zero_buffer), bytes_to_clear), UserOrKernelBuffer::for_kernel_buffer(zero_buffer), nullptr);
if (nwritten < 0)
return KResult((ErrnoCode)-nwritten);
ASSERT(nwritten != 0);
VERIFY(nwritten != 0);
bytes_to_clear -= nwritten;
clear_from += nwritten;
}
@ -838,8 +838,8 @@ KResult Ext2FSInode::resize(u64 new_size)
ssize_t Ext2FSInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& data, FileDescription* description)
{
ASSERT(offset >= 0);
ASSERT(count >= 0);
VERIFY(offset >= 0);
VERIFY(count >= 0);
Locker inode_locker(m_lock);
Locker fs_locker(fs().m_lock);
@ -849,7 +849,7 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, ssize_t count, const UserOrKernel
return result;
if (is_symlink()) {
ASSERT(offset == 0);
VERIFY(offset == 0);
if (max((size_t)(offset + count), (size_t)m_raw_inode.i_size) < max_inline_symlink_length) {
dbgln_if(EXT2_DEBUG, "Ext2FS: write_bytes poking into i_block array for inline symlink '{}' ({} bytes)", data.copy_into_string(count), count);
if (!data.read(((u8*)m_raw_inode.i_block) + offset, (size_t)count))
@ -937,7 +937,7 @@ u8 Ext2FS::internal_file_type_to_directory_entry_type(const DirectoryEntryView&
KResult Ext2FSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
{
LOCKER(m_lock);
ASSERT(is_directory());
VERIFY(is_directory());
dbgln_if(EXT2_VERY_DEBUG, "Ext2FS: Traversing as directory: {}", index());
@ -1020,7 +1020,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FSInode::create_child(const String& name, mo
KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode)
{
LOCKER(m_lock);
ASSERT(is_directory());
VERIFY(is_directory());
if (name.length() > EXT2_NAME_LEN)
return ENAMETOOLONG;
@ -1064,7 +1064,7 @@ KResult Ext2FSInode::remove_child(const StringView& name)
{
LOCKER(m_lock);
dbgln_if(EXT2_DEBUG, "Ext2FSInode::remove_child('{}') in inode {}", name, index());
ASSERT(is_directory());
VERIFY(is_directory());
auto it = m_lookup_cache.find(name);
if (it == m_lookup_cache.end())
@ -1162,7 +1162,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
}
}
ASSERT(found_a_group);
VERIFY(found_a_group);
auto& bgd = group_descriptor(group_index);
auto& cached_bitmap = get_bitmap_block(bgd.bg_block_bitmap);
@ -1172,7 +1172,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
BlockIndex first_block_in_group = (group_index.value() - 1) * blocks_per_group() + first_block_index().value();
size_t free_region_size = 0;
auto first_unset_bit_index = block_bitmap.find_longest_range_of_unset_bits(count - blocks.size(), free_region_size);
ASSERT(first_unset_bit_index.has_value());
VERIFY(first_unset_bit_index.has_value());
dbgln_if(EXT2_DEBUG, "Ext2FS: allocating free region of size: {} [{}]", free_region_size, group_index);
for (size_t i = 0; i < free_region_size; ++i) {
BlockIndex block_index = (first_unset_bit_index.value() + i) + first_block_in_group.value();
@ -1182,7 +1182,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
}
}
ASSERT(blocks.size() == count);
VERIFY(blocks.size() == count);
return blocks;
}
@ -1239,7 +1239,7 @@ InodeIndex Ext2FS::find_a_free_inode(GroupIndex preferred_group)
InodeIndex inode = first_free_inode_in_group;
dbgln_if(EXT2_DEBUG, "Ext2FS: found suitable inode {}", inode);
ASSERT(get_inode_allocation_state(inode) == false);
VERIFY(get_inode_allocation_state(inode) == false);
return inode;
}
@ -1285,7 +1285,7 @@ bool Ext2FS::set_inode_allocation_state(InodeIndex inode_index, bool new_state)
dbgln_if(EXT2_DEBUG, "Ext2FS: set_inode_allocation_state({}) {} -> {}", inode_index, current_state, new_state);
if (current_state == new_state) {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return true;
}
@ -1325,14 +1325,14 @@ Ext2FS::CachedBitmap& Ext2FS::get_bitmap_block(BlockIndex bitmap_block_index)
auto block = KBuffer::create_with_size(block_size(), Region::Access::Read | Region::Access::Write, "Ext2FS: Cached bitmap block");
auto buffer = UserOrKernelBuffer::for_kernel_buffer(block.data());
int err = read_block(bitmap_block_index, &buffer, block_size());
ASSERT(err >= 0);
VERIFY(err >= 0);
m_cached_bitmaps.append(make<CachedBitmap>(bitmap_block_index, move(block)));
return *m_cached_bitmaps.last();
}
bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
{
ASSERT(block_index != 0);
VERIFY(block_index != 0);
LOCKER(m_lock);
auto group_index = group_index_from_block_index(block_index);
@ -1346,7 +1346,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
dbgln_if(EXT2_DEBUG, "Ext2FS: block {} state: {} -> {} (in bitmap block {})", block_index, current_state, new_state, bgd.bg_block_bitmap);
if (current_state == new_state) {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return true;
}
@ -1374,7 +1374,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
KResult Ext2FS::create_directory(Ext2FSInode& parent_inode, const String& name, mode_t mode, uid_t uid, gid_t gid)
{
LOCKER(m_lock);
ASSERT(is_directory(mode));
VERIFY(is_directory(mode));
auto inode_or_error = create_inode(parent_inode, name, mode, 0, uid, gid);
if (inode_or_error.is_error())
@ -1424,7 +1424,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,
// Looks like we're good, time to update the inode bitmap and group+global inode counters.
bool success = set_inode_allocation_state(inode_id, true);
ASSERT(success);
VERIFY(success);
struct timeval now;
kgettimeofday(now);
@ -1450,7 +1450,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,
e2inode.i_flags = 0;
success = write_ext2_inode(inode_id, e2inode);
ASSERT(success);
VERIFY(success);
// We might have cached the fact that this inode didn't exist. Wipe the slate.
m_inode_cache.remove(inode_id);
@ -1487,7 +1487,7 @@ bool Ext2FSInode::populate_lookup_cache() const
RefPtr<Inode> Ext2FSInode::lookup(StringView name)
{
ASSERT(is_directory());
VERIFY(is_directory());
if (!populate_lookup_cache())
return {};
LOCKER(m_lock);
@ -1549,7 +1549,7 @@ KResult Ext2FSInode::decrement_link_count()
LOCKER(m_lock);
if (fs().is_readonly())
return EROFS;
ASSERT(m_raw_inode.i_links_count);
VERIFY(m_raw_inode.i_links_count);
--m_raw_inode.i_links_count;
if (ref_count() == 1 && m_raw_inode.i_links_count == 0)
fs().uncache_inode(index());
@ -1565,7 +1565,7 @@ void Ext2FS::uncache_inode(InodeIndex index)
KResultOr<size_t> Ext2FSInode::directory_entry_count() const
{
ASSERT(is_directory());
VERIFY(is_directory());
LOCKER(m_lock);
populate_lookup_cache();
return m_lookup_cache.size();

View File

@ -134,13 +134,13 @@ void FIFO::detach(Direction direction)
#if FIFO_DEBUG
klog() << "close reader (" << m_readers << " - 1)";
#endif
ASSERT(m_readers);
VERIFY(m_readers);
--m_readers;
} else if (direction == Direction::Writer) {
#if FIFO_DEBUG
klog() << "close writer (" << m_writers << " - 1)";
#endif
ASSERT(m_writers);
VERIFY(m_writers);
--m_writers;
}

View File

@ -50,7 +50,7 @@ public:
virtual bool should_add_blocker(Thread::Blocker& b, void* data) override
{
ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
auto& blocker = static_cast<Thread::FileBlocker&>(b);
return !blocker.unblock(true, data);
}
@ -59,7 +59,7 @@ public:
{
ScopedSpinLock lock(m_lock);
do_unblock([&](auto& b, void* data, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
auto& blocker = static_cast<Thread::FileBlocker&>(b);
return blocker.unblock(false, data);
});
@ -159,7 +159,7 @@ protected:
private:
ALWAYS_INLINE void do_evaluate_block_conditions()
{
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
block_condition().unblock();
}

View File

@ -107,7 +107,7 @@ Thread::FileBlocker::BlockFlags FileDescription::should_unblock(Thread::FileBloc
if ((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::SocketFlags) {
auto* sock = socket();
ASSERT(sock);
VERIFY(sock);
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Accept) && sock->can_accept())
unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Accept;
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Connect) && sock->setup_state() == Socket::SetupState::Completed)
@ -205,8 +205,8 @@ bool FileDescription::can_read() const
KResultOr<NonnullOwnPtr<KBuffer>> FileDescription::read_entire_file()
{
// HACK ALERT: (This entire function)
ASSERT(m_file->is_inode());
ASSERT(m_inode);
VERIFY(m_file->is_inode());
VERIFY(m_inode);
return m_inode->read_entire(this);
}
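
Annotation: the should_unblock hunk tests individual bits of an enum class through explicit u32 casts. A small self-contained sketch of that flags idiom, with hypothetical flag values:

#include <cstdint>

enum class BlockFlags : uint32_t {
    None = 0,
    Accept = 1 << 0,
    Connect = 1 << 1,
    SocketFlags = Accept | Connect,
};

// enum class deliberately has no implicit conversions, so bit tests
// spell the intent out with casts, exactly as in the hunk above.
constexpr bool has_flag(BlockFlags value, BlockFlags flag)
{
    return (static_cast<uint32_t>(value) & static_cast<uint32_t>(flag)) != 0;
}

static_assert(has_flag(BlockFlags::SocketFlags, BlockFlags::Accept));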

View File

@ -95,7 +95,7 @@ void FS::lock_all()
void FS::set_block_size(size_t block_size)
{
ASSERT(block_size > 0);
VERIFY(block_size > 0);
if (block_size == m_block_size)
return;
m_block_size = block_size;

View File

@ -49,7 +49,7 @@ SpinLock<u32>& Inode::all_inodes_lock()
InlineLinkedList<Inode>& Inode::all_with_lock()
{
ASSERT(s_all_inodes_lock.is_locked());
VERIFY(s_all_inodes_lock.is_locked());
return *s_list;
}
@ -66,7 +66,7 @@ void Inode::sync()
}
for (auto& inode : inodes) {
ASSERT(inode.is_metadata_dirty());
VERIFY(inode.is_metadata_dirty());
inode.flush_metadata();
}
}
@ -83,7 +83,7 @@ KResultOr<NonnullOwnPtr<KBuffer>> Inode::read_entire(FileDescription* descriptio
nread = read_bytes(offset, sizeof(buffer), buf, description);
if (nread < 0)
return KResult((ErrnoCode)-nread);
ASSERT(nread <= (ssize_t)sizeof(buffer));
VERIFY(nread <= (ssize_t)sizeof(buffer));
if (nread <= 0)
break;
builder.append((const char*)buffer, nread);
@ -203,27 +203,27 @@ bool Inode::unbind_socket()
void Inode::register_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
LOCKER(m_lock);
ASSERT(!m_watchers.contains(&watcher));
VERIFY(!m_watchers.contains(&watcher));
m_watchers.set(&watcher);
}
void Inode::unregister_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
LOCKER(m_lock);
ASSERT(m_watchers.contains(&watcher));
VERIFY(m_watchers.contains(&watcher));
m_watchers.remove(&watcher);
}
NonnullRefPtr<FIFO> Inode::fifo()
{
LOCKER(m_lock);
ASSERT(metadata().is_fifo());
VERIFY(metadata().is_fifo());
// FIXME: Release m_fifo when it is closed by all readers and writers
if (!m_fifo)
m_fifo = FIFO::create(metadata().uid);
ASSERT(m_fifo);
VERIFY(m_fifo);
return *m_fifo;
}
@ -233,7 +233,7 @@ void Inode::set_metadata_dirty(bool metadata_dirty)
if (metadata_dirty) {
// Sanity check.
ASSERT(!fs().is_readonly());
VERIFY(!fs().is_readonly());
}
if (m_metadata_dirty == metadata_dirty)
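
Annotation: the register_watcher/unregister_watcher pair above VERIFYs both directions of one invariant — a watcher is added exactly once and removed exactly once. The same shape in a self-contained userspace sketch (hypothetical types, std::set standing in for the kernel's hash table):

#include <cassert>
#include <set>
#define VERIFY assert // userspace stand-in

struct Watcher { };

struct WatchedInode {
    std::set<Watcher*> watchers;

    void register_watcher(Watcher& watcher)
    {
        VERIFY(!watchers.contains(&watcher)); // double registration is a bug
        watchers.insert(&watcher);
    }

    void unregister_watcher(Watcher& watcher)
    {
        VERIFY(watchers.contains(&watcher)); // unbalanced unregister is a bug
        watchers.erase(&watcher);
    }
};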

View File

@ -122,8 +122,8 @@ KResultOr<Region*> InodeFile::mmap(Process& process, FileDescription& descriptio
String InodeFile::absolute_path(const FileDescription& description) const
{
ASSERT_NOT_REACHED();
ASSERT(description.custody());
VERIFY_NOT_REACHED();
VERIFY(description.custody());
return description.absolute_path();
}
@ -140,15 +140,15 @@ KResult InodeFile::truncate(u64 size)
KResult InodeFile::chown(FileDescription& description, uid_t uid, gid_t gid)
{
ASSERT(description.inode() == m_inode);
ASSERT(description.custody());
VERIFY(description.inode() == m_inode);
VERIFY(description.custody());
return VFS::the().chown(*description.custody(), uid, gid);
}
KResult InodeFile::chmod(FileDescription& description, mode_t mode)
{
ASSERT(description.inode() == m_inode);
ASSERT(description.custody());
VERIFY(description.inode() == m_inode);
VERIFY(description.custody());
return VFS::the().chmod(*description.custody(), mode);
}

View File

@ -60,7 +60,7 @@ bool InodeWatcher::can_write(const FileDescription&, size_t) const
KResultOr<size_t> InodeWatcher::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t buffer_size)
{
LOCKER(m_lock);
ASSERT(!m_queue.is_empty() || !m_inode);
VERIFY(!m_queue.is_empty() || !m_inode);
if (!m_inode)
return 0;

View File

@ -44,7 +44,7 @@ Plan9FS::~Plan9FS()
{
// Make sure to destroy the root inode before the FS gets destroyed.
if (m_root_inode) {
ASSERT(m_root_inode->ref_count() == 1);
VERIFY(m_root_inode->ref_count() == 1);
m_root_inode = nullptr;
}
}
@ -153,7 +153,7 @@ public:
template<typename N>
Decoder& read_number(N& number)
{
ASSERT(sizeof(number) <= m_data.length());
VERIFY(sizeof(number) <= m_data.length());
memcpy(&number, m_data.characters_without_null_termination(), sizeof(number));
m_data = m_data.substring_view(sizeof(number), m_data.length() - sizeof(number));
return *this;
@ -170,14 +170,14 @@ public:
template<typename T>
Message& operator>>(T& t)
{
ASSERT(m_have_been_built);
VERIFY(m_have_been_built);
m_built.decoder >> t;
return *this;
}
StringView read_data()
{
ASSERT(m_have_been_built);
VERIFY(m_have_been_built);
return m_built.decoder.read_data();
}
@ -197,7 +197,7 @@ private:
template<typename N>
Message& append_number(N number)
{
ASSERT(!m_have_been_built);
VERIFY(!m_have_been_built);
m_builder.append(reinterpret_cast<const char*>(&number), sizeof(number));
return *this;
}
@ -330,7 +330,7 @@ Plan9FS::Message::Decoder& Plan9FS::Message::Decoder::operator>>(StringView& str
{
u16 length;
*this >> length;
ASSERT(length <= m_data.length());
VERIFY(length <= m_data.length());
string = m_data.substring_view(0, length);
m_data = m_data.substring_view_starting_after_substring(string);
return *this;
@ -340,7 +340,7 @@ StringView Plan9FS::Message::Decoder::read_data()
{
u32 length;
*this >> length;
ASSERT(length <= m_data.length());
VERIFY(length <= m_data.length());
auto data = m_data.substring_view(0, length);
m_data = m_data.substring_view_starting_after_substring(data);
return data;
@ -401,12 +401,12 @@ Plan9FS::Message& Plan9FS::Message::operator=(Message&& message)
const KBuffer& Plan9FS::Message::build()
{
ASSERT(!m_have_been_built);
VERIFY(!m_have_been_built);
auto tmp_buffer = m_builder.build();
// FIXME: We should not assume success here.
ASSERT(tmp_buffer);
VERIFY(tmp_buffer);
m_have_been_built = true;
m_builder.~KBufferBuilder();
@ -470,7 +470,7 @@ bool Plan9FS::Plan9FSBlockCondition::should_add_blocker(Thread::Blocker& b, void
void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
{
unblock([&](Thread::Blocker& b, void*, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
auto& blocker = static_cast<Blocker&>(b);
return blocker.unblock(tag);
});
@ -479,7 +479,7 @@ void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
void Plan9FS::Plan9FSBlockCondition::unblock_all()
{
unblock([&](Thread::Blocker& b, void*, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
auto& blocker = static_cast<Blocker&>(b);
return blocker.unblock();
});
@ -498,13 +498,13 @@ bool Plan9FS::is_complete(const ReceiveCompletion& completion)
LOCKER(m_lock);
if (m_completions.contains(completion.tag)) {
// If it's still in the map then it can't be complete
ASSERT(!completion.completed);
VERIFY(!completion.completed);
return false;
}
// if it's not in the map anymore, it must be complete. But we MUST
// hold m_lock to be able to check completion.completed!
ASSERT(completion.completed);
VERIFY(completion.completed);
return true;
}
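
Annotation: the Decoder hunks are the security-sensitive ones in this file — every read_number/read_data bounds-checks the remaining message before the memcpy, and since the data comes off the wire those checks must hold in release builds. A reduced sketch of the pattern over a plain byte span (a hypothetical helper, not the Plan9FS::Message API):

#include <cassert>
#include <cstring>
#include <span>
#define VERIFY assert // userspace stand-in

template<typename N>
N read_number(std::span<const unsigned char>& data)
{
    VERIFY(sizeof(N) <= data.size()); // short message => trap, not an OOB read
    N number;
    std::memcpy(&number, data.data(), sizeof(N));
    data = data.subspan(sizeof(N)); // consume what we read
    return number;
}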

View File

@ -150,14 +150,14 @@ static inline ProcFileType to_proc_file_type(const InodeIdentifier& identifier)
static inline int to_fd(const InodeIdentifier& identifier)
{
ASSERT(to_proc_parent_directory(identifier) == PDI_PID_fd);
VERIFY(to_proc_parent_directory(identifier) == PDI_PID_fd);
return (identifier.index().value() & 0xff) - FI_MaxStaticFileIndex;
}
static inline size_t to_sys_index(const InodeIdentifier& identifier)
{
ASSERT(to_proc_parent_directory(identifier) == PDI_Root_sys);
ASSERT(to_proc_file_type(identifier) == FI_Root_sys_variable);
VERIFY(to_proc_parent_directory(identifier) == PDI_Root_sys);
VERIFY(to_proc_file_type(identifier) == FI_Root_sys_variable);
return identifier.index().value() >> 16u;
}
@ -178,7 +178,7 @@ static inline InodeIdentifier to_identifier_with_stack(unsigned fsid, ThreadID t
static inline InodeIdentifier sys_var_to_identifier(unsigned fsid, unsigned index)
{
ASSERT(index < 256);
VERIFY(index < 256);
return { fsid, (PDI_Root_sys << 12u) | (index << 16u) | FI_Root_sys_variable };
}
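
Annotation: to_sys_index() and sys_var_to_identifier() above are two halves of one bit-packing scheme — the parent-directory tag sits in bits 12-15, the sys-variable index in bits 16 and up, and the file type in the low bits. A self-contained restatement with hypothetical constant values (the real ones live in ProcFS.cpp):

constexpr unsigned PDI_Root_sys = 4;         // hypothetical value
constexpr unsigned FI_Root_sys_variable = 9; // hypothetical value

constexpr unsigned sys_var_index_to_inode_index(unsigned index)
{
    // index < 256 is VERIFYed at the call site in the hunk above
    return (PDI_Root_sys << 12u) | (index << 16u) | FI_Root_sys_variable;
}

// to_sys_index() inverts the packing by shifting the tag and type away:
static_assert(sys_var_index_to_inode_index(42) >> 16u == 42);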
@ -199,7 +199,7 @@ static inline InodeIdentifier to_parent_id(const InodeIdentifier& identifier)
case PDI_PID_stacks:
return to_identifier(identifier.fsid(), PDI_PID, to_pid(identifier), FI_PID_stacks);
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
#if 0
@ -436,7 +436,7 @@ static bool procfs$devices(InodeIdentifier, KBufferBuilder& builder)
else if (device.is_character_device())
obj.add("type", "character");
else
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
});
array.finish();
return true;
@ -633,7 +633,7 @@ static bool procfs$pid_exe(InodeIdentifier identifier, KBufferBuilder& builder)
if (!process)
return false;
auto* custody = process->executable();
ASSERT(custody);
VERIFY(custody);
builder.append(custody->absolute_path().bytes());
return true;
}
@ -884,14 +884,14 @@ SysVariable& SysVariable::for_inode(InodeIdentifier id)
if (index >= sys_variables().size())
return sys_variables()[0];
auto& variable = sys_variables()[index];
ASSERT(variable.address);
VERIFY(variable.address);
return variable;
}
static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::Boolean);
VERIFY(variable.type == SysVariable::Type::Boolean);
u8 buffer[2];
auto* lockable_bool = reinterpret_cast<Lockable<bool>*>(variable.address);
@ -907,7 +907,7 @@ static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::Boolean);
VERIFY(variable.type == SysVariable::Type::Boolean);
char value = 0;
bool did_read = false;
@ -920,7 +920,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
});
if (nread < 0)
return nread;
ASSERT(nread == 0 || (nread == 1 && did_read));
VERIFY(nread == 0 || (nread == 1 && did_read));
if (nread == 0 || !(value == '0' || value == '1'))
return (ssize_t)size;
@ -936,7 +936,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::String);
VERIFY(variable.type == SysVariable::Type::String);
auto* lockable_string = reinterpret_cast<Lockable<String>*>(variable.address);
LOCKER(lockable_string->lock(), Lock::Mode::Shared);
@ -947,7 +947,7 @@ static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
static ssize_t write_sys_string(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::String);
VERIFY(variable.type == SysVariable::Type::String);
auto string_copy = buffer.copy_into_string(size);
if (string_copy.is_null())
@ -1032,7 +1032,7 @@ RefPtr<Inode> ProcFS::get_inode(InodeIdentifier inode_id) const
}
auto inode = adopt(*new ProcFSInode(const_cast<ProcFS&>(*this), inode_id.index()));
auto result = m_inodes.set(inode_id.index().value(), inode.ptr());
ASSERT(result == ((it == m_inodes.end()) ? AK::HashSetResult::InsertedNewEntry : AK::HashSetResult::ReplacedExistingEntry));
VERIFY(result == ((it == m_inodes.end()) ? AK::HashSetResult::InsertedNewEntry : AK::HashSetResult::ReplacedExistingEntry));
return inode;
}
@ -1081,7 +1081,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
bool (*read_callback)(InodeIdentifier, KBufferBuilder&) = nullptr;
if (directory_entry) {
read_callback = directory_entry->read_callback;
ASSERT(read_callback);
VERIFY(read_callback);
} else {
switch (to_proc_parent_directory(identifier())) {
case PDI_PID_fd:
@ -1093,7 +1093,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
case PDI_Root_sys:
switch (SysVariable::for_inode(identifier()).type) {
case SysVariable::Type::Invalid:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
case SysVariable::Type::Boolean:
read_callback = read_sys_bool;
break;
@ -1103,10 +1103,10 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
}
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ASSERT(read_callback);
VERIFY(read_callback);
}
if (!cached_data)
@ -1231,8 +1231,8 @@ InodeMetadata ProcFSInode::metadata() const
ssize_t ProcFSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
dbgln_if(PROCFS_DEBUG, "ProcFS: read_bytes offset: {} count: {}", offset, count);
ASSERT(offset >= 0);
ASSERT(buffer.user_or_kernel_ptr());
VERIFY(offset >= 0);
VERIFY(buffer.user_or_kernel_ptr());
if (!description)
return -EIO;
@ -1350,7 +1350,7 @@ KResult ProcFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntr
RefPtr<Inode> ProcFSInode::lookup(StringView name)
{
ASSERT(is_directory());
VERIFY(is_directory());
if (name == ".")
return this;
if (name == "..")
@ -1490,7 +1490,7 @@ ssize_t ProcFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelB
if (to_proc_parent_directory(identifier()) == PDI_Root_sys) {
switch (SysVariable::for_inode(identifier()).type) {
case SysVariable::Type::Invalid:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
case SysVariable::Type::Boolean:
write_callback = write_sys_bool;
break;
@ -1506,9 +1506,9 @@ ssize_t ProcFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelB
write_callback = directory_entry->write_callback;
}
ASSERT(is_persistent_inode(identifier()));
VERIFY(is_persistent_inode(identifier()));
// FIXME: Being able to write into ProcFS at a non-zero offset seems like something we should maybe support..
ASSERT(offset == 0);
VERIFY(offset == 0);
ssize_t nwritten = write_callback(identifier(), buffer, (size_t)size);
if (nwritten < 0)
klog() << "ProcFS: Writing " << size << " bytes failed: " << nwritten;
@ -1565,7 +1565,7 @@ KResultOr<NonnullRefPtr<Custody>> ProcFSInode::resolve_as_link(Custody& base, Re
res = &process->root_directory();
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
if (!res)
@ -1666,7 +1666,7 @@ KResult ProcFSInode::remove_child([[maybe_unused]] const StringView& name)
KResultOr<size_t> ProcFSInode::directory_entry_count() const
{
ASSERT(is_directory());
VERIFY(is_directory());
size_t count = 0;
KResult result = traverse_as_directory([&count](auto&) {
++count;
