diff --git a/src/common/address_space.h b/src/common/address_space.h
index fd2f32b7d..8e13935af 100644
--- a/src/common/address_space.h
+++ b/src/common/address_space.h
@@ -22,7 +22,8 @@ struct EmptyStruct {};
  */
 template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
           VaType AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
-requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAddressSpaceMap {
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAddressSpaceMap {
 private:
     std::function<void(VaType, VaType)>
         unmapCallback{}; //!< Callback called when the mappings in an region have changed
@@ -40,8 +41,8 @@ protected:
         Block() = default;
 
-        Block(VaType virt, PaType phys, ExtraBlockInfo extraInfo)
-            : virt(virt), phys(phys), extraInfo(extraInfo) {}
+        Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_)
+            : virt(virt_), phys(phys_), extraInfo(extraInfo_) {}
 
         constexpr bool Valid() {
             return virt != UnmappedVa;
         }
@@ -102,7 +103,8 @@ public:
  * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
  */
 template <typename VaType, VaType UnmappedVa, VaType AddressSpaceBits>
-requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAllocator
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
     : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, AddressSpaceBits> {
 private:
     using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, AddressSpaceBits>;
diff --git a/src/common/algorithm.h b/src/common/algorithm.h
index 055dca142..c27c9241d 100644
--- a/src/common/algorithm.h
+++ b/src/common/algorithm.h
@@ -27,7 +27,7 @@ template <class ForwardIt, class T, class Compare = std::less<>>
 template <typename T, typename Func, typename... Args>
 T FoldRight(T initial_value, Func&& func, Args&&... args) {
     T value{initial_value};
-    const auto high_func = [&value, &func](T x) { value = func(value, x); };
+    const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
     (std::invoke(high_func, std::forward<Args>(args)), ...);
     return value;
 }
diff --git a/src/common/bit_field.h b/src/common/bit_field.h
index 368b7b98c..7e1df62b1 100644
--- a/src/common/bit_field.h
+++ b/src/common/bit_field.h
@@ -127,14 +127,11 @@ public:
         }
     }
 
-    BitField(T val) {
-        Assign(val);
-    }
-
-    BitField& operator=(T val) {
-        Assign(val);
-        return *this;
-    }
+    // This constructor and assignment operator might be considered ambiguous:
+    // Would they initialize the storage or just the bitfield?
+    // Hence, delete them. Use the Assign method to set bitfield values!
+    BitField(T val) = delete;
+    BitField& operator=(T val) = delete;
 
     constexpr BitField() noexcept = default;
 
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
index aed04d0b5..3a7a75aa7 100644
--- a/src/common/multi_level_page_table.cpp
+++ b/src/common/multi_level_page_table.cpp
@@ -1,8 +1,6 @@
 #include "common/multi_level_page_table.inc"
 
 namespace Common {
-template class Common::MultiLevelPageTable<GPUVAddr>;
-template class Common::MultiLevelPageTable<VAddr>;
-template class Common::MultiLevelPageTable<PAddr>;
+template class Common::MultiLevelPageTable<u64>;
 template class Common::MultiLevelPageTable<u32>;
 } // namespace Common
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
index 9a68cad93..4def6dba8 100644
--- a/src/common/multi_level_page_table.inc
+++ b/src/common/multi_level_page_table.inc
@@ -30,7 +30,7 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits,
 #ifdef _WIN32
     void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
 #else
-    void* base{mmap(nullptr, alloc_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
+    void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
 
     if (base == MAP_FAILED) {
         base = nullptr;
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 86d825af9..b02dbb9c9 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -13,7 +13,8 @@ using Core::Memory::YUZU_PAGESIZE;
 
 namespace Service::Nvidia::NvCore {
 
-NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {
+NvMap::Handle::Handle(u64 size_, Id id_)
+    : size(size_), aligned_size(size), orig_size(size), id(id_) {
     flags.raw = 0;
 }
 
@@ -21,19 +22,21 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
     std::scoped_lock lock(mutex);
 
     // Handles cannot be allocated twice
-    if (allocated)
+    if (allocated) {
         return NvResult::AccessDenied;
+    }
 
     flags = pFlags;
     kind = pKind;
     align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
 
     // This flag is only applicable for handles with an address passed
-    if (pAddress)
-        flags.keep_uncached_after_free = 0;
-    else
+    if (pAddress) {
+        flags.keep_uncached_after_free.Assign(0);
+    } else {
         LOG_CRITICAL(Service_NVDRV,
                      "Mapping nvmap handles without a CPU side address is unimplemented!");
+    }
 
     size = Common::AlignUp(size, YUZU_PAGESIZE);
     aligned_size = Common::AlignUp(size, align);
@@ -48,17 +51,19 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
 
 NvResult NvMap::Handle::Duplicate(bool internal_session) {
     // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
-    if (!allocated) [[unlikely]]
+    if (!allocated) [[unlikely]] {
         return NvResult::BadValue;
+    }
 
     std::scoped_lock lock(mutex);
 
     // If we internally use FromId the duplication tracking of handles won't work accurately due to
    // us not implementing per-process handle refs.
-    if (internal_session)
+    if (internal_session) {
         internal_dupes++;
-    else
+    } else {
         dupes++;
+    }
 
     return NvResult::Success;
 }
@@ -92,8 +97,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) {
         std::scoped_lock lock(handles_lock);
 
         auto it{handles.find(handle_description.id)};
-        if (it != handles.end())
+        if (it != handles.end()) {
             handles.erase(it);
+        }
 
         return true;
     } else {
@@ -102,8 +108,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) {
 }
 
 NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
-    if (!size) [[unlikely]]
+    if (!size) [[unlikely]] {
         return NvResult::BadValue;
+    }
 
     u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
     auto handle_description{std::make_shared<Handle>(size, id)};
@@ -133,8 +140,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
 
 u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
     auto handle_description{GetHandle(handle)};
-    if (!handle_description) [[unlikely]]
+    if (!handle_description) [[unlikely]] {
         return 0;
+    }
 
     std::scoped_lock lock(handle_description->mutex);
     if (!handle_description->pins) {
@@ -183,8 +191,9 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
 
 void NvMap::UnpinHandle(Handle::Id handle) {
     auto handle_description{GetHandle(handle)};
-    if (!handle_description)
+    if (!handle_description) {
         return;
+    }
 
     std::scoped_lock lock(handle_description->mutex);
     if (--handle_description->pins < 0) {
@@ -226,12 +235,13 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
 
         // Try to remove the shared ptr to the handle from the map, if nothing else is using the
         // handle then it will now be freed when `handle_description` goes out of scope
-        if (TryRemoveHandle(*handle_description))
+        if (TryRemoveHandle(*handle_description)) {
             LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
-        else
+        } else {
             LOG_DEBUG(Service_NVDRV,
                       "Tried to free nvmap handle: {} but didn't as it still has duplicates",
                       handle);
+        }
 
         freeInfo = {
             .address = handle_description->address,
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index 4f37dcf43..1082bb58d 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <atomic>
 #include <list>
 #include <memory>
 #include <optional>
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index d95a88393..d1beefba6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -188,6 +188,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
 
     allocation_map[params.offset] = {
         .size = size,
+        .mappings{},
         .page_size = params.page_size,
         .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
         .big_pages = params.page_size != VM::YUZU_PAGESIZE,
@@ -474,11 +475,13 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
         VaRegion{
             .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS,
             .page_size = VM::YUZU_PAGESIZE,
+            ._pad0_{},
             .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart,
         },
         VaRegion{
             .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits,
             .page_size = vm.big_page_size,
+            ._pad0_{},
             .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart,
         },
     };
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index a84e4d425..7fffb8e48 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -204,12 +204,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
 
     event.wait_handle =
         host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
-            auto& event = events[slot];
-            if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+            auto& event_ = events[slot];
+            if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
                 EventState::Waiting) {
-                event.kevent->GetWritableEvent().Signal();
+                event_.kevent->GetWritableEvent().Signal();
             }
-            event.status.store(EventState::Signalled, std::memory_order_release);
+            event_.status.store(EventState::Signalled, std::memory_order_release);
         });
     return NvResult::Timeout;
 }
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index 5e3820085..fed537039 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -12,8 +12,8 @@ namespace Service::Nvidia::Devices {
 u32 nvhost_nvdec::next_id{};
 
-nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core)
-    : nvhost_nvdec_common{system_, core, NvCore::ChannelType::NvDec} {}
+nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 
 nvhost_nvdec::~nvhost_nvdec() = default;
 
 NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 490e399f4..2e4ff988c 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -11,8 +11,8 @@ namespace Service::Nvidia::Devices {
 u32 nvhost_vic::next_id{};
 
-nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core)
-    : nvhost_nvdec_common{system_, core, NvCore::ChannelType::VIC} {}
+nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
 
 nvhost_vic::~nvhost_vic() = default;
 
NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 992c117f1..f84fc8c37 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -269,7 +269,7 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
         params.address = freeInfo->address;
         params.size = static_cast<u32>(freeInfo->size);
         params.flags.raw = 0;
-        params.flags.map_uncached = freeInfo->was_uncached;
+        params.flags.map_uncached.Assign(freeInfo->was_uncached);
     } else {
         // This is possible when there's internel dups or other duplicates.
     }
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index 31c45236e..b26254753 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <functional>
+#include <list>
 #include <memory>
 #include <string>
 #include <unordered_map>
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index f083811ec..9c917cacf 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -58,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
 class NativeWindow final {
 public:
     constexpr explicit NativeWindow(u32 id_) : id{id_} {}
+    constexpr explicit NativeWindow(const NativeWindow& other) = default;
 
 private:
     const u32 magic = 2;
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 0726d4d21..e8be58357 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -269,7 +269,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) {
     }
     std::optional<ConstBufferAddr> lhs{Track(op1, env)};
     if (lhs) {
-        lhs->shift_left = std::countr_zero(op2.U32());
+        lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
     }
     return lhs;
     break;
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 2e616cee4..8e26b3f95 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -1323,7 +1323,8 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
         return;
     }
     if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
-        address_size = gpu_memory->MaxContinousRange(gpu_addr_begin, address_size);
+        address_size =
+            static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
     }
     const u32 size = address_size; // TODO: Analyze stride and number of vertices
     vertex_buffers[index] = Binding{