From ca3db0d7c94a20668781830ff852dbf512598efb Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 1 Sep 2022 05:45:22 +0200 Subject: [PATCH] General: address feedback --- src/common/multi_level_page_table.h | 8 +-- src/core/hle/service/nvdrv/core/container.cpp | 12 +++- src/core/hle/service/nvdrv/core/container.h | 23 ++++++-- src/core/hle/service/nvdrv/core/nvmap.cpp | 4 +- src/core/hle/service/nvdrv/core/nvmap.h | 59 +++++++++---------- .../service/nvdrv/core/syncpoint_manager.cpp | 32 +++++----- .../service/nvdrv/core/syncpoint_manager.h | 34 +++++------ .../service/nvdrv/devices/nvhost_as_gpu.cpp | 28 +++++---- .../hle/service/nvdrv/devices/nvhost_ctrl.h | 2 +- .../hle/service/nvdrv/devices/nvhost_gpu.cpp | 2 +- .../service/nvdrv/devices/nvhost_nvdec.cpp | 13 ++-- .../hle/service/nvdrv/devices/nvhost_nvdec.h | 3 - .../nvdrv/devices/nvhost_nvdec_common.cpp | 13 ++-- .../nvdrv/devices/nvhost_nvdec_common.h | 5 -- .../hle/service/nvdrv/devices/nvhost_vic.cpp | 16 ++--- .../hle/service/nvdrv/devices/nvhost_vic.h | 3 - src/core/hle/service/nvdrv/devices/nvmap.h | 4 +- src/core/hle/service/nvdrv/nvdrv.cpp | 4 +- src/core/hle/service/nvdrv/nvdrv.h | 2 +- src/video_core/control/channel_state.cpp | 7 +-- src/video_core/control/channel_state.h | 4 +- src/video_core/control/channel_state_cache.h | 2 +- src/video_core/control/scheduler.cpp | 6 +- src/video_core/control/scheduler.h | 2 +- src/video_core/engines/maxwell_dma.h | 2 +- src/video_core/gpu.cpp | 2 +- src/video_core/host1x/host1x.h | 2 +- src/video_core/host1x/syncpoint_manager.cpp | 12 ++-- src/video_core/host1x/syncpoint_manager.h | 24 ++++---- src/video_core/memory_manager.h | 2 +- 30 files changed, 167 insertions(+), 165 deletions(-) diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h index 08092c89a6..31f6676a06 100644 --- a/src/common/multi_level_page_table.h +++ b/src/common/multi_level_page_table.h @@ -46,19 +46,19 @@ public: void ReserveRange(u64 start, std::size_t size); - [[nodiscard]] constexpr const BaseAddr& operator[](std::size_t index) const { + [[nodiscard]] const BaseAddr& operator[](std::size_t index) const { return base_ptr[index]; } - [[nodiscard]] constexpr BaseAddr& operator[](std::size_t index) { + [[nodiscard]] BaseAddr& operator[](std::size_t index) { return base_ptr[index]; } - [[nodiscard]] constexpr BaseAddr* data() { + [[nodiscard]] BaseAddr* data() { return base_ptr; } - [[nodiscard]] constexpr const BaseAddr* data() const { + [[nodiscard]] const BaseAddr* data() const { return base_ptr; } diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index d2a6326466..37ca24f5db 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -10,9 +10,11 @@ namespace Service::Nvidia::NvCore { struct ContainerImpl { - ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {} + explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) + : file{host1x_}, manager{host1x_}, device_file_data{} {} NvMap file; SyncpointManager manager; + Container::Host1xDeviceFileData device_file_data; }; Container::Container(Tegra::Host1x::Host1x& host1x_) { @@ -29,6 +31,14 @@ const NvMap& Container::GetNvMapFile() const { return impl->file; } +Container::Host1xDeviceFileData& Container::Host1xDeviceFile() { + return impl->device_file_data; +} + +const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const { + return impl->device_file_data; +} + 
SyncpointManager& Container::GetSyncpointManager() { return impl->manager; } diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index 5c8b958034..b4b63ac90c 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -4,15 +4,15 @@ #pragma once +#include <deque> #include <memory> +#include <unordered_map> -namespace Tegra { +#include "core/hle/service/nvdrv/nvdata.h" -namespace Host1x { +namespace Tegra::Host1x { class Host1x; -} // namespace Host1x - -} // namespace Tegra +} // namespace Tegra::Host1x namespace Service::Nvidia::NvCore { @@ -23,7 +23,7 @@ struct ContainerImpl; class Container { public: - Container(Tegra::Host1x::Host1x& host1x); + explicit Container(Tegra::Host1x::Host1x& host1x); ~Container(); NvMap& GetNvMapFile(); @@ -34,6 +34,17 @@ public: const SyncpointManager& GetSyncpointManager() const; + struct Host1xDeviceFileData { + std::unordered_map<DeviceFD, u32> fd_to_id{}; + std::deque<u32> syncpts_accumulated{}; + u32 nvdec_next_id{}; + u32 vic_next_id{}; + }; + + Host1xDeviceFileData& Host1xDeviceFile(); + + const Host1xDeviceFileData& Host1xDeviceFile() const; + private: std::unique_ptr<ContainerImpl> impl; }; diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index e63ec77174..fbd8a74a55 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -119,7 +119,7 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) { std::scoped_lock lock(handles_lock); try { return handles.at(handle); - } catch ([[maybe_unused]] std::out_of_range& e) { + } catch (std::out_of_range&) { return nullptr; } } @@ -128,7 +128,7 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) { std::scoped_lock lock(handles_lock); try { return handles.at(handle)->address; - } catch ([[maybe_unused]] std::out_of_range& e) { + } catch (std::out_of_range&) { return 0; } } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 6d6dac023a..b9dd3801f4 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -98,35 +98,6 @@ public: } }; -private: - std::list<std::shared_ptr<Handle>> unmap_queue{}; - std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` - - std::unordered_map<Handle::Id, std::shared_ptr<Handle>> - handles{}; //!< Main owning map of handles - std::mutex handles_lock; //!< Protects access to `handles` - - static constexpr u32 HandleIdIncrement{ - 4}; //!< Each new handle ID is an increment of 4 from the previous - std::atomic<u32> next_handle_id{HandleIdIncrement}; - Tegra::Host1x::Host1x& host1x; - - void AddHandle(std::shared_ptr<Handle> handle); - - /** - * @brief Unmaps and frees the SMMU memory region a handle is mapped to - * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this - */ - void UnmapHandle(Handle& handle_description); - - /** - * @brief Removes a handle from the map taking its dupes into account - * @note handle_description.mutex MUST be locked when calling this - * @return If the handle was removed from the map - */ - bool TryRemoveHandle(const Handle& handle_description); - -public: /** * @brief Encapsulates the result of a FreeHandle operation */ @@ -136,7 +107,7 @@ public: bool was_uncached; //!< If the handle was allocated as uncached }; - NvMap(Tegra::Host1x::Host1x& host1x); + explicit NvMap(Tegra::Host1x::Host1x& host1x); /** * @brief Creates an unallocated handle of the given size @@ -172,5 +143,33 @@ public: * describing the prior state of the handle */ std::optional<FreeInfo>
FreeHandle(Handle::Id handle, bool internal_session); + +private: + std::list<std::shared_ptr<Handle>> unmap_queue{}; + std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue` + + std::unordered_map<Handle::Id, std::shared_ptr<Handle>> + handles{}; //!< Main owning map of handles + std::mutex handles_lock; //!< Protects access to `handles` + + static constexpr u32 HandleIdIncrement{ + 4}; //!< Each new handle ID is an increment of 4 from the previous + std::atomic<u32> next_handle_id{HandleIdIncrement}; + Tegra::Host1x::Host1x& host1x; + + void AddHandle(std::shared_ptr<Handle> handle); + + /** + * @brief Unmaps and frees the SMMU memory region a handle is mapped to + * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this + */ + void UnmapHandle(Handle& handle_description); + + /** + * @brief Removes a handle from the map taking its dupes into account + * @note handle_description.mutex MUST be locked when calling this + * @return If the handle was removed from the map + */ + bool TryRemoveHandle(const Handle& handle_description); }; } // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp index 072b3a22f1..eda2041a0d 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp @@ -18,23 +18,23 @@ SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host ReserveSyncpoint(VBlank0SyncpointId, true); ReserveSyncpoint(VBlank1SyncpointId, true); - for (u32 syncpointId : channel_syncpoints) { - if (syncpointId) { - ReserveSyncpoint(syncpointId, false); + for (u32 syncpoint_id : channel_syncpoints) { + if (syncpoint_id) { + ReserveSyncpoint(syncpoint_id, false); } } } SyncpointManager::~SyncpointManager() = default; -u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) { +u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) { if (syncpoints.at(id).reserved) { ASSERT_MSG(false, "Requested syncpoint is in use"); return 0; } syncpoints.at(id).reserved = true; - syncpoints.at(id).interfaceManaged = clientManaged; + syncpoints.at(id).interface_managed = client_managed; return id; } @@ -49,9 +49,9 @@ u32 SyncpointManager::FindFreeSyncpoint() { return 0; } -u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) { +u32 SyncpointManager::AllocateSyncpoint(bool client_managed) { std::lock_guard lock(reservation_lock); - return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged); + return ReserveSyncpoint(FindFreeSyncpoint(), client_managed); } void SyncpointManager::FreeSyncpoint(u32 id) { @@ -64,7 +64,7 @@ bool SyncpointManager::IsSyncpointAllocated(u32 id) { return (id <= SyncpointCount) && syncpoints[id].reserved; } -bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) { +bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const { const SyncpointInfo& syncpoint{syncpoints.at(id)}; if (!syncpoint.reserved) { @@ -74,10 +74,10 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) { // If the interface manages counters then we don't keep track of the maximum value as it handles // sanity checking the values then - if (syncpoint.interfaceManaged) { - return static_cast<s32>(syncpoint.counterMin - threshold) >= 0; + if (syncpoint.interface_managed) { + return static_cast<s32>(syncpoint.counter_min - threshold) >= 0; } else { - return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold); + return (syncpoint.counter_max - threshold) >=
(syncpoint.counter_min - threshold); } } @@ -87,7 +87,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) { return 0; } - return syncpoints.at(id).counterMax += amount; + return syncpoints.at(id).counter_max += amount; } u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { @@ -96,7 +96,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) { return 0; } - return syncpoints.at(id).counterMin; + return syncpoints.at(id).counter_min; } u32 SyncpointManager::UpdateMin(u32 id) { @@ -105,8 +105,8 @@ u32 SyncpointManager::UpdateMin(u32 id) { return 0; } - syncpoints.at(id).counterMin = host1x.GetSyncpointManager().GetHostSyncpointValue(id); - return syncpoints.at(id).counterMin; + syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id); + return syncpoints.at(id).counter_min; } NvFence SyncpointManager::GetSyncpointFence(u32 id) { @@ -115,7 +115,7 @@ NvFence SyncpointManager::GetSyncpointFence(u32 id) { return NvFence{}; } - return {.id = static_cast(id), .value = syncpoints.at(id).counterMax}; + return {.id = static_cast(id), .value = syncpoints.at(id).counter_max}; } } // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h index 6b71cd33df..b76ef90326 100644 --- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h +++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h @@ -11,13 +11,9 @@ #include "common/common_types.h" #include "core/hle/service/nvdrv/nvdata.h" -namespace Tegra { - -namespace Host1x { +namespace Tegra::Host1x { class Host1x; -} // namespace Host1x - -} // namespace Tegra +} // namespace Tegra::Host1x namespace Service::Nvidia::NvCore { @@ -54,15 +50,15 @@ public: * @brief Finds a free syncpoint and reserves it * @return The ID of the reserved syncpoint */ - u32 AllocateSyncpoint(bool clientManaged); + u32 AllocateSyncpoint(bool client_managed); /** * @url * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259 */ - bool HasSyncpointExpired(u32 id, u32 threshold); + bool HasSyncpointExpired(u32 id, u32 threshold) const; - bool IsFenceSignalled(NvFence fence) { + bool IsFenceSignalled(NvFence fence) const { return HasSyncpointExpired(fence.id, fence.value); } @@ -107,7 +103,7 @@ private: /** * @note reservation_lock should be locked when calling this */ - u32 ReserveSyncpoint(u32 id, bool clientManaged); + u32 ReserveSyncpoint(u32 id, bool client_managed); /** * @return The ID of the first free syncpoint @@ -115,15 +111,15 @@ private: u32 FindFreeSyncpoint(); struct SyncpointInfo { - std::atomic counterMin; //!< The least value the syncpoint can be (The value it was - //!< when it was last synchronized with host1x) - std::atomic counterMax; //!< The maximum value the syncpoint can reach according to the - //!< current usage - bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface, a - //!< client interface is a HW block that can handle host1x - //!< transactions on behalf of a host1x client (Which would otherwise - //!< need to be manually synced using PIO which is synchronous and - //!< requires direct cooperation of the CPU) + std::atomic counter_min; //!< The least value the syncpoint can be (The value it was + //!< when it was last synchronized with host1x) + std::atomic counter_max; //!< The maximum value the syncpoint can reach according to + //!< the current usage + bool 
interface_managed; //!< If the syncpoint is managed by a host1x client interface, a + //!< client interface is a HW block that can handle host1x + //!< transactions on behalf of a host1x client (Which would + //!< otherwise need to be manually synced using PIO which is + //!< synchronous and requires direct cooperation of the CPU) bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved //!< value }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 192503ffc3..6411dbf433 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -106,7 +106,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector& input, std::vector& return NvResult::BadValue; } - if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) { + if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) { LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size); return NvResult::BadValue; } @@ -124,12 +124,13 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector& input, std::vector& vm.va_range_end = params.va_range_end; } - const u64 start_pages{vm.va_range_start >> VM::PAGE_SIZE_BITS}; - const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS}; + const auto start_pages{static_cast(vm.va_range_start >> VM::PAGE_SIZE_BITS)}; + const auto end_pages{static_cast(vm.va_range_split >> VM::PAGE_SIZE_BITS)}; vm.small_page_allocator = std::make_shared(start_pages, end_pages); - const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits}; - const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits}; + const auto start_big_pages{static_cast(vm.va_range_split >> vm.big_page_size_bits)}; + const auto end_big_pages{ + static_cast((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)}; vm.big_page_allocator = std::make_unique(start_big_pages, end_big_pages); gmmu = std::make_shared(system, 40, vm.big_page_size_bits, @@ -210,10 +211,11 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) { // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state // Only FreeSpace can unmap them fully - if (mapping->sparse_alloc) + if (mapping->sparse_alloc) { gmmu->MapSparse(offset, mapping->size, mapping->big_page); - else + } else { gmmu->Unmap(offset, mapping->size); + } mapping_map.erase(offset); } @@ -256,7 +258,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector& input, std::vector& allocator.Free(static_cast(params.offset >> page_size_bits), static_cast(allocation.size >> page_size_bits)); allocation_map.erase(params.offset); - } catch ([[maybe_unused]] const std::out_of_range& e) { + } catch (const std::out_of_range&) { return NvResult::BadValue; } @@ -351,7 +353,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectorMap(gpu_address, cpu_address, params.mapping_size, mapping->big_page); return NvResult::Success; - } catch ([[maybe_unused]] const std::out_of_range& e) { + } catch (const std::out_of_range&) { LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}", params.offset); return NvResult::BadValue; @@ -367,11 +369,11 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectororig_size}; bool big_page{[&]() { - if (Common::IsAligned(handle->align, vm.big_page_size)) + if (Common::IsAligned(handle->align, vm.big_page_size)) { return true; - else if 
(Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) + } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) { return false; - else { + } else { ASSERT(false); return false; } @@ -450,7 +452,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector& input, std::vector& input, std::vectorinitiated) { + if (channel_state->initialized) { LOG_CRITICAL(Service_NVDRV, "Already allocated!"); return NvResult::AlreadyAllocated; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index fed5370394..1703f9cc37 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -5,13 +5,12 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" #include "video_core/renderer_base.h" namespace Service::Nvidia::Devices { -u32 nvhost_nvdec::next_id{}; - nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_) : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {} nvhost_nvdec::~nvhost_nvdec() = default; @@ -22,8 +21,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& case 0x0: switch (command.cmd) { case 0x1: { - if (!fd_to_id.contains(fd)) { - fd_to_id[fd] = next_id++; + auto& host1x_file = core.Host1xDeviceFile(); + if (!host1x_file.fd_to_id.contains(fd)) { + host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++; } return Submit(fd, input, output); } @@ -74,8 +74,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) { void nvhost_nvdec::OnClose(DeviceFD fd) { LOG_INFO(Service_NVDRV, "NVDEC video stream ended"); - const auto iter = fd_to_id.find(fd); - if (iter != fd_to_id.end()) { + auto& host1x_file = core.Host1xDeviceFile(); + const auto iter = host1x_file.fd_to_id.find(fd); + if (iter != host1x_file.fd_to_id.end()) { system.GPU().ClearCdmaInstance(iter->second); } system.AudioCore().SetNVDECActive(false); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index 3261ce1d40..c1b4e53e83 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -22,9 +22,6 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; - -private: - static u32 next_id; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 2ec1ad3e9e..99eede702f 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -46,13 +46,11 @@ std::size_t WriteVectors(std::vector& dst, const std::vector& src, std::s } } // Anonymous namespace -std::unordered_map nvhost_nvdec_common::fd_to_id{}; -std::deque nvhost_nvdec_common::syncpts_accumulated{}; - nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_, NvCore::ChannelType channel_type_) : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()}, channel_type{channel_type_} { + auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated; if (syncpts_accumulated.empty()) { channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false); } else { @@ -60,8 +58,9 @@ nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, 
NvCore::Containe syncpts_accumulated.pop_front(); } } + nvhost_nvdec_common::~nvhost_nvdec_common() { - syncpts_accumulated.push_back(channel_syncpoint); + core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint); } NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector& input) { @@ -108,7 +107,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector& input, Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), cmdlist.size() * sizeof(u32)); - gpu.PushCommandBuffer(fd_to_id[fd], cmdlist); + gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); } std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmit)); // Some games expect command_buffers to be written back @@ -186,8 +185,4 @@ Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) { return nullptr; } -void nvhost_nvdec_common::Reset() { - fd_to_id.clear(); -} - } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 93990bb9b8..fe76100c86 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -25,8 +25,6 @@ public: NvCore::ChannelType channel_type); ~nvhost_nvdec_common() override; - static void Reset(); - protected: struct IoctlSetNvmapFD { s32_le nvmap_fd{}; @@ -119,7 +117,6 @@ protected: Kernel::KEvent* QueryEvent(u32 event_id) override; - static std::unordered_map fd_to_id; u32 channel_syncpoint; s32_le nvmap_fd{}; u32_le submit_timeout{}; @@ -128,8 +125,6 @@ protected: NvCore::NvMap& nvmap; NvCore::ChannelType channel_type; std::array device_syncpoints{}; - - static std::deque syncpts_accumulated; }; }; // namespace Devices } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 2e4ff988cd..73f97136e3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -4,13 +4,12 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/devices/nvhost_vic.h" #include "video_core/renderer_base.h" namespace Service::Nvidia::Devices { -u32 nvhost_vic::next_id{}; - nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_) : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {} @@ -21,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector& i switch (command.group) { case 0x0: switch (command.cmd) { - case 0x1: - if (!fd_to_id.contains(fd)) { - fd_to_id[fd] = next_id++; + case 0x1: { + auto& host1x_file = core.Host1xDeviceFile(); + if (!host1x_file.fd_to_id.contains(fd)) { + host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++; } return Submit(fd, input, output); + } case 0x2: return GetSyncpoint(input, output); case 0x3: @@ -69,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector& i void nvhost_vic::OnOpen(DeviceFD fd) {} void nvhost_vic::OnClose(DeviceFD fd) { - const auto iter = fd_to_id.find(fd); - if (iter != fd_to_id.end()) { + auto& host1x_file = core.Host1xDeviceFile(); + const auto iter = host1x_file.fd_to_id.find(fd); + if (iter != host1x_file.fd_to_id.end()) { system.GPU().ClearCdmaInstance(iter->second); } } diff --git 
a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index 59e23b41ef..f164caafb3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -21,8 +21,5 @@ public: void OnOpen(DeviceFD fd) override; void OnClose(DeviceFD fd) override; - -private: - static u32 next_id; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index 52e1d7cff9..e9bfd03580 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h @@ -23,8 +23,8 @@ public: explicit nvmap(Core::System& system_, NvCore::Container& container); ~nvmap() override; - nvmap(nvmap const&) = delete; - nvmap& operator=(nvmap const&) = delete; + nvmap(const nvmap&) = delete; + nvmap& operator=(const nvmap&) = delete; NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector& input, std::vector& output) override; diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 7929443d27..5e7b7468f4 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -101,9 +101,7 @@ Module::Module(Core::System& system) }; } -Module::~Module() { - Devices::nvhost_nvdec_common::Reset(); -} +Module::~Module() {} NvResult Module::VerifyFD(DeviceFD fd) const { if (fd < 0) { diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index a2aeb80b49..146d046a93 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -46,7 +46,7 @@ class Module; class EventInterface { public: - EventInterface(Module& module_); + explicit EventInterface(Module& module_); ~EventInterface(); Kernel::KEvent* CreateEvent(std::string name); diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp index b04922ac0e..cdecc3a91f 100644 --- a/src/video_core/control/channel_state.cpp +++ b/src/video_core/control/channel_state.cpp @@ -14,10 +14,7 @@ namespace Tegra::Control { -ChannelState::ChannelState(s32 bind_id_) { - bind_id = bind_id_; - initiated = false; -} +ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {} void ChannelState::Init(Core::System& system, GPU& gpu) { ASSERT(memory_manager); @@ -27,7 +24,7 @@ void ChannelState::Init(Core::System& system, GPU& gpu) { kepler_compute = std::make_unique(system, *memory_manager); maxwell_dma = std::make_unique(system, *memory_manager); kepler_memory = std::make_unique(system, *memory_manager); - initiated = true; + initialized = true; } void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) { diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h index 305b21cbaa..3a7b9872c1 100644 --- a/src/video_core/control/channel_state.h +++ b/src/video_core/control/channel_state.h @@ -34,7 +34,7 @@ class DmaPusher; namespace Control { struct ChannelState { - ChannelState(s32 bind_id); + explicit ChannelState(s32 bind_id); ChannelState(const ChannelState& state) = delete; ChannelState& operator=(const ChannelState&) = delete; ChannelState(ChannelState&& other) noexcept = default; @@ -60,7 +60,7 @@ struct ChannelState { std::unique_ptr dma_pusher; - bool initiated{}; + bool initialized{}; }; } // namespace Control diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index 5246192a80..584a0c26cd 100644 --- 
a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -32,7 +32,7 @@ namespace VideoCommon { class ChannelInfo { public: ChannelInfo() = delete; - ChannelInfo(Tegra::Control::ChannelState& state); + explicit ChannelInfo(Tegra::Control::ChannelState& state); ChannelInfo(const ChannelInfo& state) = delete; ChannelInfo& operator=(const ChannelInfo&) = delete; ChannelInfo(ChannelInfo&& other) = default; diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp index 7330426909..f7cbe204ee 100644 --- a/src/video_core/control/scheduler.cpp +++ b/src/video_core/control/scheduler.cpp @@ -3,6 +3,7 @@ #include +#include "common/assert.h" #include "video_core/control/channel_state.h" #include "video_core/control/scheduler.h" #include "video_core/gpu.h" @@ -13,8 +14,9 @@ Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {} Scheduler::~Scheduler() = default; void Scheduler::Push(s32 channel, CommandList&& entries) { - std::unique_lock lk(scheduling_guard); + std::unique_lock lk(scheduling_guard); auto it = channels.find(channel); + ASSERT(it != channels.end()); auto channel_state = it->second; gpu.BindChannel(channel_state->bind_id); channel_state->dma_pusher->Push(std::move(entries)); @@ -23,7 +25,7 @@ void Scheduler::Push(s32 channel, CommandList&& entries) { void Scheduler::DeclareChannel(std::shared_ptr new_channel) { s32 channel = new_channel->bind_id; - std::unique_lock lk(scheduling_guard); + std::unique_lock lk(scheduling_guard); channels.emplace(channel, new_channel); } diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h index 305a01e0a0..44addf61c4 100644 --- a/src/video_core/control/scheduler.h +++ b/src/video_core/control/scheduler.h @@ -19,7 +19,7 @@ struct ChannelState; class Scheduler { public: - Scheduler(GPU& gpu_); + explicit Scheduler(GPU& gpu_); ~Scheduler(); void Push(s32 channel, CommandList&& entries); diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 9c5d567a67..bc48320ce2 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -195,7 +195,7 @@ public: BitField<24, 2, u32> num_dst_components_minus_one; }; - Swizzle GetComponent(size_t i) { + Swizzle GetComponent(size_t i) const { const u32 raw = dst_components_raw; return static_cast((raw >> (i * 3)) & 0x7); } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index d7a3dd96ba..28b38273e8 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -355,7 +355,7 @@ struct GPU::Impl { std::condition_variable sync_cv; - std::list> sync_requests; + std::list> sync_requests; std::atomic current_sync_fence{}; u64 last_sync_fence{}; std::mutex sync_request_mutex; diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h index 7ecf853d95..57082ae54f 100644 --- a/src/video_core/host1x/host1x.h +++ b/src/video_core/host1x/host1x.h @@ -19,7 +19,7 @@ namespace Host1x { class Host1x { public: - Host1x(Core::System& system); + explicit Host1x(Core::System& system); SyncpointManager& GetSyncpointManager() { return syncpoint_manager; diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp index 4471bacae2..326e8355ad 100644 --- a/src/video_core/host1x/syncpoint_manager.cpp +++ b/src/video_core/host1x/syncpoint_manager.cpp @@ -12,13 +12,13 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); SyncpointManager::ActionHandle 
SyncpointManager::RegisterAction( std::atomic& syncpoint, std::list& action_storage, u32 expected_value, - std::function& action) { + std::function&& action) { if (syncpoint.load(std::memory_order_acquire) >= expected_value) { action(); return {}; } - std::unique_lock lk(guard); + std::unique_lock lk(guard); if (syncpoint.load(std::memory_order_relaxed) >= expected_value) { action(); return {}; @@ -30,12 +30,12 @@ SyncpointManager::ActionHandle SyncpointManager::RegisterAction( } ++it; } - return action_storage.emplace(it, expected_value, action); + return action_storage.emplace(it, expected_value, std::move(action)); } void SyncpointManager::DeregisterAction(std::list& action_storage, ActionHandle& handle) { - std::unique_lock lk(guard); + std::unique_lock lk(guard); action_storage.erase(handle); } @@ -68,7 +68,7 @@ void SyncpointManager::Increment(std::atomic& syncpoint, std::condition_var std::list& action_storage) { auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1}; - std::unique_lock lk(guard); + std::unique_lock lk(guard); auto it = action_storage.begin(); while (it != action_storage.end()) { if (it->expected_value > new_value) { @@ -87,7 +87,7 @@ void SyncpointManager::Wait(std::atomic& syncpoint, std::condition_variable return; } - std::unique_lock lk(guard); + std::unique_lock lk(guard); wait_cv.wait(lk, pred); } diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h index 72220a09a5..50a264e23f 100644 --- a/src/video_core/host1x/syncpoint_manager.h +++ b/src/video_core/host1x/syncpoint_manager.h @@ -18,34 +18,34 @@ namespace Host1x { class SyncpointManager { public: - u32 GetGuestSyncpointValue(u32 id) { + u32 GetGuestSyncpointValue(u32 id) const { return syncpoints_guest[id].load(std::memory_order_acquire); } - u32 GetHostSyncpointValue(u32 id) { + u32 GetHostSyncpointValue(u32 id) const { return syncpoints_host[id].load(std::memory_order_acquire); } struct RegisteredAction { - RegisteredAction(u32 expected_value_, std::function& action_) - : expected_value{expected_value_}, action{action_} {} + explicit RegisteredAction(u32 expected_value_, std::function&& action_) + : expected_value{expected_value_}, action{std::move(action_)} {} u32 expected_value; - std::function action; + std::function action; }; using ActionHandle = std::list::iterator; template ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) { - std::function func(action); + std::function func(action); return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id], - expected_value, func); + expected_value, std::move(func)); } template ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) { - std::function func(action); + std::function func(action); return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id], - expected_value, func); + expected_value, std::move(func)); } void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle); @@ -60,11 +60,11 @@ public: void WaitHost(u32 syncpoint_id, u32 expected_value); - bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) { + bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const { return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value; } - bool IsReadyHost(u32 syncpoint_id, u32 expected_value) { + bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const { return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value; } 
@@ -74,7 +74,7 @@ private: ActionHandle RegisterAction(std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value, - std::function& action); + std::function&& action); void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle); diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index ae4fd98dfa..f992e29f3d 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -126,7 +126,7 @@ private: void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size); template <bool is_big_page> - [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const { + [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const { if constexpr (is_big_page) { return (gpu_addr >> big_page_bits) & big_page_table_mask; } else {
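
For reference, a minimal self-contained sketch of the central change in this patch: per-device-file state that previously lived in static members of the nvhost devices (fd_to_id, next_id, syncpts_accumulated) now lives in a Host1xDeviceFileData struct owned by NvCore::Container, so each driver instance carries its own copy. Everything except the Host1xDeviceFileData fields is a simplified stand-in, not the actual yuzu declarations.

    // Minimal model of the Host1xDeviceFileData ownership pattern. Assumed
    // simplifications: DeviceFD/u32 are plain integer aliases, Container is a
    // stand-in for NvCore::Container, SubmitNvdec mirrors the flow of
    // nvhost_nvdec::Ioctl1 case 0x1 in this patch.
    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <unordered_map>

    using DeviceFD = std::int32_t;
    using u32 = std::uint32_t;

    struct Host1xDeviceFileData {
        std::unordered_map<DeviceFD, u32> fd_to_id{}; // per-fd cdma channel ids
        std::deque<u32> syncpts_accumulated{};        // syncpoints recycled between sessions
        u32 nvdec_next_id{};
        u32 vic_next_id{};
    };

    class Container { // stand-in for NvCore::Container
    public:
        Host1xDeviceFileData& Host1xDeviceFile() {
            return device_file_data;
        }

    private:
        Host1xDeviceFileData device_file_data;
    };

    // The first submit on a given fd lazily assigns it the next nvdec channel id.
    u32 SubmitNvdec(Container& core, DeviceFD fd) {
        auto& host1x_file = core.Host1xDeviceFile();
        if (!host1x_file.fd_to_id.contains(fd)) {
            host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
        }
        return host1x_file.fd_to_id[fd];
    }

    int main() {
        Container core;
        std::cout << SubmitNvdec(core, 3) << ' ' << SubmitNvdec(core, 7) << ' '
                  << SubmitNvdec(core, 3) << '\n'; // prints: 0 1 0
    }

Because the state is owned by the container rather than being static, tearing down one driver module no longer needs the old nvhost_nvdec_common::Reset() hook that this patch removes.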
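
SyncpointManager::HasSyncpointExpired above compares counters relative to the threshold so that u32 wrap-around is handled; a plain counter >= threshold test would misbehave once the counter wraps. A stand-alone sketch of just that comparison (simplified; not the full class):

    // Wrap-around-safe expiry checks mirroring HasSyncpointExpired in the patch.
    #include <cassert>
    #include <cstdint>

    using u32 = std::uint32_t;
    using s32 = std::int32_t;

    // Interface-managed syncpoint: only counter_min is tracked, and the threshold
    // counts as reached once (counter_min - threshold), viewed as signed, is >= 0.
    bool HasExpiredInterfaceManaged(u32 counter_min, u32 threshold) {
        return static_cast<s32>(counter_min - threshold) >= 0;
    }

    // Otherwise both bounds are tracked and compared as offsets from the threshold.
    bool HasExpired(u32 counter_min, u32 counter_max, u32 threshold) {
        return (counter_max - threshold) >= (counter_min - threshold);
    }

    int main() {
        // Near the 32-bit boundary the subtraction still yields the right answer.
        assert(HasExpiredInterfaceManaged(0x00000002u, 0xFFFFFFF0u));  // wrapped past threshold
        assert(!HasExpiredInterfaceManaged(0xFFFFFFF0u, 0xFFFFFFF8u)); // not reached yet
    }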
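
The host1x SyncpointManager changes above switch the registered callback to pass-by-rvalue-reference and std::move it into the stored action list. The sketch below illustrates that registration path under two assumptions that are not fully visible in the patch text: the callback type is std::function<void()>, and the list is kept ordered by expected_value.

    // Illustrative registration path: run immediately if already signalled,
    // otherwise move the callback into a list ordered by expected_value.
    #include <cstdint>
    #include <functional>
    #include <list>
    #include <utility>

    using u32 = std::uint32_t;

    struct RegisteredAction {
        explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
            : expected_value{expected_value_}, action{std::move(action_)} {}

        u32 expected_value;
        std::function<void()> action;
    };

    using ActionHandle = std::list<RegisteredAction>::iterator;

    ActionHandle RegisterAction(u32 current_value, std::list<RegisteredAction>& action_storage,
                                u32 expected_value, std::function<void()>&& action) {
        if (current_value >= expected_value) {
            action(); // already past the target value: fire now, store nothing
            return {};
        }
        // Insert before the first entry with a larger expected_value so a later
        // increment can walk the list front-to-back and stop early.
        auto it = action_storage.begin();
        while (it != action_storage.end() && it->expected_value <= expected_value) {
            ++it;
        }
        return action_storage.emplace(it, expected_value, std::move(action));
    }

Taking the std::function by rvalue reference and moving it into the emplaced node avoids copying the callable twice (once into the parameter, once into the list entry), which matches the std::move(action) call visible in the diff.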