Mirror of https://github.com/yuzu-emu/yuzu-android (synced 2024-12-26 09:01:20 -08:00)
Texture Cache: Implement Blacklisting.
commit 0e8cf38f39 (parent 138d9d7eff)

The change adds a Blacklisted bit to ImageFlagBits and a TextureCache::BlackListImage() entry point, and teaches the Vulkan compute and graphics pipelines to blacklist images that shaders write as 2D storage images: a blacklisted image is scaled back down and excluded from further rescaling.
@@ -111,6 +111,7 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
    std::array<ImageId, max_elements> image_view_ids;
    boost::container::static_vector<u32, max_elements> image_view_indices;
    boost::container::static_vector<VkSampler, max_elements> samplers;
    boost::container::static_vector<bool, max_elements> image_view_blacklist;

    const auto& qmd{kepler_compute.launch_description};
    const auto& cbufs{qmd.const_buffer_config};
@@ -151,10 +152,34 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
            samplers.push_back(sampler->Handle());
        }
    }
    std::ranges::for_each(info.image_descriptors, add_image);
    const u32 black_list_base = image_view_indices.size();
    bool atleast_one_blacklisted = false;
    for (const auto& desc : info.image_descriptors) {
        const bool is_black_listed =
            desc.is_written && (desc.type == Shader::TextureType::Color2D ||
                                desc.type == Shader::TextureType::ColorArray2D);
        for (u32 index = 0; index < desc.count; ++index) {
            image_view_blacklist.push_back(is_black_listed);
        }
        atleast_one_blacklisted |= is_black_listed;
        add_image(desc);
    }

    const std::span indices_span(image_view_indices.data(), image_view_indices.size());
    bool has_listed_stuffs;
    do {
        has_listed_stuffs = false;
        texture_cache.FillComputeImageViews(indices_span, image_view_ids);
        if (atleast_one_blacklisted) {
            for (u32 index = 0; index < image_view_blacklist.size(); index++) {
                if (image_view_blacklist[index]) {
                    ImageView& image_view{
                        texture_cache.GetImageView(image_view_ids[index + black_list_base])};
                    has_listed_stuffs |= texture_cache.BlackListImage(image_view.image_id);
                }
            }
        }
    } while (has_listed_stuffs);

    buffer_cache.UnbindComputeTextureBuffers();
    ImageId* texture_buffer_ids{image_view_ids.data()};
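The compute path above converges by iteration: fill the image views, blacklist any written Color2D/ColorArray2D image that came back, and refill whenever BlackListImage reports that an image was newly listed, since the scaled-down image may resolve to different views. Because an already-listed image reports no change, each image can force at most one extra pass and the do/while terminates. A minimal stand-alone sketch of that shape, using stand-in types rather than yuzu's:

#include <cstddef>
#include <vector>

struct SketchImage {
    bool blacklisted = false;
};

// Retry until a pass blacklists nothing new. `blacklist[i]` says whether
// slot i holds a written 2D image that must not stay rescaled.
void ConfigureSketch(std::vector<SketchImage>& images, const std::vector<bool>& blacklist) {
    bool newly_listed;
    do {
        newly_listed = false;
        // A real implementation re-runs FillComputeImageViews() here so the
        // descriptors are rebuilt against any image that was just scaled down.
        for (std::size_t index = 0; index < blacklist.size(); ++index) {
            if (blacklist[index] && !images[index].blacklisted) {
                images[index].blacklisted = true; // stands in for BlackListImage()
                newly_listed = true;
            }
        }
    } while (newly_listed);
}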
@@ -280,6 +280,7 @@ template <typename Spec>
void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
    std::array<ImageId, MAX_IMAGE_ELEMENTS> image_view_ids;
    std::array<u32, MAX_IMAGE_ELEMENTS> image_view_indices;
    std::array<bool, MAX_IMAGE_ELEMENTS> image_view_blacklist;
    std::array<VkSampler, MAX_IMAGE_ELEMENTS> samplers;
    size_t sampler_index{};
    size_t image_index{};
@@ -290,6 +291,8 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {

    const auto& regs{maxwell3d.regs};
    const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
    u32 start_black_list = std::numeric_limits<u32>::max();
    u32 end_black_list = 0;
    const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
        const Shader::Info& info{stage_infos[stage]};
        buffer_cache.UnbindGraphicsStorageBuffers(stage);
@@ -350,6 +353,15 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
        }
        if constexpr (Spec::has_images) {
            for (const auto& desc : info.image_descriptors) {
                if (desc.is_written && (desc.type == Shader::TextureType::Color2D ||
                                        desc.type == Shader::TextureType::ColorArray2D)) {
                    auto index_copy = image_index;
                    for (u32 index = 0; index < desc.count; ++index) {
                        start_black_list = std::min<u32>(start_black_list, index_copy);
                        image_view_blacklist[index_copy++] = true;
                        end_black_list = std::max<u32>(end_black_list, index_copy);
                    }
                }
                add_image(desc);
            }
        }
@@ -370,7 +382,21 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
        config_stage(4);
    }
    const std::span indices_span(image_view_indices.data(), image_index);
    bool has_listed_stuffs;
    do {
        has_listed_stuffs = false;
        texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
        if constexpr (Spec::has_images) {
            if (start_black_list < end_black_list) {
                for (u32 index = start_black_list; index < end_black_list; index++) {
                    if (image_view_blacklist[index]) {
                        ImageView& image_view{texture_cache.GetImageView(image_view_ids[index])};
                        has_listed_stuffs |= texture_cache.BlackListImage(image_view.image_id);
                    }
                }
            }
        }
    } while (has_listed_stuffs);

    ImageId* texture_buffer_index{image_view_ids.data()};
    const auto bind_stage_info{[&](size_t stage) LAMBDA_FORCEINLINE {
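Worth noting about the graphics path above: instead of the compact per-image vector plus base offset that the compute path keeps, it marks slots in a fixed array and only remembers the smallest and largest marked index, so the fixup pass scans a tight [start_black_list, end_black_list) window and is skipped outright when nothing was marked (start stays above end). A trivial stand-alone illustration of that bookkeeping, with made-up slot numbers:

#include <algorithm>
#include <cstdint>
#include <limits>

int main() {
    std::uint32_t start = std::numeric_limits<std::uint32_t>::max();
    std::uint32_t end = 0;
    for (std::uint32_t slot : {4u, 5u, 11u}) { // hypothetical blacklisted slots
        start = std::min(start, slot);
        end = std::max(end, slot + 1);
    }
    // Only [start, end) needs rescanning; with no marked slots, start > end
    // and the `if (start_black_list < end_black_list)` guard skips the scan.
    for (std::uint32_t index = start; index < end; ++index) {
        // a real caller would check image_view_blacklist[index] here
    }
    return 0;
}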
@@ -37,6 +37,7 @@ enum class ImageFlagBits : u32 {
    // Rescaler
    Rescaled = 1 << 12,
    RescaleChecked = 1 << 13,
    Blacklisted = 1 << 14,
};
DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)
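Blacklisted slots in beside the existing rescaling bits as an ordinary ImageFlagBits flag, so the rest of the cache can set and test it with the usual flag helpers. A self-contained sketch of that idiom; the operators and True() below are local stand-ins for what DECLARE_ENUM_FLAG_OPERATORS and yuzu's common helpers provide:

#include <cstdint>

enum class SketchFlags : std::uint32_t {
    Rescaled = 1 << 12,
    RescaleChecked = 1 << 13,
    Blacklisted = 1 << 14,
};

// Minimal stand-ins for the generated flag operators and the True() helper.
constexpr SketchFlags operator|(SketchFlags a, SketchFlags b) {
    return static_cast<SketchFlags>(static_cast<std::uint32_t>(a) | static_cast<std::uint32_t>(b));
}
constexpr SketchFlags operator&(SketchFlags a, SketchFlags b) {
    return static_cast<SketchFlags>(static_cast<std::uint32_t>(a) & static_cast<std::uint32_t>(b));
}
constexpr SketchFlags& operator|=(SketchFlags& a, SketchFlags b) {
    return a = a | b;
}
constexpr bool True(SketchFlags flags) {
    return static_cast<std::uint32_t>(flags) != 0;
}

int main() {
    SketchFlags flags{};
    flags |= SketchFlags::Blacklisted;                     // BlackListImage sets the bit
    return True(flags & SketchFlags::Blacklisted) ? 0 : 1; // callers test it with True(flags & ...)
}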
@@ -227,6 +227,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
    flags[Dirty::RenderTargetControl] = false;

    bool can_rescale = true;
    bool any_blacklisted = false;
    std::array<ImageId, NUM_RT> tmp_color_images{};
    ImageId tmp_depth_image{};
    const auto check_rescale = [&](ImageViewId view_id, ImageId& id_save) {
@@ -236,6 +237,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
            id_save = image_id;
            auto& image = slot_images[image_id];
            can_rescale &= ImageCanRescale(image);
            any_blacklisted |= True(image.flags & ImageFlagBits::Blacklisted);
        } else {
            id_save = CORRUPT_ID;
        }
@@ -268,10 +270,13 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
        scale_up(tmp_depth_image);
    } else {
        rescaled = false;
        const auto scale_down = [this](ImageId image_id) {
        const auto scale_down = [this, any_blacklisted](ImageId image_id) {
            if (image_id != CORRUPT_ID) {
                Image& image = slot_images[image_id];
                ScaleDown(image);
                if (any_blacklisted) {
                    image.flags |= ImageFlagBits::Blacklisted;
                }
            }
        };
        for (size_t index = 0; index < NUM_RT; ++index) {
@@ -736,8 +741,22 @@ ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
    return image_id;
}

template <class P>
bool TextureCache<P>::BlackListImage(ImageId image_id) {
    auto& image = slot_images[image_id];
    if (True(image.flags & ImageFlagBits::Blacklisted)) {
        return false;
    }
    image.flags |= ImageFlagBits::Blacklisted;
    ScaleDown(image);
    return true;
}

template <class P>
bool TextureCache<P>::ImageCanRescale(Image& image) {
    if (True(image.flags & ImageFlagBits::Blacklisted)) {
        return false;
    }
    if (True(image.flags & ImageFlagBits::Rescaled) ||
        True(image.flags & ImageFlagBits::RescaleChecked)) {
        return true;
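The contract added above is small but load-bearing: the first BlackListImage call on an image sets the flag, scales the image back down, and returns true; repeat calls return false without touching anything; and ImageCanRescale treats the flag as a hard veto, checked before Rescaled and RescaleChecked. A hypothetical stand-alone model of that contract (not yuzu's types):

#include <cassert>

struct ModelImage {
    bool blacklisted = false;
    bool rescaled = false;
};

bool ModelBlackListImage(ModelImage& image) {
    if (image.blacklisted) {
        return false;           // already listed: nothing changed, no extra pass needed
    }
    image.blacklisted = true;
    image.rescaled = false;     // stands in for ScaleDown(image)
    return true;
}

bool ModelImageCanRescale(const ModelImage& image) {
    return !image.blacklisted;  // the Blacklisted bit vetoes rescaling outright
}

int main() {
    ModelImage image{.rescaled = true};
    assert(ModelBlackListImage(image));    // first call: state changed
    assert(!ModelBlackListImage(image));   // repeat call: no-op
    assert(!ModelImageCanRescale(image));  // the image is never rescaled again
}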
@@ -912,6 +931,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
    bool can_rescale =
        (info.type == ImageType::e1D || info.type == ImageType::e2D) && info.block.depth == 0;
    bool any_rescaled = false;
    bool any_blacklisted = false;
    for (const ImageId sibling_id : all_siblings) {
        if (!can_rescale) {
            break;
@@ -919,6 +939,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
        Image& sibling = slot_images[sibling_id];
        can_rescale &= ImageCanRescale(sibling);
        any_rescaled |= True(sibling.flags & ImageFlagBits::Rescaled);
        any_blacklisted |= True(sibling.flags & ImageFlagBits::Blacklisted);
    }

    can_rescale &= any_rescaled;
@@ -932,6 +953,9 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
        for (const ImageId sibling_id : all_siblings) {
            Image& sibling = slot_images[sibling_id];
            ScaleDown(sibling);
            if (any_blacklisted) {
                sibling.flags |= ImageFlagBits::Blacklisted;
            }
        }
    }
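JoinImages above treats rescaling as a group decision: every sibling must pass ImageCanRescale, and when the group is scaled down instead, a Blacklisted flag found on any member is propagated to all of them so later rescale attempts skip the whole set (UpdateRenderTargets and SynchronizeAliases apply the same rule to render targets and aliases). A small stand-alone sketch of that propagation, again with stand-in types:

#include <vector>

struct GroupImage {
    bool blacklisted = false;
    bool rescaled = false;
};

// Scale a whole sibling group down; if any member was blacklisted, the flag
// spreads to every member so the group is never rescaled again.
void ScaleDownGroup(std::vector<GroupImage>& siblings) {
    bool any_blacklisted = false;
    for (const GroupImage& sibling : siblings) {
        any_blacklisted |= sibling.blacklisted;
    }
    for (GroupImage& sibling : siblings) {
        sibling.rescaled = false;       // stands in for ScaleDown(sibling)
        if (any_blacklisted) {
            sibling.blacklisted = true; // propagate the veto to the whole group
        }
    }
}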
@@ -1556,6 +1580,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
    boost::container::small_vector<const AliasedImage*, 1> aliased_images;
    Image& image = slot_images[image_id];
    bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled);
    bool any_blacklisted = True(image.flags & ImageFlagBits::Blacklisted);
    u64 most_recent_tick = image.modification_tick;
    for (const AliasedImage& aliased : image.aliased_images) {
        ImageBase& aliased_image = slot_images[aliased.id];
@@ -1563,6 +1588,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
            most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
            aliased_images.push_back(&aliased);
            any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
            any_blacklisted |= True(aliased_image.flags & ImageFlagBits::Blacklisted);
        }
    }
    if (aliased_images.empty()) {
@@ -1574,6 +1600,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
            ScaleUp(image);
        } else {
            ScaleDown(image);
            if (any_blacklisted) {
                image.flags |= ImageFlagBits::Blacklisted;
            }
        }
    }
    image.modification_tick = most_recent_tick;
@@ -1589,6 +1618,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
                ScaleUp(aliased_image);
            } else {
                ScaleDown(aliased_image);
                if (any_blacklisted) {
                    aliased_image.flags |= ImageFlagBits::Blacklisted;
                }
            }
        }
        CopyImage(image_id, aliased->id, aliased->copies);
@@ -176,6 +176,8 @@ public:

    [[nodiscard]] bool IsRescaling();

    [[nodiscard]] bool BlackListImage(ImageId image_id);

    std::mutex mutex;

private: