Merge branch 'master' of https://github.com/mapnik/mapnik into feature/cmake-static

Mathis Logemann 2022-08-17 17:34:52 +02:00
commit 213be3777c
71 changed files with 17507 additions and 14528 deletions


@ -30,7 +30,7 @@ BraceWrapping:
AfterFunction: true # see AllowShortFunctionsOnASingleLine
AfterNamespace: false
AfterStruct: true
AfterExternBlock: true
AfterExternBlock: false
BeforeCatch: false
BeforeElse: true
BeforeLambdaBody: false


@ -9,7 +9,17 @@ on:
- "**"
jobs:
checkSource:
name: Check Source Code
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v3
- uses: pre-commit/action@v3.0.0
windows:
needs: checkSource
name: Windows memory mapped
uses: ./.github/workflows/windows.yml
with:
@ -22,6 +32,7 @@ jobs:
NUGET_REGISTRY_PAT: ${{ secrets.VCPKG_CACHE_PAT }}
windows-mmf-off:
needs: checkSource
name: Windows file based
uses: ./.github/workflows/windows.yml
with:
@ -46,6 +57,7 @@ jobs:
NUGET_REGISTRY_PAT: ${{ secrets.VCPKG_CACHE_PAT }}
ubuntu:
needs: checkSource
name: Linux memory mapped
uses: ./.github/workflows/ubuntu.yml
with:
@ -58,6 +70,7 @@ jobs:
NUGET_REGISTRY_PAT: ${{ secrets.VCPKG_CACHE_PAT }}
ubuntu-mmf-off:
needs: checkSource
name: Linux file based
uses: ./.github/workflows/ubuntu.yml
with:
@ -82,6 +95,7 @@ jobs:
NUGET_REGISTRY_PAT: ${{ secrets.VCPKG_CACHE_PAT }}
macos:
needs: checkSource
name: MacOS memory mapped
uses: ./.github/workflows/macos.yml
with:
@ -94,6 +108,7 @@ jobs:
NUGET_REGISTRY_PAT: ${{ secrets.VCPKG_CACHE_PAT }}
macos-mmf-off:
needs: checkSource
name: MacOS file based
uses: ./.github/workflows/macos.yml
with:

.pre-commit-config.yaml (new file, 16 lines added)

@ -0,0 +1,16 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: trailing-whitespace
files: ^.*\.cmake|CMakeLists\.txt$
- id: end-of-file-fixer
files: ^.*\.cmake|CMakeLists\.txt$
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v14.0.6
hooks:
- id: clang-format
types_or: [c++, c]


@ -55,7 +55,8 @@ target_link_libraries(mapnik-viewer PRIVATE
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/viewer.ini
"[mapnik]
plugins_dir=${PLUGINS_INSTALL_DIR}
fonts/1/dir=${FONTS_INSTALL_DIR}"
fonts/1/dir=${FONTS_INSTALL_DIR}
fonts/size=1"
)
if(QT_VERSION_MAJOR EQUAL 6)

deps/agg/.clang-format (new vendored file, 1 line added)

@ -0,0 +1 @@
DisableFormat: true


@ -365,7 +365,7 @@ namespace agg
inline bool is_close(unsigned c)
{
return (c & ~(path_flags_cw | path_flags_ccw)) ==
(path_cmd_end_poly | path_flags_close);
(path_cmd_end_poly | static_cast<path_commands_e>(path_flags_close));
}
//------------------------------------------------------------is_next_poly
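The static_cast added to is_close() above (and repeated in several later hunks) matches C++20's deprecation of applying bitwise operators to two different unscoped enumeration types. A minimal, self-contained sketch of the pattern; the enumerator values below are illustrative stand-ins for agg's path_commands_e / path_flags_e, not the library's actual definitions:

#include <cstdio>

// hypothetical stand-ins for agg's command/flag enums
enum path_commands_e { path_cmd_end_poly = 0x0F, path_cmd_mask = 0xFF };
enum path_flags_e    { path_flags_close  = 0x40 };

int main()
{
    // unsigned cmd = path_cmd_end_poly | path_flags_close;  // enum|enum mix, deprecated in C++20
    unsigned cmd = path_cmd_end_poly | static_cast<path_commands_e>(path_flags_close);
    std::printf("cmd = 0x%X\n", cmd); // 0x4F: end_poly with the close flag set
    return 0;
}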


@ -436,7 +436,8 @@ struct gray16
static value_type luminance(const rgba& c)
{
// Calculate grayscale value as per ITU-R BT.709.
return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b)
* static_cast<double>(base_mask)));
}
static value_type luminance(const rgba16& c)
@ -537,13 +538,13 @@ struct gray16
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return double(a) / base_mask;
return static_cast<double>(a) / static_cast<double>(base_mask);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(uround(a * base_mask));
return value_type(uround(a * static_cast<double>(base_mask)));
}
//--------------------------------------------------------------------
@ -698,7 +699,7 @@ struct gray16
self_type gradient(self_type c, double k) const
{
self_type ret;
calc_type ik = uround(k * base_scale);
calc_type ik = uround(k * static_cast<double>(base_scale));
ret.v = lerp(v, c.v, ik);
ret.a = lerp(a, c.a, ik);
return ret;
@ -949,7 +950,7 @@ struct gray32
//--------------------------------------------------------------------
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return value_type(a * b / cover_mask);
return value_type(a * b / static_cast<double>(cover_mask));
}
//--------------------------------------------------------------------
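The gray16 luminance() change above keeps the ITU-R BT.709 weighting (0.2126 R + 0.7152 G + 0.0722 B) and only makes the promotion of base_mask to double explicit. A small standalone sketch of the same computation, assuming a 16-bit channel (base_mask = 65535, as in gray16):

#include <cmath>
#include <cstdio>

int main()
{
    double const r = 0.25, g = 0.5, b = 0.75;   // normalized [0,1] inputs
    double const base_mask = 65535.0;           // assumed 16-bit channel mask
    double y = 0.2126 * r + 0.7152 * g + 0.0722 * b;   // BT.709 weights
    unsigned v = static_cast<unsigned>(std::lround(y * base_mask));
    std::printf("luma %.4f -> channel value %u\n", y, v);
    return 0;
}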


@ -281,10 +281,10 @@ struct rgba8T
static void convert(rgba8T<linear>& dst, const rgba& src)
{
dst.r = value_type(uround(src.r * base_mask));
dst.g = value_type(uround(src.g * base_mask));
dst.b = value_type(uround(src.b * base_mask));
dst.a = value_type(uround(src.a * base_mask));
dst.r = value_type(uround(src.r * static_cast<double>(base_mask)));
dst.g = value_type(uround(src.g * static_cast<double>(base_mask)));
dst.b = value_type(uround(src.b * static_cast<double>(base_mask)));
dst.a = value_type(uround(src.a * static_cast<double>(base_mask)));
}
static void convert(rgba8T<sRGB>& dst, const rgba& src)
@ -761,13 +761,13 @@ struct rgba16
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return double(a) / base_mask;
return static_cast<double>(a) / static_cast<double>(base_mask);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(uround(a * base_mask));
return value_type(uround(a * static_cast<double>(base_mask)));
}
//--------------------------------------------------------------------
@ -955,7 +955,7 @@ struct rgba16
AGG_INLINE self_type gradient(const self_type& c, double k) const
{
self_type ret;
calc_type ik = uround(k * base_mask);
calc_type ik = uround(k * static_cast<double>(base_mask));
ret.r = lerp(r, c.r, ik);
ret.g = lerp(g, c.g, ik);
ret.b = lerp(b, c.b, ik);
@ -1194,7 +1194,7 @@ struct rgba32
//--------------------------------------------------------------------
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return value_type(a * b / cover_mask);
return value_type(a * b / static_cast<float>(cover_mask));
}
//--------------------------------------------------------------------


@ -103,7 +103,8 @@ namespace agg
if(m_vpgen.auto_close() && m_vertices > 2)
{
m_vpgen.line_to(m_start_x, m_start_y);
m_poly_flags = path_cmd_end_poly | path_flags_close;
m_poly_flags = path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close);
m_start_x = tx;
m_start_y = ty;
m_vertices = -1;
@ -141,7 +142,8 @@ namespace agg
if(m_vpgen.auto_close() && m_vertices > 2)
{
m_vpgen.line_to(m_start_x, m_start_y);
m_poly_flags = path_cmd_end_poly | path_flags_close;
m_poly_flags = path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close);
m_vertices = -2;
continue;
}
@ -157,4 +159,3 @@ namespace agg
#endif


@ -22,6 +22,7 @@
#include "agg_array.h"
#include "agg_math.h"
#include <cstdint>
namespace agg
{
@ -59,7 +60,8 @@ namespace agg
double x = double(i) / double(image_subpixel_scale);
double y = filter.calc_weight(x);
m_weight_array[pivot + i] =
m_weight_array[pivot - i] = (int16)iround(y * image_filter_scale);
m_weight_array[pivot - i] =
static_cast<std::int16_t>(iround(y * static_cast<double>(image_filter_scale)));
}
unsigned end = (diameter() << image_subpixel_shift) - 1;
m_weight_array[0] = m_weight_array[end];
@ -80,7 +82,7 @@ namespace agg
double radius() const { return m_radius; }
unsigned diameter() const { return m_diameter; }
int start() const { return m_start; }
const int16* weight_array() const { return &m_weight_array[0]; }
std::int16_t const* weight_array() const { return &m_weight_array[0]; }
void normalize();
private:
@ -91,7 +93,7 @@ namespace agg
double m_radius;
unsigned m_diameter;
int m_start;
pod_array<int16> m_weight_array;
pod_array<std::int16_t> m_weight_array;
};
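The agg_image_filters hunk above replaces agg's int16 typedef with std::int16_t and casts image_filter_scale to double before multiplying; the underlying operation is quantizing each sampled filter weight into a signed 16-bit fixed-point value. A hedged sketch, taking image_filter_shift = 14 as an assumption (agg's conventional setting, not something stated in this diff):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    int const image_filter_shift = 14;                        // assumed value
    double const image_filter_scale = double(1 << image_filter_shift);
    double weight = 0.375;                                    // e.g. filter.calc_weight(x)
    auto fixed = static_cast<std::int16_t>(std::lround(weight * image_filter_scale));
    std::printf("%.3f -> fixed-point %d (scale %d)\n", weight, fixed, 1 << image_filter_shift);
    return 0;
}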


@ -44,13 +44,13 @@ namespace agg
//------------------------------------------------------------------line_mr
AGG_INLINE int line_mr(int x)
{
return x >> (line_subpixel_shift - line_mr_subpixel_shift);
return x >> (line_subpixel_shift - static_cast<line_subpixel_scale_e>(line_mr_subpixel_shift));
}
//-------------------------------------------------------------------line_hr
AGG_INLINE int line_hr(int x)
{
return x << (line_subpixel_shift - line_mr_subpixel_shift);
return x << (line_subpixel_shift - static_cast<line_subpixel_scale_e>(line_mr_subpixel_shift));
}
//---------------------------------------------------------------line_dbl_hr
@ -64,7 +64,7 @@ namespace agg
{
AGG_INLINE static int conv(double x)
{
return iround(x * line_subpixel_scale);
return iround(x * static_cast<double>(line_subpixel_scale));
}
};
@ -73,7 +73,7 @@ namespace agg
{
AGG_INLINE static int conv(double x)
{
return saturation<line_max_coord>::iround(x * line_subpixel_scale);
return saturation<line_max_coord>::iround(x * static_cast<double>(line_subpixel_scale));
}
};


@ -398,7 +398,8 @@ namespace agg
if(m_closed && !m_stop)
{
m_stop = true;
return path_cmd_end_poly | path_flags_close;
return path_cmd_end_poly
| static_cast<agg::path_commands_e>(path_flags_close);
}
return path_cmd_stop;
}
@ -463,7 +464,8 @@ namespace agg
if(m_closed && !m_stop)
{
m_stop = true;
return path_cmd_end_poly | path_flags_close;
return path_cmd_end_poly
| static_cast<agg::path_commands_e>(path_flags_close);
}
return path_cmd_stop;
}
@ -525,7 +527,8 @@ namespace agg
if(m_closed && !m_stop)
{
m_stop = true;
return path_cmd_end_poly | path_flags_close;
return path_cmd_end_poly
| static_cast<agg::path_commands_e>(path_flags_close);
}
return path_cmd_stop;
}


@ -54,7 +54,7 @@ namespace agg
if (cover < cover_full)
{
double x = double(cover) / cover_full;
double x = static_cast<double>(cover) / static_cast<double>(cover_full);
c.r *= x;
c.g *= x;
c.b *= x;


@ -35,7 +35,7 @@ namespace agg
}
static int xi(int v) { return v; }
static int yi(int v) { return v; }
static int upscale(double v) { return iround(v * poly_subpixel_scale); }
static int upscale(double v) { return iround(v * static_cast<double>(poly_subpixel_scale)); }
static int downscale(int v) { return v; }
};
@ -51,7 +51,7 @@ namespace agg
static int yi(int v) { return v; }
static int upscale(double v)
{
return saturation<poly_max_coord>::iround(v * poly_subpixel_scale);
return saturation<poly_max_coord>::iround(v * static_cast<double>(poly_subpixel_scale));
}
static int downscale(int v) { return v; }
};
@ -66,7 +66,7 @@ namespace agg
}
static int xi(int v) { return v * 3; }
static int yi(int v) { return v; }
static int upscale(double v) { return iround(v * poly_subpixel_scale); }
static int upscale(double v) { return iround(v * static_cast<double>(poly_subpixel_scale)); }
static int downscale(int v) { return v; }
};
@ -78,10 +78,10 @@ namespace agg
{
return a * b / c;
}
static int xi(double v) { return iround(v * poly_subpixel_scale); }
static int yi(double v) { return iround(v * poly_subpixel_scale); }
static int xi(double v) { return iround(v * static_cast<double>(poly_subpixel_scale)); }
static int yi(double v) { return iround(v * static_cast<double>(poly_subpixel_scale)); }
static double upscale(double v) { return v; }
static double downscale(int v) { return v / double(poly_subpixel_scale); }
static double downscale(int v) { return v / static_cast<double>(poly_subpixel_scale); }
};
//--------------------------------------------------------ras_conv_dbl_3x
@ -92,10 +92,10 @@ namespace agg
{
return a * b / c;
}
static int xi(double v) { return iround(v * poly_subpixel_scale * 3); }
static int yi(double v) { return iround(v * poly_subpixel_scale); }
static int xi(double v) { return iround(v * static_cast<double>(poly_subpixel_scale) * 3); }
static int yi(double v) { return iround(v * static_cast<double>(poly_subpixel_scale)); }
static double upscale(double v) { return v; }
static double downscale(int v) { return v / double(poly_subpixel_scale); }
static double downscale(int v) { return v / static_cast<double>(poly_subpixel_scale); }
};
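The ras_conv hunks above all follow one pattern: double coordinates are converted to integer subpixel units by multiplying with poly_subpixel_scale (now explicitly promoted to double) and rounding, and converted back by dividing. A short round-trip sketch, assuming poly_subpixel_shift = 8 (agg's usual value), so poly_subpixel_scale = 256:

#include <cmath>
#include <cstdio>

int main()
{
    int const poly_subpixel_shift = 8;                        // assumed value
    double const poly_subpixel_scale = double(1 << poly_subpixel_shift);

    double x = 12.3456;
    int xi = static_cast<int>(std::lround(x * poly_subpixel_scale)); // upscale -> 3160
    double back = xi / poly_subpixel_scale;                          // downscale -> 12.34375
    std::printf("x=%.4f  xi=%d  back=%.5f\n", x, xi, back);
    return 0;
}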


@ -1305,7 +1305,7 @@ namespace agg
for(i = 0; i < aa_scale; i++)
{
m_gamma[i] = value_type(
uround(gamma_function(double(i) / aa_mask) * aa_mask));
uround(gamma_function(static_cast<double>(i) / static_cast<double>(aa_mask)) * aa_mask));
}
}
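The rasterizer hunk above fills a gamma lookup table: each coverage level i in [0, aa_scale) is normalized by aa_mask, run through gamma_function, and scaled back to aa_mask. Since gamma_function is a template parameter, the sketch below substitutes a simple power-law curve and assumes aa_scale = 256 / aa_mask = 255:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    unsigned const aa_scale = 256, aa_mask = aa_scale - 1;    // assumed 8-bit coverage
    double const gamma = 2.2;                                 // stand-in for gamma_function
    std::vector<unsigned> lut(aa_scale);
    for (unsigned i = 0; i < aa_scale; ++i)
        lut[i] = static_cast<unsigned>(
            std::lround(std::pow(double(i) / aa_mask, gamma) * aa_mask));
    std::printf("lut[64]=%u lut[128]=%u lut[255]=%u\n", lut[64], lut[128], lut[255]);
    return 0;
}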


@ -66,7 +66,9 @@ void arrowhead::rewind(unsigned path_id)
m_cmd[3] = path_cmd_line_to;
m_cmd[4] = path_cmd_line_to;
m_cmd[5] = path_cmd_line_to;
m_cmd[7] = path_cmd_end_poly | path_flags_close | path_flags_ccw;
m_cmd[7] = path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close)
| static_cast<path_commands_e>(path_flags_ccw);
m_cmd[6] = path_cmd_stop;
return;
}
@ -87,7 +89,9 @@ void arrowhead::rewind(unsigned path_id)
m_cmd[1] = path_cmd_line_to;
m_cmd[2] = path_cmd_line_to;
m_cmd[3] = path_cmd_line_to;
m_cmd[4] = path_cmd_end_poly | path_flags_close | path_flags_ccw;
m_cmd[4] = path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close)
| static_cast<path_commands_e>(path_flags_ccw);
m_cmd[5] = path_cmd_stop;
return;
}


@ -42,7 +42,7 @@ void line_profile_aa::width(double w)
//---------------------------------------------------------------------
line_profile_aa::value_type* line_profile_aa::profile(double w)
{
m_subpixel_width = uround(w * subpixel_scale);
m_subpixel_width = uround(w * static_cast<double>(subpixel_scale));
unsigned size = m_subpixel_width + subpixel_scale * 6;
if(size > m_profile.size())
{
@ -56,8 +56,8 @@ line_profile_aa::value_type* line_profile_aa::profile(double w)
void line_profile_aa::set(double center_width, double smoother_width)
{
double base_val = 1.0;
if(center_width == 0.0) center_width = 1.0 / subpixel_scale;
if(smoother_width == 0.0) smoother_width = 1.0 / subpixel_scale;
if(center_width == 0.0) center_width = 1.0 / static_cast<double>(subpixel_scale);
if(smoother_width == 0.0) smoother_width = 1.0 / static_cast<double>(subpixel_scale);
double width = center_width + smoother_width;
if(width < m_min_width)
@ -70,15 +70,15 @@ void line_profile_aa::set(double center_width, double smoother_width)
value_type* ch = profile(center_width + smoother_width);
unsigned subpixel_center_width = unsigned(center_width * subpixel_scale);
unsigned subpixel_smoother_width = unsigned(smoother_width * subpixel_scale);
unsigned subpixel_center_width = unsigned(center_width * static_cast<double>(subpixel_scale));
unsigned subpixel_smoother_width = unsigned(smoother_width * static_cast<double>(subpixel_scale));
value_type* ch_center = ch + subpixel_scale*2;
value_type* ch_center = ch + subpixel_scale * 2;
value_type* ch_smoother = ch_center + subpixel_center_width;
unsigned i;
unsigned val = m_gamma[unsigned(base_val * aa_mask)];
unsigned val = m_gamma[unsigned(base_val * static_cast<double>(aa_mask))];
ch = ch_center;
for(i = 0; i < subpixel_center_width; i++)
{
@ -90,7 +90,7 @@ void line_profile_aa::set(double center_width, double smoother_width)
*ch_smoother++ =
m_gamma[unsigned((base_val -
base_val *
(double(i) / subpixel_smoother_width)) * aa_mask)];
(double(i) / subpixel_smoother_width)) * static_cast<double>(aa_mask))];
}
unsigned n_smoother = profile_size() -
@ -113,4 +113,3 @@ void line_profile_aa::set(double center_width, double smoother_width)
}


@ -152,7 +152,9 @@ unsigned rounded_rect::vertex(double* x, double* y)
else return path_cmd_line_to;
case 8:
cmd = path_cmd_end_poly | path_flags_close | path_flags_ccw;
cmd = path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close)
| static_cast<path_commands_e>(path_flags_ccw);
m_status++;
break;
}
@ -161,4 +163,3 @@ unsigned rounded_rect::vertex(double* x, double* y)
}


@ -153,7 +153,9 @@ unsigned vcgen_contour::vertex(double* x, double* y)
case end_poly:
if(!m_closed) return path_cmd_stop;
m_status = stop;
return path_cmd_end_poly | path_flags_close | path_flags_ccw;
return path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close)
| static_cast<path_commands_e>(path_flags_ccw);
case stop:
return path_cmd_stop;


@ -196,11 +196,15 @@ unsigned vcgen_stroke::vertex(double* x, double* y)
case end_poly1:
m_status = m_prev_status;
return path_cmd_end_poly | path_flags_close | path_flags_ccw;
return path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close)
| static_cast<path_commands_e>(path_flags_ccw);
case end_poly2:
m_status = m_prev_status;
return path_cmd_end_poly | path_flags_close | path_flags_cw;
return path_cmd_end_poly
| static_cast<path_commands_e>(path_flags_close)
| static_cast<path_commands_e>(path_flags_cw);
case stop:
cmd = path_cmd_stop;

deps/boost/.clang-format (new vendored file, 1 line added)

@ -0,0 +1 @@
DisableFormat: true

deps/mapbox/.clang-format (new vendored file, 1 line added)

@ -0,0 +1 @@
DisableFormat: true

File diff suppressed because it is too large.


@ -43,9 +43,9 @@
#include <mapnik/sparsehash/internal/sparseconfig.h>
#include <assert.h>
#include <stdio.h>
#include <stddef.h> // for size_t
#include <stddef.h> // for size_t
#include <iosfwd>
#include <stdexcept> // For length_error
#include <stdexcept> // For length_error
_START_GOOGLE_NAMESPACE_
@ -80,15 +80,15 @@ namespace sparsehash_internal {
// ----- low-level I/O for FILE* ----
template<typename Ignored>
inline bool read_data_internal(Ignored*, FILE* fp,
void* data, size_t length) {
return fread(data, length, 1, fp) == 1;
inline bool read_data_internal(Ignored*, FILE* fp, void* data, size_t length)
{
return fread(data, length, 1, fp) == 1;
}
template<typename Ignored>
inline bool write_data_internal(Ignored*, FILE* fp,
const void* data, size_t length) {
return fwrite(data, length, 1, fp) == 1;
inline bool write_data_internal(Ignored*, FILE* fp, const void* data, size_t length)
{
return fwrite(data, length, 1, fp) == 1;
}
// ----- low-level I/O for iostream ----
@ -98,55 +98,57 @@ inline bool write_data_internal(Ignored*, FILE* fp,
// it's only legal to delay the instantiation the way we want to if
// the istream/ostream is a template type. So we jump through hoops.
template<typename ISTREAM>
inline bool read_data_internal_for_istream(ISTREAM* fp,
void* data, size_t length) {
return fp->read(reinterpret_cast<char*>(data), length).good();
inline bool read_data_internal_for_istream(ISTREAM* fp, void* data, size_t length)
{
return fp->read(reinterpret_cast<char*>(data), length).good();
}
template<typename Ignored>
inline bool read_data_internal(Ignored*, std::istream* fp,
void* data, size_t length) {
return read_data_internal_for_istream(fp, data, length);
inline bool read_data_internal(Ignored*, std::istream* fp, void* data, size_t length)
{
return read_data_internal_for_istream(fp, data, length);
}
template<typename OSTREAM>
inline bool write_data_internal_for_ostream(OSTREAM* fp,
const void* data, size_t length) {
return fp->write(reinterpret_cast<const char*>(data), length).good();
inline bool write_data_internal_for_ostream(OSTREAM* fp, const void* data, size_t length)
{
return fp->write(reinterpret_cast<const char*>(data), length).good();
}
template<typename Ignored>
inline bool write_data_internal(Ignored*, std::ostream* fp,
const void* data, size_t length) {
return write_data_internal_for_ostream(fp, data, length);
inline bool write_data_internal(Ignored*, std::ostream* fp, const void* data, size_t length)
{
return write_data_internal_for_ostream(fp, data, length);
}
// ----- low-level I/O for custom streams ----
// The INPUT type needs to support a Read() method that takes a
// buffer and a length and returns the number of bytes read.
template <typename INPUT>
inline bool read_data_internal(INPUT* fp, void*,
void* data, size_t length) {
return static_cast<size_t>(fp->Read(data, length)) == length;
template<typename INPUT>
inline bool read_data_internal(INPUT* fp, void*, void* data, size_t length)
{
return static_cast<size_t>(fp->Read(data, length)) == length;
}
// The OUTPUT type needs to support a Write() operation that takes
// a buffer and a length and returns the number of bytes written.
template <typename OUTPUT>
inline bool write_data_internal(OUTPUT* fp, void*,
const void* data, size_t length) {
return static_cast<size_t>(fp->Write(data, length)) == length;
template<typename OUTPUT>
inline bool write_data_internal(OUTPUT* fp, void*, const void* data, size_t length)
{
return static_cast<size_t>(fp->Write(data, length)) == length;
}
// ----- low-level I/O: the public API ----
template <typename INPUT>
inline bool read_data(INPUT* fp, void* data, size_t length) {
return read_data_internal(fp, fp, data, length);
template<typename INPUT>
inline bool read_data(INPUT* fp, void* data, size_t length)
{
return read_data_internal(fp, fp, data, length);
}
template <typename OUTPUT>
inline bool write_data(OUTPUT* fp, const void* data, size_t length) {
return write_data_internal(fp, fp, data, length);
template<typename OUTPUT>
inline bool write_data(OUTPUT* fp, const void* data, size_t length)
{
return write_data_internal(fp, fp, data, length);
}
// Uses read_data() and write_data() to read/write an integer.
@ -154,26 +156,32 @@ inline bool write_data(OUTPUT* fp, const void* data, size_t length) {
// from sizeof(IntType), allowing us to save on a 32-bit system
// and load on a 64-bit system). Excess bytes are taken to be 0.
// INPUT and OUTPUT must match legal inputs to read/write_data (above).
template <typename INPUT, typename IntType>
bool read_bigendian_number(INPUT* fp, IntType* value, size_t length) {
*value = 0;
unsigned char byte;
for (size_t i = 0; i < length; ++i) {
if (!read_data(fp, &byte, sizeof(byte))) return false;
*value |= static_cast<IntType>(byte) << ((length - 1 - i) * 8);
}
return true;
template<typename INPUT, typename IntType>
bool read_bigendian_number(INPUT* fp, IntType* value, size_t length)
{
*value = 0;
unsigned char byte;
for (size_t i = 0; i < length; ++i)
{
if (!read_data(fp, &byte, sizeof(byte)))
return false;
*value |= static_cast<IntType>(byte) << ((length - 1 - i) * 8);
}
return true;
}
template <typename OUTPUT, typename IntType>
bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length) {
unsigned char byte;
for (size_t i = 0; i < length; ++i) {
byte = (sizeof(value) <= length-1 - i)
? 0 : static_cast<unsigned char>((value >> ((length-1 - i) * 8)) & 255);
if (!write_data(fp, &byte, sizeof(byte))) return false;
}
return true;
template<typename OUTPUT, typename IntType>
bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length)
{
unsigned char byte;
for (size_t i = 0; i < length; ++i)
{
byte =
(sizeof(value) <= length - 1 - i) ? 0 : static_cast<unsigned char>((value >> ((length - 1 - i) * 8)) & 255);
if (!write_data(fp, &byte, sizeof(byte)))
return false;
}
return true;
}
// If your keys and values are simple enough, you can pass this
@ -181,19 +189,22 @@ bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length) {
// value_type is a POD type that contains no pointers. Note,
// however, we don't try to normalize endianness.
// This is the type used for NopointerSerializer.
template <typename value_type> struct pod_serializer {
template <typename INPUT>
bool operator()(INPUT* fp, value_type* value) const {
return read_data(fp, value, sizeof(*value));
}
template<typename value_type>
struct pod_serializer
{
template<typename INPUT>
bool operator()(INPUT* fp, value_type* value) const
{
return read_data(fp, value, sizeof(*value));
}
template <typename OUTPUT>
bool operator()(OUTPUT* fp, const value_type& value) const {
return write_data(fp, &value, sizeof(value));
}
template<typename OUTPUT>
bool operator()(OUTPUT* fp, const value_type& value) const
{
return write_data(fp, &value, sizeof(value));
}
};
// Settings contains parameters for growing and shrinking the table.
// It also packages zero-size functor (ie. hasher).
//
@ -206,165 +217,136 @@ template <typename value_type> struct pod_serializer {
// for sure that the hash is the identity hash. If it's not, this
// is needless work (and possibly, though not likely, harmful).
template<typename Key, typename HashFunc,
typename SizeType, int HT_MIN_BUCKETS>
class sh_hashtable_settings : public HashFunc {
public:
typedef Key key_type;
typedef HashFunc hasher;
typedef SizeType size_type;
template<typename Key, typename HashFunc, typename SizeType, int HT_MIN_BUCKETS>
class sh_hashtable_settings : public HashFunc
{
public:
typedef Key key_type;
typedef HashFunc hasher;
typedef SizeType size_type;
public:
sh_hashtable_settings(const hasher& hf,
const float ht_occupancy_flt,
const float ht_empty_flt)
: hasher(hf),
enlarge_threshold_(0),
shrink_threshold_(0),
consider_shrink_(false),
use_empty_(false),
use_deleted_(false),
num_ht_copies_(0) {
set_enlarge_factor(ht_occupancy_flt);
set_shrink_factor(ht_empty_flt);
}
size_type hash(const key_type& v) const {
// We munge the hash value when we don't trust hasher::operator().
return hash_munger<Key>::MungedHash(hasher::operator()(v));
}
float enlarge_factor() const {
return enlarge_factor_;
}
void set_enlarge_factor(float f) {
enlarge_factor_ = f;
}
float shrink_factor() const {
return shrink_factor_;
}
void set_shrink_factor(float f) {
shrink_factor_ = f;
}
size_type enlarge_threshold() const {
return enlarge_threshold_;
}
void set_enlarge_threshold(size_type t) {
enlarge_threshold_ = t;
}
size_type shrink_threshold() const {
return shrink_threshold_;
}
void set_shrink_threshold(size_type t) {
shrink_threshold_ = t;
}
size_type enlarge_size(size_type x) const {
return static_cast<size_type>(x * enlarge_factor_);
}
size_type shrink_size(size_type x) const {
return static_cast<size_type>(x * shrink_factor_);
}
bool consider_shrink() const {
return consider_shrink_;
}
void set_consider_shrink(bool t) {
consider_shrink_ = t;
}
bool use_empty() const {
return use_empty_;
}
void set_use_empty(bool t) {
use_empty_ = t;
}
bool use_deleted() const {
return use_deleted_;
}
void set_use_deleted(bool t) {
use_deleted_ = t;
}
size_type num_ht_copies() const {
return static_cast<size_type>(num_ht_copies_);
}
void inc_num_ht_copies() {
++num_ht_copies_;
}
// Reset the enlarge and shrink thresholds
void reset_thresholds(size_type num_buckets) {
set_enlarge_threshold(enlarge_size(num_buckets));
set_shrink_threshold(shrink_size(num_buckets));
// whatever caused us to reset already considered
set_consider_shrink(false);
}
// Caller is resposible for calling reset_threshold right after
// set_resizing_parameters.
void set_resizing_parameters(float shrink, float grow) {
assert(shrink >= 0.0);
assert(grow <= 1.0);
if (shrink > grow/2.0f)
shrink = grow / 2.0f; // otherwise we thrash hashtable size
set_shrink_factor(shrink);
set_enlarge_factor(grow);
}
// This is the smallest size a hashtable can be without being too crowded
// If you like, you can give a min #buckets as well as a min #elts
size_type min_buckets(size_type num_elts, size_type min_buckets_wanted) {
float enlarge = enlarge_factor();
size_type sz = HT_MIN_BUCKETS; // min buckets allowed
while ( sz < min_buckets_wanted ||
num_elts >= static_cast<size_type>(sz * enlarge) ) {
// This just prevents overflowing size_type, since sz can exceed
// max_size() here.
if (static_cast<size_type>(sz * 2) < sz) {
throw std::length_error("resize overflow"); // protect against overflow
}
sz *= 2;
public:
sh_hashtable_settings(const hasher& hf, const float ht_occupancy_flt, const float ht_empty_flt)
: hasher(hf)
, enlarge_threshold_(0)
, shrink_threshold_(0)
, consider_shrink_(false)
, use_empty_(false)
, use_deleted_(false)
, num_ht_copies_(0)
{
set_enlarge_factor(ht_occupancy_flt);
set_shrink_factor(ht_empty_flt);
}
return sz;
}
private:
template<class HashKey> class hash_munger {
public:
static size_t MungedHash(size_t hash) {
return hash;
size_type hash(const key_type& v) const
{
// We munge the hash value when we don't trust hasher::operator().
return hash_munger<Key>::MungedHash(hasher::operator()(v));
}
};
// This matches when the hashtable key is a pointer.
template<class HashKey> class hash_munger<HashKey*> {
public:
static size_t MungedHash(size_t hash) {
// TODO(csilvers): consider rotating instead:
// static const int shift = (sizeof(void *) == 4) ? 2 : 3;
// return (hash << (sizeof(hash) * 8) - shift)) | (hash >> shift);
// This matters if we ever change sparse/dense_hash_* to compare
// hashes before comparing actual values. It's speedy on x86.
return hash / sizeof(void*); // get rid of known-0 bits
}
};
size_type enlarge_threshold_; // table.size() * enlarge_factor
size_type shrink_threshold_; // table.size() * shrink_factor
float enlarge_factor_; // how full before resize
float shrink_factor_; // how empty before resize
// consider_shrink=true if we should try to shrink before next insert
bool consider_shrink_;
bool use_empty_; // used only by densehashtable, not sparsehashtable
bool use_deleted_; // false until delkey has been set
// num_ht_copies is a counter incremented every Copy/Move
unsigned int num_ht_copies_;
float enlarge_factor() const { return enlarge_factor_; }
void set_enlarge_factor(float f) { enlarge_factor_ = f; }
float shrink_factor() const { return shrink_factor_; }
void set_shrink_factor(float f) { shrink_factor_ = f; }
size_type enlarge_threshold() const { return enlarge_threshold_; }
void set_enlarge_threshold(size_type t) { enlarge_threshold_ = t; }
size_type shrink_threshold() const { return shrink_threshold_; }
void set_shrink_threshold(size_type t) { shrink_threshold_ = t; }
size_type enlarge_size(size_type x) const { return static_cast<size_type>(x * enlarge_factor_); }
size_type shrink_size(size_type x) const { return static_cast<size_type>(x * shrink_factor_); }
bool consider_shrink() const { return consider_shrink_; }
void set_consider_shrink(bool t) { consider_shrink_ = t; }
bool use_empty() const { return use_empty_; }
void set_use_empty(bool t) { use_empty_ = t; }
bool use_deleted() const { return use_deleted_; }
void set_use_deleted(bool t) { use_deleted_ = t; }
size_type num_ht_copies() const { return static_cast<size_type>(num_ht_copies_); }
void inc_num_ht_copies() { ++num_ht_copies_; }
// Reset the enlarge and shrink thresholds
void reset_thresholds(size_type num_buckets)
{
set_enlarge_threshold(enlarge_size(num_buckets));
set_shrink_threshold(shrink_size(num_buckets));
// whatever caused us to reset already considered
set_consider_shrink(false);
}
// Caller is resposible for calling reset_threshold right after
// set_resizing_parameters.
void set_resizing_parameters(float shrink, float grow)
{
assert(shrink >= 0.0);
assert(grow <= 1.0);
if (shrink > grow / 2.0f)
shrink = grow / 2.0f; // otherwise we thrash hashtable size
set_shrink_factor(shrink);
set_enlarge_factor(grow);
}
// This is the smallest size a hashtable can be without being too crowded
// If you like, you can give a min #buckets as well as a min #elts
size_type min_buckets(size_type num_elts, size_type min_buckets_wanted)
{
float enlarge = enlarge_factor();
size_type sz = HT_MIN_BUCKETS; // min buckets allowed
while (sz < min_buckets_wanted || num_elts >= static_cast<size_type>(sz * enlarge))
{
// This just prevents overflowing size_type, since sz can exceed
// max_size() here.
if (static_cast<size_type>(sz * 2) < sz)
{
throw std::length_error("resize overflow"); // protect against overflow
}
sz *= 2;
}
return sz;
}
private:
template<class HashKey>
class hash_munger
{
public:
static size_t MungedHash(size_t hash) { return hash; }
};
// This matches when the hashtable key is a pointer.
template<class HashKey>
class hash_munger<HashKey*>
{
public:
static size_t MungedHash(size_t hash)
{
// TODO(csilvers): consider rotating instead:
// static const int shift = (sizeof(void *) == 4) ? 2 : 3;
// return (hash << (sizeof(hash) * 8) - shift)) | (hash >> shift);
// This matters if we ever change sparse/dense_hash_* to compare
// hashes before comparing actual values. It's speedy on x86.
return hash / sizeof(void*); // get rid of known-0 bits
}
};
size_type enlarge_threshold_; // table.size() * enlarge_factor
size_type shrink_threshold_; // table.size() * shrink_factor
float enlarge_factor_; // how full before resize
float shrink_factor_; // how empty before resize
// consider_shrink=true if we should try to shrink before next insert
bool consider_shrink_;
bool use_empty_; // used only by densehashtable, not sparsehashtable
bool use_deleted_; // false until delkey has been set
// num_ht_copies is a counter incremented every Copy/Move
unsigned int num_ht_copies_;
};
} // namespace sparsehash_internal
} // namespace sparsehash_internal
_END_GOOGLE_NAMESPACE_
#endif // UTIL_GTL_HASHTABLE_COMMON_H_
#endif // UTIL_GTL_HASHTABLE_COMMON_H_
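Earlier in this file, read_bigendian_number() and write_bigendian_number() are reformatted; the comments above them describe the scheme: the integer is written most-significant byte first, the byte count is independent of sizeof(IntType), and excess bytes are taken to be 0. A small self-contained sketch of that byte layout (the 4-byte length and the variable names are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    std::uint64_t value = 0x0102030405060708ull;
    unsigned char buf[4];
    std::size_t const length = sizeof(buf);

    // write: most significant of the low `length` bytes first
    for (std::size_t i = 0; i < length; ++i)
        buf[i] = static_cast<unsigned char>((value >> ((length - 1 - i) * 8)) & 255);

    // read back, possibly into a wider integer; missing high bytes stay 0
    std::uint64_t out = 0;
    for (std::size_t i = 0; i < length; ++i)
        out |= static_cast<std::uint64_t>(buf[i]) << ((length - 1 - i) * 8);

    std::printf("out = 0x%llx\n", static_cast<unsigned long long>(out)); // 0x5060708
    return 0;
}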


@ -33,87 +33,82 @@
#define UTIL_GTL_LIBC_ALLOCATOR_WITH_REALLOC_H_
#include <mapnik/sparsehash/internal/sparseconfig.h>
#include <stdlib.h> // for malloc/realloc/free
#include <stddef.h> // for ptrdiff_t
#include <new> // for placement new
#include <stdlib.h> // for malloc/realloc/free
#include <stddef.h> // for ptrdiff_t
#include <new> // for placement new
_START_GOOGLE_NAMESPACE_
template<class T>
class libc_allocator_with_realloc {
public:
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
class libc_allocator_with_realloc
{
public:
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
libc_allocator_with_realloc() {}
libc_allocator_with_realloc(const libc_allocator_with_realloc&) {}
~libc_allocator_with_realloc() {}
libc_allocator_with_realloc() {}
libc_allocator_with_realloc(const libc_allocator_with_realloc&) {}
~libc_allocator_with_realloc() {}
pointer address(reference r) const { return &r; }
const_pointer address(const_reference r) const { return &r; }
pointer address(reference r) const { return &r; }
const_pointer address(const_reference r) const { return &r; }
pointer allocate(size_type n, const_pointer = 0) {
return static_cast<pointer>(malloc(n * sizeof(value_type)));
}
void deallocate(pointer p, size_type) {
free(p);
}
pointer reallocate(pointer p, size_type n) {
return static_cast<pointer>(realloc(p, n * sizeof(value_type)));
}
pointer allocate(size_type n, const_pointer = 0) { return static_cast<pointer>(malloc(n * sizeof(value_type))); }
void deallocate(pointer p, size_type) { free(p); }
pointer reallocate(pointer p, size_type n) { return static_cast<pointer>(realloc(p, n * sizeof(value_type))); }
size_type max_size() const {
return static_cast<size_type>(-1) / sizeof(value_type);
}
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(value_type); }
void construct(pointer p, const value_type& val) {
new(p) value_type(val);
}
void destroy(pointer p) { p->~value_type(); }
void construct(pointer p, const value_type& val) { new (p) value_type(val); }
void destroy(pointer p) { p->~value_type(); }
template <class U>
libc_allocator_with_realloc(const libc_allocator_with_realloc<U>&) {}
template<class U>
libc_allocator_with_realloc(const libc_allocator_with_realloc<U>&)
{}
template<class U>
struct rebind {
typedef libc_allocator_with_realloc<U> other;
};
template<class U>
struct rebind
{
typedef libc_allocator_with_realloc<U> other;
};
};
// libc_allocator_with_realloc<void> specialization.
template<>
class libc_allocator_with_realloc<void> {
public:
typedef void value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef void* pointer;
typedef const void* const_pointer;
class libc_allocator_with_realloc<void>
{
public:
typedef void value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef void* pointer;
typedef const void* const_pointer;
template<class U>
struct rebind {
typedef libc_allocator_with_realloc<U> other;
};
template<class U>
struct rebind
{
typedef libc_allocator_with_realloc<U> other;
};
};
template<class T>
inline bool operator==(const libc_allocator_with_realloc<T>&,
const libc_allocator_with_realloc<T>&) {
return true;
inline bool operator==(const libc_allocator_with_realloc<T>&, const libc_allocator_with_realloc<T>&)
{
return true;
}
template<class T>
inline bool operator!=(const libc_allocator_with_realloc<T>&,
const libc_allocator_with_realloc<T>&) {
return false;
inline bool operator!=(const libc_allocator_with_realloc<T>&, const libc_allocator_with_realloc<T>&)
{
return false;
}
_END_GOOGLE_NAMESPACE_
#endif // UTIL_GTL_LIBC_ALLOCATOR_WITH_REALLOC_H_
#endif // UTIL_GTL_LIBC_ALLOCATOR_WITH_REALLOC_H_


@ -56,14 +56,16 @@ _START_GOOGLE_NAMESPACE_
// sizeof(big_)
typedef char small_;
struct big_ {
char dummy[2];
struct big_
{
char dummy[2];
};
// Identity metafunction.
template <class T>
struct identity_ {
typedef T type;
template<class T>
struct identity_
{
typedef T type;
};
// integral_constant, defined in tr1, is a wrapper for an integer
@ -72,63 +74,64 @@ struct identity_ {
// general integer_constant for compatibility with tr1.
template<class T, T v>
struct integral_constant {
static const T value = v;
typedef T value_type;
typedef integral_constant<T, v> type;
struct integral_constant
{
static const T value = v;
typedef T value_type;
typedef integral_constant<T, v> type;
};
template <class T, T v> const T integral_constant<T, v>::value;
template<class T, T v>
const T integral_constant<T, v>::value;
// Abbreviations: true_type and false_type are structs that represent boolean
// true and false values. Also define the boost::mpl versions of those names,
// true_ and false_.
typedef integral_constant<bool, true> true_type;
typedef integral_constant<bool, true> true_type;
typedef integral_constant<bool, false> false_type;
typedef true_type true_;
typedef true_type true_;
typedef false_type false_;
// if_ is a templatized conditional statement.
// if_<cond, A, B> is a compile time evaluation of cond.
// if_<>::type contains A if cond is true, B otherwise.
template<bool cond, typename A, typename B>
struct if_{
typedef A type;
struct if_
{
typedef A type;
};
template<typename A, typename B>
struct if_<false, A, B> {
typedef B type;
struct if_<false, A, B>
{
typedef B type;
};
// type_equals_ is a template type comparator, similar to Loki IsSameType.
// type_equals_<A, B>::value is true iff "A" is the same type as "B".
//
// New code should prefer base::is_same, defined in base/type_traits.h.
// It is functionally identical, but is_same is the standard spelling.
template<typename A, typename B>
struct type_equals_ : public false_ {
};
struct type_equals_ : public false_
{};
template<typename A>
struct type_equals_<A, A> : public true_ {
};
struct type_equals_<A, A> : public true_
{};
// and_ is a template && operator.
// and_<A, B>::value evaluates "A::value && B::value".
template<typename A, typename B>
struct and_ : public integral_constant<bool, (A::value && B::value)> {
};
struct and_ : public integral_constant<bool, (A::value && B::value)>
{};
// or_ is a template || operator.
// or_<A, B>::value evaluates "A::value || B::value".
template<typename A, typename B>
struct or_ : public integral_constant<bool, (A::value || B::value)> {
};
struct or_ : public integral_constant<bool, (A::value || B::value)>
{};
_END_GOOGLE_NAMESPACE_
#endif // BASE_TEMPLATE_UTIL_H_
#endif // BASE_TEMPLATE_UTIL_H_


@ -58,106 +58,188 @@
#define BASE_TYPE_TRAITS_H_
#include <mapnik/sparsehash/internal/sparseconfig.h>
#include <utility> // For pair
#include <utility> // For pair
#include <mapnik/sparsehash/template_util.h> // For true_type and false_type
#include <mapnik/sparsehash/template_util.h> // For true_type and false_type
_START_GOOGLE_NAMESPACE_
template <class T> struct is_integral;
template <class T> struct is_floating_point;
template <class T> struct is_pointer;
template<class T>
struct is_integral;
template<class T>
struct is_floating_point;
template<class T>
struct is_pointer;
// MSVC can't compile this correctly, and neither can gcc 3.3.5 (at least)
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// is_enum uses is_convertible, which is not available on MSVC.
template <class T> struct is_enum;
template<class T>
struct is_enum;
#endif
template <class T> struct is_reference;
template <class T> struct is_pod;
template <class T> struct has_trivial_constructor;
template <class T> struct has_trivial_copy;
template <class T> struct has_trivial_assign;
template <class T> struct has_trivial_destructor;
template <class T> struct remove_const;
template <class T> struct remove_volatile;
template <class T> struct remove_cv;
template <class T> struct remove_reference;
template <class T> struct add_reference;
template <class T> struct remove_pointer;
template <class T, class U> struct is_same;
template<class T>
struct is_reference;
template<class T>
struct is_pod;
template<class T>
struct has_trivial_constructor;
template<class T>
struct has_trivial_copy;
template<class T>
struct has_trivial_assign;
template<class T>
struct has_trivial_destructor;
template<class T>
struct remove_const;
template<class T>
struct remove_volatile;
template<class T>
struct remove_cv;
template<class T>
struct remove_reference;
template<class T>
struct add_reference;
template<class T>
struct remove_pointer;
template<class T, class U>
struct is_same;
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
template <class From, class To> struct is_convertible;
template<class From, class To>
struct is_convertible;
#endif
// is_integral is false except for the built-in integer types. A
// cv-qualified type is integral if and only if the underlying type is.
template <class T> struct is_integral : false_type { };
template<> struct is_integral<bool> : true_type { };
template<> struct is_integral<char> : true_type { };
template<> struct is_integral<unsigned char> : true_type { };
template<> struct is_integral<signed char> : true_type { };
template<class T>
struct is_integral : false_type
{};
template<>
struct is_integral<bool> : true_type
{};
template<>
struct is_integral<char> : true_type
{};
template<>
struct is_integral<unsigned char> : true_type
{};
template<>
struct is_integral<signed char> : true_type
{};
#if defined(_MSC_VER)
// wchar_t is not by default a distinct type from unsigned short in
// Microsoft C.
// See http://msdn2.microsoft.com/en-us/library/dh8che7s(VS.80).aspx
template<> struct is_integral<__wchar_t> : true_type { };
template<>
struct is_integral<__wchar_t> : true_type
{};
#else
template<> struct is_integral<wchar_t> : true_type { };
template<>
struct is_integral<wchar_t> : true_type
{};
#endif
template<> struct is_integral<short> : true_type { };
template<> struct is_integral<unsigned short> : true_type { };
template<> struct is_integral<int> : true_type { };
template<> struct is_integral<unsigned int> : true_type { };
template<> struct is_integral<long> : true_type { };
template<> struct is_integral<unsigned long> : true_type { };
template<>
struct is_integral<short> : true_type
{};
template<>
struct is_integral<unsigned short> : true_type
{};
template<>
struct is_integral<int> : true_type
{};
template<>
struct is_integral<unsigned int> : true_type
{};
template<>
struct is_integral<long> : true_type
{};
template<>
struct is_integral<unsigned long> : true_type
{};
#ifdef HAVE_LONG_LONG
template<> struct is_integral<long long> : true_type { };
template<> struct is_integral<unsigned long long> : true_type { };
template<>
struct is_integral<long long> : true_type
{};
template<>
struct is_integral<unsigned long long> : true_type
{};
#endif
template <class T> struct is_integral<const T> : is_integral<T> { };
template <class T> struct is_integral<volatile T> : is_integral<T> { };
template <class T> struct is_integral<const volatile T> : is_integral<T> { };
template<class T>
struct is_integral<const T> : is_integral<T>
{};
template<class T>
struct is_integral<volatile T> : is_integral<T>
{};
template<class T>
struct is_integral<const volatile T> : is_integral<T>
{};
// is_floating_point is false except for the built-in floating-point types.
// A cv-qualified type is integral if and only if the underlying type is.
template <class T> struct is_floating_point : false_type { };
template<> struct is_floating_point<float> : true_type { };
template<> struct is_floating_point<double> : true_type { };
template<> struct is_floating_point<long double> : true_type { };
template <class T> struct is_floating_point<const T>
: is_floating_point<T> { };
template <class T> struct is_floating_point<volatile T>
: is_floating_point<T> { };
template <class T> struct is_floating_point<const volatile T>
: is_floating_point<T> { };
template<class T>
struct is_floating_point : false_type
{};
template<>
struct is_floating_point<float> : true_type
{};
template<>
struct is_floating_point<double> : true_type
{};
template<>
struct is_floating_point<long double> : true_type
{};
template<class T>
struct is_floating_point<const T> : is_floating_point<T>
{};
template<class T>
struct is_floating_point<volatile T> : is_floating_point<T>
{};
template<class T>
struct is_floating_point<const volatile T> : is_floating_point<T>
{};
// is_pointer is false except for pointer types. A cv-qualified type (e.g.
// "int* const", as opposed to "int const*") is cv-qualified if and only if
// the underlying type is.
template <class T> struct is_pointer : false_type { };
template <class T> struct is_pointer<T*> : true_type { };
template <class T> struct is_pointer<const T> : is_pointer<T> { };
template <class T> struct is_pointer<volatile T> : is_pointer<T> { };
template <class T> struct is_pointer<const volatile T> : is_pointer<T> { };
template<class T>
struct is_pointer : false_type
{};
template<class T>
struct is_pointer<T*> : true_type
{};
template<class T>
struct is_pointer<const T> : is_pointer<T>
{};
template<class T>
struct is_pointer<volatile T> : is_pointer<T>
{};
template<class T>
struct is_pointer<const volatile T> : is_pointer<T>
{};
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
namespace internal {
template <class T> struct is_class_or_union {
template <class U> static small_ tester(void (U::*)());
template <class U> static big_ tester(...);
static const bool value = sizeof(tester<T>(0)) == sizeof(small_);
template<class T>
struct is_class_or_union
{
template<class U>
static small_ tester(void (U::*)());
template<class U>
static big_ tester(...);
static const bool value = sizeof(tester<T>(0)) == sizeof(small_);
};
// is_convertible chokes if the first argument is an array. That's why
// we use add_reference here.
template <bool NotUnum, class T> struct is_enum_impl
: is_convertible<typename add_reference<T>::type, int> { };
template<bool NotUnum, class T>
struct is_enum_impl : is_convertible<typename add_reference<T>::type, int>
{};
template <class T> struct is_enum_impl<true, T> : false_type { };
template<class T>
struct is_enum_impl<true, T> : false_type
{};
} // namespace internal
} // namespace internal
// Specified by TR1 [4.5.1] primary type categories.
@ -174,127 +256,212 @@ template <class T> struct is_enum_impl<true, T> : false_type { };
// Is-convertible-to-int check is done only if all other checks pass,
// because it can't be used with some types (e.g. void or classes with
// inaccessible conversion operators).
template <class T> struct is_enum
: internal::is_enum_impl<
is_same<T, void>::value ||
is_integral<T>::value ||
is_floating_point<T>::value ||
is_reference<T>::value ||
internal::is_class_or_union<T>::value,
T> { };
template<class T>
struct is_enum
: internal::is_enum_impl<is_same<T, void>::value || is_integral<T>::value || is_floating_point<T>::value ||
is_reference<T>::value || internal::is_class_or_union<T>::value,
T>
{};
template <class T> struct is_enum<const T> : is_enum<T> { };
template <class T> struct is_enum<volatile T> : is_enum<T> { };
template <class T> struct is_enum<const volatile T> : is_enum<T> { };
template<class T>
struct is_enum<const T> : is_enum<T>
{};
template<class T>
struct is_enum<volatile T> : is_enum<T>
{};
template<class T>
struct is_enum<const volatile T> : is_enum<T>
{};
#endif
// is_reference is false except for reference types.
template<typename T> struct is_reference : false_type {};
template<typename T> struct is_reference<T&> : true_type {};
template<typename T>
struct is_reference : false_type
{};
template<typename T>
struct is_reference<T&> : true_type
{};
// We can't get is_pod right without compiler help, so fail conservatively.
// We will assume it's false except for arithmetic types, enumerations,
// pointers and cv-qualified versions thereof. Note that std::pair<T,U>
// is not a POD even if T and U are PODs.
template <class T> struct is_pod
: integral_constant<bool, (is_integral<T>::value ||
is_floating_point<T>::value ||
template<class T>
struct is_pod : integral_constant<bool,
(is_integral<T>::value || is_floating_point<T>::value ||
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// is_enum is not available on MSVC.
is_enum<T>::value ||
// is_enum is not available on MSVC.
is_enum<T>::value ||
#endif
is_pointer<T>::value)> { };
template <class T> struct is_pod<const T> : is_pod<T> { };
template <class T> struct is_pod<volatile T> : is_pod<T> { };
template <class T> struct is_pod<const volatile T> : is_pod<T> { };
is_pointer<T>::value)>
{};
template<class T>
struct is_pod<const T> : is_pod<T>
{};
template<class T>
struct is_pod<volatile T> : is_pod<T>
{};
template<class T>
struct is_pod<const volatile T> : is_pod<T>
{};
// We can't get has_trivial_constructor right without compiler help, so
// fail conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial
// constructors. (3) array of a type with a trivial constructor.
// (4) const versions thereof.
template <class T> struct has_trivial_constructor : is_pod<T> { };
template <class T, class U> struct has_trivial_constructor<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_constructor<T>::value &&
has_trivial_constructor<U>::value)> { };
template <class A, int N> struct has_trivial_constructor<A[N]>
: has_trivial_constructor<A> { };
template <class T> struct has_trivial_constructor<const T>
: has_trivial_constructor<T> { };
template<class T>
struct has_trivial_constructor : is_pod<T>
{};
template<class T, class U>
struct has_trivial_constructor<std::pair<T, U>>
: integral_constant<bool, (has_trivial_constructor<T>::value && has_trivial_constructor<U>::value)>
{};
template<class A, int N>
struct has_trivial_constructor<A[N]> : has_trivial_constructor<A>
{};
template<class T>
struct has_trivial_constructor<const T> : has_trivial_constructor<T>
{};
// We can't get has_trivial_copy right without compiler help, so fail
// conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial copy
// constructors. (3) array of a type with a trivial copy constructor.
// (4) const versions thereof.
template <class T> struct has_trivial_copy : is_pod<T> { };
template <class T, class U> struct has_trivial_copy<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_copy<T>::value &&
has_trivial_copy<U>::value)> { };
template <class A, int N> struct has_trivial_copy<A[N]>
: has_trivial_copy<A> { };
template <class T> struct has_trivial_copy<const T> : has_trivial_copy<T> { };
template<class T>
struct has_trivial_copy : is_pod<T>
{};
template<class T, class U>
struct has_trivial_copy<std::pair<T, U>>
: integral_constant<bool, (has_trivial_copy<T>::value && has_trivial_copy<U>::value)>
{};
template<class A, int N>
struct has_trivial_copy<A[N]> : has_trivial_copy<A>
{};
template<class T>
struct has_trivial_copy<const T> : has_trivial_copy<T>
{};
// We can't get has_trivial_assign right without compiler help, so fail
// conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial copy
// constructors. (3) array of a type with a trivial assign constructor.
template <class T> struct has_trivial_assign : is_pod<T> { };
template <class T, class U> struct has_trivial_assign<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_assign<T>::value &&
has_trivial_assign<U>::value)> { };
template <class A, int N> struct has_trivial_assign<A[N]>
: has_trivial_assign<A> { };
template<class T>
struct has_trivial_assign : is_pod<T>
{};
template<class T, class U>
struct has_trivial_assign<std::pair<T, U>>
: integral_constant<bool, (has_trivial_assign<T>::value && has_trivial_assign<U>::value)>
{};
template<class A, int N>
struct has_trivial_assign<A[N]> : has_trivial_assign<A>
{};
// We can't get has_trivial_destructor right without compiler help, so
// fail conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial
// destructors. (3) array of a type with a trivial destructor.
// (4) const versions thereof.
template <class T> struct has_trivial_destructor : is_pod<T> { };
template <class T, class U> struct has_trivial_destructor<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_destructor<T>::value &&
has_trivial_destructor<U>::value)> { };
template <class A, int N> struct has_trivial_destructor<A[N]>
: has_trivial_destructor<A> { };
template <class T> struct has_trivial_destructor<const T>
: has_trivial_destructor<T> { };
template<class T>
struct has_trivial_destructor : is_pod<T>
{};
template<class T, class U>
struct has_trivial_destructor<std::pair<T, U>>
: integral_constant<bool, (has_trivial_destructor<T>::value && has_trivial_destructor<U>::value)>
{};
template<class A, int N>
struct has_trivial_destructor<A[N]> : has_trivial_destructor<A>
{};
template<class T>
struct has_trivial_destructor<const T> : has_trivial_destructor<T>
{};
// Specified by TR1 [4.7.1]
template<typename T> struct remove_const { typedef T type; };
template<typename T> struct remove_const<T const> { typedef T type; };
template<typename T> struct remove_volatile { typedef T type; };
template<typename T> struct remove_volatile<T volatile> { typedef T type; };
template<typename T> struct remove_cv {
typedef typename remove_const<typename remove_volatile<T>::type>::type type;
template<typename T>
struct remove_const
{
typedef T type;
};
template<typename T>
struct remove_const<T const>
{
typedef T type;
};
template<typename T>
struct remove_volatile
{
typedef T type;
};
template<typename T>
struct remove_volatile<T volatile>
{
typedef T type;
};
template<typename T>
struct remove_cv
{
typedef typename remove_const<typename remove_volatile<T>::type>::type type;
};
// Specified by TR1 [4.7.2] Reference modifications.
template<typename T> struct remove_reference { typedef T type; };
template<typename T> struct remove_reference<T&> { typedef T type; };
template<typename T>
struct remove_reference
{
typedef T type;
};
template<typename T>
struct remove_reference<T&>
{
typedef T type;
};
template <typename T> struct add_reference { typedef T& type; };
template <typename T> struct add_reference<T&> { typedef T& type; };
template<typename T>
struct add_reference
{
typedef T& type;
};
template<typename T>
struct add_reference<T&>
{
typedef T& type;
};
// Specified by TR1 [4.7.4] Pointer modifications.
template<typename T> struct remove_pointer { typedef T type; };
template<typename T> struct remove_pointer<T*> { typedef T type; };
template<typename T> struct remove_pointer<T* const> { typedef T type; };
template<typename T> struct remove_pointer<T* volatile> { typedef T type; };
template<typename T> struct remove_pointer<T* const volatile> {
typedef T type; };
template<typename T>
struct remove_pointer
{
typedef T type;
};
template<typename T>
struct remove_pointer<T*>
{
typedef T type;
};
template<typename T>
struct remove_pointer<T* const>
{
typedef T type;
};
template<typename T>
struct remove_pointer<T* volatile>
{
typedef T type;
};
template<typename T>
struct remove_pointer<T* const volatile>
{
typedef T type;
};
// Specified by TR1 [4.6] Relationships between types
template<typename T, typename U> struct is_same : public false_type { };
template<typename T> struct is_same<T, T> : public true_type { };
template<typename T, typename U>
struct is_same : public false_type
{};
template<typename T>
struct is_same<T, T> : public true_type
{};
// Specified by TR1 [4.6] Relationships between types
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
@ -309,22 +476,22 @@ namespace internal {
// had called it with an argument of type From. See Alexandrescu's
// _Modern C++ Design_ for more details on this sort of trick.
template <typename From, typename To>
struct ConvertHelper {
static small_ Test(To);
static big_ Test(...);
static From Create();
template<typename From, typename To>
struct ConvertHelper
{
static small_ Test(To);
static big_ Test(...);
static From Create();
};
} // namespace internal
} // namespace internal
// Inherits from true_type if From is convertible to To, false_type otherwise.
template <typename From, typename To>
template<typename From, typename To>
struct is_convertible
: integral_constant<bool,
sizeof(internal::ConvertHelper<From, To>::Test(
internal::ConvertHelper<From, To>::Create()))
== sizeof(small_)> {
};
sizeof(internal::ConvertHelper<From, To>::Test(internal::ConvertHelper<From, To>::Create())) ==
sizeof(small_)>
{};
#endif
_END_GOOGLE_NAMESPACE_
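
The ConvertHelper machinery above relies on overload resolution inside sizeof: Test(To) is viable only when a value of type From converts to To, and the two probe types have different sizes, so sizeof reveals which overload was picked. A minimal stand-alone sketch of the same trick (all names here are illustrative, not part of this header):

// Probe types with different sizes so sizeof() can report which overload won.
using small_probe = char;
struct big_probe { char pad[2]; };

template<typename From, typename To>
struct is_convertible_sketch
{
    static small_probe test(To);  // chosen only if From -> To converts
    static big_probe test(...);   // fallback otherwise
    static From make();           // never called; only named inside sizeof()
    static const bool value = sizeof(test(make())) == sizeof(small_probe);
};

static_assert(is_convertible_sketch<int, double>::value, "int converts to double");
static_assert(!is_convertible_sketch<int*, double>::value, "int* does not convert to double");

int main() { return 0; }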
@@ -333,10 +500,10 @@ _END_GOOGLE_NAMESPACE_
// these types are PODs, for human use. They may be made more contentful
// later. The typedef is just to make it legal to put a semicolon after
// these macros.
#define DECLARE_POD(TypeName) typedef int Dummy_Type_For_DECLARE_POD
#define DECLARE_POD(TypeName) typedef int Dummy_Type_For_DECLARE_POD
#define DECLARE_NESTED_POD(TypeName) DECLARE_POD(TypeName)
#define PROPAGATE_POD_FROM_TEMPLATE_ARGUMENT(TemplateName) \
#define PROPAGATE_POD_FROM_TEMPLATE_ARGUMENT(TemplateName) \
typedef int Dummy_Type_For_PROPAGATE_POD_FROM_TEMPLATE_ARGUMENT
#define ENFORCE_POD(TypeName) typedef int Dummy_Type_For_ENFORCE_POD
#endif // BASE_TYPE_TRAITS_H_
#endif // BASE_TYPE_TRAITS_H_

View file

@@ -96,7 +96,10 @@ class MAPNIK_DECL logger : public singleton<logger, CreateStatic>,
}
// format
static std::string const& get_format() { return format_; }
static std::string const& get_format()
{
return format_;
}
static void set_format(std::string const& format)
{
@@ -208,7 +211,10 @@ class base_log : public util::noncopyable
private:
#ifdef MAPNIK_LOG
inline bool check_severity() { return Severity >= logger::get_object_severity(object_name_); }
inline bool check_severity()
{
return Severity >= logger::get_object_severity(object_name_);
}
typename output_policy::stream_buffer streambuf_;
std::string object_name_;

View file

@@ -152,7 +152,10 @@ class hextree : private util::noncopyable
~hextree() {}
void setMaxColors(unsigned max_colors) { max_colors_ = max_colors; }
void setMaxColors(unsigned max_colors)
{
max_colors_ = max_colors;
}
void setGamma(double g)
{
@@ -163,9 +166,15 @@
}
}
void setTransMode(unsigned t) { trans_mode_ = t; }
void setTransMode(unsigned t)
{
trans_mode_ = t;
}
transparency_mode_t getTransMode() const { return trans_mode_; }
transparency_mode_t getTransMode() const
{
return trans_mode_;
}
// process alpha value based on trans_mode_
std::uint8_t preprocessAlpha(std::uint8_t a) const

View file

@@ -640,7 +640,10 @@ struct offset_converter
return cur_.cmd;
}
void push_vertex(vertex2d const& v) { vertices_.push_back(v); }
void push_vertex(vertex2d const& v)
{
vertices_.push_back(v);
}
Geometry& geom_;
double offset_;

View file

@@ -73,10 +73,10 @@ class linear_gradient_from_segment
{
public:
linear_gradient_from_segment(double x1, double y1, double x2, double y2)
: x1_(x1 * agg::gradient_subpixel_scale)
, y1_(y1 * agg::gradient_subpixel_scale)
, x2_(x2 * agg::gradient_subpixel_scale)
, y2_(y2 * agg::gradient_subpixel_scale)
: x1_(x1 * static_cast<double>(agg::gradient_subpixel_scale))
, y1_(y1 * static_cast<double>(agg::gradient_subpixel_scale))
, x2_(x2 * static_cast<double>(agg::gradient_subpixel_scale))
, y2_(y2 * static_cast<double>(agg::gradient_subpixel_scale))
{
double dx = x2_ - x1_;
double dy = y2_ - y1_;
@@ -492,8 +492,14 @@ class renderer_agg : util::noncopyable
}
#endif
inline VertexSource& source() const { return source_; }
inline AttributeSource const& attributes() const { return attributes_; }
inline VertexSource& source() const
{
return source_;
}
inline AttributeSource const& attributes() const
{
return attributes_;
}
private:

View file

@@ -207,12 +207,12 @@ struct tag_setter
TIFFSetField(output_, TIFFTAG_SAMPLESPERPIXEL, 4);
if (data.get_premultiplied())
{
uint16 extras[] = {EXTRASAMPLE_ASSOCALPHA};
std::uint16_t extras[] = {EXTRASAMPLE_ASSOCALPHA};
TIFFSetField(output_, TIFFTAG_EXTRASAMPLES, 1, extras);
}
else
{
uint16 extras[] = {EXTRASAMPLE_UNASSALPHA};
std::uint16_t extras[] = {EXTRASAMPLE_UNASSALPHA};
TIFFSetField(output_, TIFFTAG_EXTRASAMPLES, 1, extras);
}
if (config_.compression == COMPRESSION_DEFLATE || config_.compression == COMPRESSION_ADOBE_DEFLATE ||

View file

@@ -60,13 +60,25 @@ class file : public util::noncopyable
}
}
inline bool is_open() const { return file_ ? true : false; }
inline bool is_open() const
{
return file_ ? true : false;
}
explicit operator bool() const { return this->is_open(); }
explicit operator bool() const
{
return this->is_open();
}
inline std::FILE* get() const { return file_.get(); }
inline std::FILE* get() const
{
return file_.get();
}
inline std::size_t size() const { return size_; }
inline std::size_t size() const
{
return size_;
}
inline data_type data() const
{

View file

@@ -77,7 +77,10 @@ class singleton
singleton(const singleton& rhs);
singleton& operator=(const singleton&);
static void onDeadReference() { throw std::runtime_error("dead reference!"); }
static void onDeadReference()
{
throw std::runtime_error("dead reference!");
}
static void DestroySingleton()
{

View file

@@ -11,4 +11,3 @@ target_link_libraries(input-gdal ${_plugin_visibility}
mapnik::datasource-base
${GDAL_LIBRARIES}
)

View file

@@ -8,4 +8,3 @@ target_link_libraries(input-geobuf ${_plugin_visibility}
mapnik::mapnik
mapnik::datasource-base
)

View file

@@ -144,11 +144,20 @@ class ogr_layer_ptr
#endif
}
std::string const& layer_name() const { return layer_name_; }
std::string const& layer_name() const
{
return layer_name_;
}
OGRLayer* layer() const { return layer_; }
OGRLayer* layer() const
{
return layer_;
}
bool is_valid() const { return is_valid_; }
bool is_valid() const
{
return is_valid_;
}
private:

View file

@@ -236,11 +236,20 @@ class Connection
return std::make_shared<ResultSet>(result);
}
std::string client_encoding() const { return PQparameterStatus(conn_, "client_encoding"); }
std::string client_encoding() const
{
return PQparameterStatus(conn_, "client_encoding");
}
bool isOK() const { return (!closed_) && (PQstatus(conn_) != CONNECTION_BAD); }
bool isOK() const
{
return (!closed_) && (PQstatus(conn_) != CONNECTION_BAD);
}
bool isPending() const { return pending_; }
bool isPending() const
{
return pending_;
}
void close()
{

View file

@@ -171,15 +171,30 @@ class shape_file : public mapnik::util::mapped_memory_file
file_.read(reinterpret_cast<char*>(&envelope), sizeof(envelope));
}
inline void rewind() { seek(100); }
inline void rewind()
{
seek(100);
}
inline void seek(std::streampos pos) { file_.seekg(pos, std::ios::beg); }
inline void seek(std::streampos pos)
{
file_.seekg(pos, std::ios::beg);
}
inline std::streampos pos() { return file_.tellg(); }
inline std::streampos pos()
{
return file_.tellg();
}
inline bool is_eof() { return file_.eof(); }
inline bool is_eof()
{
return file_.eof();
}
inline bool is_good() { return file_.good(); }
inline bool is_good()
{
return file_.good();
}
};
#endif // SHAPEFILE_HPP

View file

@@ -160,7 +160,10 @@ class sqlite_connection
return rc;
}
sqlite3* operator*() { return db_; }
sqlite3* operator*()
{
return db_;
}
bool load_extension(std::string const& ext_path)
{

View file

@@ -65,7 +65,6 @@ if(MSVC)
target_compile_options(mapnik PUBLIC "/bigobj" "/wd4068" "/wd4661" "/wd4910")
endif()
target_sources(mapnik PRIVATE
cairo_io.cpp
color_factory.cpp

View file

@@ -128,23 +128,25 @@ proj_transform::proj_transform(projection const& source, projection const& dest)
transform_ = proj_create_crs_to_crs(ctx_, source.params().c_str(), dest.params().c_str(), nullptr);
if (transform_ == nullptr)
{
throw std::runtime_error(std::string("Cannot initialize proj_transform (crs_to_crs) for given projections: '") +
source.params() + "'->'" + dest.params() +
throw std::runtime_error(
std::string("Cannot initialize proj_transform (crs_to_crs) for given projections: '") +
source.params() + "'->'" + dest.params() +
#if MAPNIK_PROJ_VERSION >= 80000
"' because of " + std::string(proj_context_errno_string(ctx_, proj_context_errno(ctx_))));
"' because of " + std::string(proj_context_errno_string(ctx_, proj_context_errno(ctx_))));
#else
"'");
"'");
#endif
}
PJ* transform_gis = proj_normalize_for_visualization(ctx_, transform_);
if (transform_gis == nullptr)
{
throw std::runtime_error(std::string("Cannot initialize proj_transform (normalize) for given projections: '") +
source.params() + "'->'" + dest.params() +
throw std::runtime_error(
std::string("Cannot initialize proj_transform (normalize) for given projections: '") +
source.params() + "'->'" + dest.params() +
#if MAPNIK_PROJ_VERSION >= 80000
"' because of " + std::string(proj_context_errno_string(ctx_, proj_context_errno(ctx_))));
"' because of " + std::string(proj_context_errno_string(ctx_, proj_context_errno(ctx_))));
#else
"'");
"'");
#endif
}
proj_destroy(transform_);
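
For context, the constructor above follows the usual PROJ 6+ C API pattern: build a crs-to-crs transform, normalize it so coordinates are taken in visualization (lon/lat) axis order, then transform coordinates with the normalized object. A stand-alone sketch of that pattern, assuming illustrative EPSG codes and a sample coordinate rather than Mapnik's own projection parameters:

#include <proj.h>
#include <cstdio>

int main()
{
    PJ_CONTEXT* ctx = proj_context_create();
    // Illustrative CRS strings; mapnik passes source.params()/dest.params() instead.
    PJ* raw = proj_create_crs_to_crs(ctx, "EPSG:4326", "EPSG:3857", nullptr);
    if (raw == nullptr)
        return 1;
    // Force lon/lat ordering, mirroring proj_normalize_for_visualization above.
    PJ* vis = proj_normalize_for_visualization(ctx, raw);
    proj_destroy(raw); // keep only the normalized transform, as in the constructor
    if (vis == nullptr)
        return 1;
    PJ_COORD in = proj_coord(13.4, 52.5, 0, 0); // lon, lat (sample point)
    PJ_COORD out = proj_trans(vis, PJ_FWD, in);
    std::printf("%.2f %.2f\n", out.xy.x, out.xy.y);
    proj_destroy(vis);
    proj_context_destroy(ctx);
    return 0;
}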

View file

@@ -96,19 +96,49 @@ class tiff_reader : public image_reader
unsigned width() const final;
unsigned height() const final;
boost::optional<box2d<double>> bounding_box() const final;
inline bool has_alpha() const final { return has_alpha_; }
inline bool has_alpha() const final
{
return has_alpha_;
}
void read(unsigned x, unsigned y, image_rgba8& image) final;
image_any read(unsigned x, unsigned y, unsigned width, unsigned height) final;
// methods specific to tiff reader
unsigned bits_per_sample() const { return bps_; }
unsigned sample_format() const { return sample_format_; }
unsigned photometric() const { return photometric_; }
bool is_tiled() const { return is_tiled_; }
unsigned tile_width() const { return tile_width_; }
unsigned tile_height() const { return tile_height_; }
unsigned rows_per_strip() const { return rows_per_strip_; }
unsigned planar_config() const { return planar_config_; }
unsigned compression() const { return compression_; }
unsigned bits_per_sample() const
{
return bps_;
}
unsigned sample_format() const
{
return sample_format_;
}
unsigned photometric() const
{
return photometric_;
}
bool is_tiled() const
{
return is_tiled_;
}
unsigned tile_width() const
{
return tile_width_;
}
unsigned tile_height() const
{
return tile_height_;
}
unsigned rows_per_strip() const
{
return rows_per_strip_;
}
unsigned planar_config() const
{
return planar_config_;
}
unsigned compression() const
{
return compression_;
}
private:
tiff_reader(const tiff_reader&);
@@ -253,8 +283,8 @@ void tiff_reader<T>::init()
read_method_ = stripped;
}
// TIFFTAG_EXTRASAMPLES
uint16 extrasamples = 0;
uint16* sampleinfo = nullptr;
std::uint16_t extrasamples = 0;
std::uint16_t* sampleinfo = nullptr;
if (TIFFGetField(tif, TIFFTAG_EXTRASAMPLES, &extrasamples, &sampleinfo))
{
has_alpha_ = true;
@@ -265,7 +295,7 @@ void tiff_reader<T>::init()
}
// Try extracting bounding box from geoTIFF tags
{
uint16 count = 0;
std::uint16_t count = 0;
double* pixelscale;
double* tilepoint;
if (TIFFGetField(tif, 33550, &count, &pixelscale) == 1 && count == 3 &&

File diff suppressed because it is too large

View file

@@ -3,20 +3,21 @@
#include "catch.hpp"
#define TRY_CHECK( expr ) \
try { \
CHECK( expr ); \
} \
catch ( Catch::TestFailureException & ) { \
/* thrown by CHECK after it catches and reports */ \
/* an exception from expr => swallow this */ \
#define TRY_CHECK(expr) \
try \
{ \
CHECK(expr); \
} catch (Catch::TestFailureException&) \
{ \
/* thrown by CHECK after it catches and reports */ \
/* an exception from expr => swallow this */ \
}
#define TRY_CHECK_FALSE( expr ) \
try { \
CHECK_FALSE( expr ); \
} \
catch ( Catch::TestFailureException & ) { \
}
#define TRY_CHECK_FALSE(expr) \
try \
{ \
CHECK_FALSE(expr); \
} catch (Catch::TestFailureException&) \
{}
#endif // TEST_CATCH_EXT_HPP
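
These wrappers behave like CHECK/CHECK_FALSE except that the Catch::TestFailureException raised after CHECK reports an exception from the expression is swallowed, so the rest of the section keeps running. A hypothetical usage sketch; throwing_call, the include path, and the stand-alone main are assumptions rather than part of the test suite:

#define CATCH_CONFIG_MAIN // stand-alone build only; mapnik's suite provides its own main
#include "catch_ext.hpp"
#include <stdexcept>

static bool throwing_call()
{
    throw std::runtime_error("boom"); // stands in for any expression that might throw
}

TEST_CASE("TRY_CHECK keeps the section alive")
{
    TRY_CHECK(throwing_call());       // failure is reported, exception swallowed
    TRY_CHECK_FALSE(throwing_call()); // execution still reaches this line
}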

View file

@ -343,7 +343,10 @@ TEST_CASE("postgis")
REQUIRE(ds != nullptr);
auto featureset = all_features(ds);
auto feature = featureset->next();
CHECKED_IF(feature != nullptr) { CHECK(feature->get("email").to_string() == "fake@mail.ru"); }
CHECKED_IF(feature != nullptr)
{
CHECK(feature->get("email").to_string() == "fake@mail.ru");
}
}
SECTION("Postgis interpolates !@uservar! tokens in query")

View file

@ -381,7 +381,10 @@ TEST_CASE("image class")
// swap empty <-> non-empty
CHECK_NOTHROW(im.swap(im3));
CHECK(im3.data() == nullptr);
CHECKED_IF(im.data() != nullptr) { CHECK(im(0, 0) == blue); }
CHECKED_IF(im.data() != nullptr)
{
CHECK(im(0, 0) == blue);
}
}
} // END TEST CASE

View file

@@ -167,9 +167,15 @@ void test_tiff_reader(std::string const& pattern)
TEST_CASE("tiff io")
{
SECTION("tiff-reader rgb8+rgba8") { test_tiff_reader<mapnik::image_rgba8>("tiff_rgb"); }
SECTION("tiff-reader rgb8+rgba8")
{
test_tiff_reader<mapnik::image_rgba8>("tiff_rgb");
}
SECTION("tiff-reader gray8") { test_tiff_reader<mapnik::image_gray8>("tiff_gray"); }
SECTION("tiff-reader gray8")
{
test_tiff_reader<mapnik::image_gray8>("tiff_gray");
}
SECTION("scan rgb8 striped")
{

View file

@@ -32,23 +32,62 @@
TEST_CASE("saturated cast")
{
SECTION("int8") { CAST_ASSERT(std::int8_t); }
SECTION("int16") { CAST_ASSERT(std::int16_t); }
SECTION("int32") { CAST_ASSERT(std::int32_t); }
SECTION("int8")
{
CAST_ASSERT(std::int8_t);
}
SECTION("int16")
{
CAST_ASSERT(std::int16_t);
}
SECTION("int32")
{
CAST_ASSERT(std::int32_t);
}
SECTION("int64") { CAST_ASSERT2(std::int64_t); }
SECTION("intmax") { CAST_ASSERT2(std::intmax_t); }
SECTION("intptr") { CAST_ASSERT2(std::intptr_t); }
SECTION("int64")
{
CAST_ASSERT2(std::int64_t);
}
SECTION("intmax")
{
CAST_ASSERT2(std::intmax_t);
}
SECTION("intptr")
{
CAST_ASSERT2(std::intptr_t);
}
SECTION("uint8") { CAST_ASSERT(std::uint8_t); }
SECTION("uint16") { CAST_ASSERT(std::uint16_t); }
SECTION("uint32") { CAST_ASSERT(std::uint32_t); }
SECTION("uint8")
{
CAST_ASSERT(std::uint8_t);
}
SECTION("uint16")
{
CAST_ASSERT(std::uint16_t);
}
SECTION("uint32")
{
CAST_ASSERT(std::uint32_t);
}
SECTION("uint64") { CAST_ASSERT3(std::uint64_t); }
SECTION("uintmax") { CAST_ASSERT3(std::uintmax_t); }
SECTION("uintptr") { CAST_ASSERT3(std::uintptr_t); }
SECTION("uint64")
{
CAST_ASSERT3(std::uint64_t);
}
SECTION("uintmax")
{
CAST_ASSERT3(std::uintmax_t);
}
SECTION("uintptr")
{
CAST_ASSERT3(std::uintptr_t);
}
SECTION("float") { CAST_ASSERT4(float); }
SECTION("float")
{
CAST_ASSERT4(float);
}
SECTION("freeform")
{