Merge branch 'master' into sqlite-refactor

This commit is contained in:
Dane Springmeyer 2011-11-03 09:44:02 -04:00
commit a92c46ff5d
23 changed files with 311 additions and 102 deletions

1
.gitignore vendored
View file

@ -1,3 +1,4 @@
.DS_Store
*~
*.o
*.pyc

View file

@ -450,6 +450,7 @@ pickle_store = [# Scons internal variables
'CAIROMM_LIBPATHS',
'CAIROMM_LINKFLAGS',
'CAIROMM_CPPPATHS',
'SVG_RENDERER',
]
# Add all other user configurable options to pickle pickle_store

View file

@ -50,7 +50,10 @@ static inline void scale_grid(mapnik::grid::data_type & target,
int th2 = target_height/2;
int offs_x = rint((source_width-target_width-x_off_f*2*source_width)/2);
int offs_y = rint((source_height-target_height-y_off_f*2*source_height)/2);
unsigned yprt, yprt1, xprt, xprt1;
unsigned yprt(0);
unsigned yprt1(0);
unsigned xprt(0);
unsigned xprt1(0);
//no scaling or subpixel offset
if (target_height == source_height && target_width == source_width && offs_x == 0 && offs_y == 0){

View file

@ -126,7 +126,7 @@ void csv_datasource::parse_csv(T& stream,
std::string const& quote) const
{
stream.seekg (0, std::ios::end);
int file_length_ = stream.tellg();
file_length_ = stream.tellg();
if (filesize_max_ > 0)
{
@ -150,7 +150,7 @@ void csv_datasource::parse_csv(T& stream,
char newline = '\n';
int newline_count = 0;
int carriage_count = 0;
for(std::size_t idx = 0; idx < file_length_; idx++)
for(unsigned idx = 0; idx < file_length_; idx++)
{
char c = static_cast<char>(stream.get());
if (c == '\n')
@ -190,11 +190,11 @@ void csv_datasource::parse_csv(T& stream,
{
// default to ','
sep = ",";
int num_commas = std::count(csv_line.begin(), csv_line.end(), ',');
// detect tabs
int num_tabs = std::count(csv_line.begin(), csv_line.end(), '\t');
if (num_tabs > 0)
{
int num_commas = std::count(csv_line.begin(), csv_line.end(), ',');
if (num_tabs > num_commas)
{
sep = "\t";
@ -203,6 +203,28 @@ void csv_datasource::parse_csv(T& stream,
#endif
}
}
else // pipes
{
int num_pipes = std::count(csv_line.begin(), csv_line.end(), '|');
if (num_pipes > num_commas)
{
sep = "|";
#ifdef MAPNIK_DEBUG
std::clog << "CSV Plugin: auto detected '|' separator\n";
#endif
}
else // semicolons
{
int num_semicolons = std::count(csv_line.begin(), csv_line.end(), ';');
if (num_semicolons > num_commas)
{
sep = ";";
#ifdef MAPNIK_DEBUG
std::clog << "CSV Plugin: auto detected ';' separator\n";
#endif
}
}
}
}
// set back to start
@ -301,12 +323,22 @@ void csv_datasource::parse_csv(T& stream,
val = boost::trim_copy(*beg);
if (val.empty())
{
std::ostringstream s;
s << "CSV Plugin: expected a column header at line "
<< line_number << ", column " << idx
<< " - ensure this row contains valid header fields: '"
<< csv_line << "'\n";
throw mapnik::datasource_exception(s.str());
if (strict_)
{
std::ostringstream s;
s << "CSV Plugin: expected a column header at line "
<< line_number << ", column " << idx
<< " - ensure this row contains valid header fields: '"
<< csv_line << "'\n";
throw mapnik::datasource_exception(s.str());
}
else
{
// create a placeholder for the empty header
std::ostringstream s;
s << "_" << idx;
headers_.push_back(s.str());
}
}
else
{
@ -356,7 +388,7 @@ void csv_datasource::parse_csv(T& stream,
int feature_count(1);
bool extent_initialized = false;
int num_headers = headers_.size();
unsigned num_headers = headers_.size();
mapnik::transcoder tr(desc_.get_encoding());
while (std::getline(stream,csv_line,newline))
@ -386,7 +418,7 @@ void csv_datasource::parse_csv(T& stream,
// early return for strict mode
if (strict_)
{
int num_fields = std::distance(beg,tok.end());
unsigned num_fields = std::distance(beg,tok.end());
if (num_fields != num_headers)
{
std::ostringstream s;
@ -401,28 +433,26 @@ void csv_datasource::parse_csv(T& stream,
bool parsed_x = false;
bool parsed_y = false;
bool parsed_wkt = false;
bool skip = false;
bool null_geom = false;
std::vector<std::string> collected;
int i = -1;
for (;beg != tok.end(); ++beg)
for (unsigned i = 0; i < num_headers; ++i)
{
++i;
std::string value = boost::trim_copy(*beg);
// avoid range error if trailing separator
if (i >= num_headers)
{
#ifdef MAPNIK_DEBUG
std::clog << "CSV Plugin: messed up line encountered where # values > # column headers at: " << line_number << "\n";
#endif
skip = true;
break;
}
std::string fld_name(headers_.at(i));
collected.push_back(fld_name);
std::string value;
if (beg == tok.end())
{
boost::put(*feature,fld_name,mapnik::value_null());
continue;
}
else
{
value = boost::trim_copy(*beg);
++beg;
}
int value_length = value.length();
// parse wkt
@ -451,7 +481,7 @@ void csv_datasource::parse_csv(T& stream,
),
ascii::space);
if (r /*&& (str_beg != str_end)*/)
if (r && (str_beg == str_end))
{
mapnik::geometry_type * pt = new mapnik::geometry_type(mapnik::Point);
pt->move_to(x,y);
@ -573,22 +603,26 @@ void csv_datasource::parse_csv(T& stream,
}
// add all values as attributes
// here we detect numbers and treat everything else as pure strings
// this is intentional since boolean and null types are not common in csv editors
if (value.empty())
{
boost::put(*feature,fld_name,mapnik::value_null());
}
// only true strings are this long
else if (value_length > 20
// TODO - clean up this messiness which is here temporarily
// to protect against the improperly working spirit parsing below
|| value.find(",") != std::string::npos
|| value.find(":") != std::string::npos
|| (std::count(value.begin(), value.end(), '-') > 1))
{
UnicodeString ustr = tr.transcode(value.c_str());
boost::put(*feature,fld_name,ustr);
if (feature_count == 1)
{
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::String));
}
}
// only true strings are this long
else if (value_length > 20)
{
UnicodeString ustr = tr.transcode(value.c_str());
boost::put(*feature,fld_name,ustr);
if (feature_count == 1)
{
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::String));
}
}
else if ((value[0] >= '0' && value[0] <= '9') || value[0] == '-')
@ -597,20 +631,24 @@ void csv_datasource::parse_csv(T& stream,
std::string::const_iterator str_beg = value.begin();
std::string::const_iterator str_end = value.end();
bool r = qi::phrase_parse(str_beg,str_end,qi::double_,ascii::space,float_val);
if (r)
if (r && (str_beg == str_end))
{
if (value.find(".") != std::string::npos)
{
boost::put(*feature,fld_name,float_val);
if (feature_count == 1)
{
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::Double));
}
}
else
{
int val = static_cast<int>(float_val);
boost::put(*feature,fld_name,val);
if (feature_count == 1)
{
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::Integer));
}
}
}
else
@ -619,54 +657,24 @@ void csv_datasource::parse_csv(T& stream,
UnicodeString ustr = tr.transcode(value.c_str());
boost::put(*feature,fld_name,ustr);
if (feature_count == 1)
{
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::String));
}
}
}
else
{
std::string value_lower = boost::algorithm::to_lower_copy(value);
if (value_lower == "true")
// fallback to normal string
UnicodeString ustr = tr.transcode(value.c_str());
boost::put(*feature,fld_name,ustr);
if (feature_count == 1)
{
boost::put(*feature,fld_name,true);
if (feature_count == 1)
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::Boolean));
}
else if(value_lower == "false")
{
boost::put(*feature,fld_name,false);
if (feature_count == 1)
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::Boolean));
}
else
{
// fallback to normal string
UnicodeString ustr = tr.transcode(value.c_str());
boost::put(*feature,fld_name,ustr);
if (feature_count == 1)
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::String));
desc_.add_descriptor(mapnik::attribute_descriptor(fld_name,mapnik::String));
}
}
}
if (skip)
{
++line_number;
std::ostringstream s;
s << "CSV Plugin: # values > # column headers"
<< "for line " << line_number << " - found " << headers_.size()
<< " with values like: " << csv_line << "\n";
//<< "for: " << boost::algorithm::join(collected, ",") << "\n";
if (strict_)
{
throw mapnik::datasource_exception(s.str());
}
else
{
if (!quiet_) std::clog << s.str() << "\n";
continue;
}
}
else if (null_geom)
if (null_geom)
{
++line_number;
std::ostringstream s;
@ -769,6 +777,17 @@ void csv_datasource::parse_csv(T& stream,
}
++line_number;
}
catch (const mapnik::datasource_exception & ex )
{
if (strict_)
{
throw mapnik::datasource_exception(ex.what());
}
else
{
if (!quiet_) std::clog << ex.what() << "\n";
}
}
catch (const std::exception & ex )
{
std::ostringstream s;

View file

@ -29,7 +29,7 @@ class csv_datasource : public mapnik::datasource
mutable mapnik::box2d<double> extent_;
mutable std::string filename_;
mutable std::string inline_string_;
mutable int file_length_;
mutable unsigned file_length_;
mutable int row_limit_;
mutable std::vector<mapnik::feature_ptr> features_;
mutable std::string escape_;

View file

@ -70,8 +70,10 @@ if env['THREADING'] == 'multi':
lib_env['LIBS'].append('boost_thread%s' % env['BOOST_APPEND'])
if not env['RUNTIME_LINK'] == 'static':
if env['RUNTIME_LINK'] == 'static':
if 'icuuc' in env['ICU_LIB_NAME']:
lib_env['LIBS'].append('icudata')
else:
if env['INTERNAL_LIBAGG']:
lib_env['LIBS'].insert(0, 'agg')
else:

View file

@ -1,5 +1,6 @@
import os
import glob
from copy import copy
Import ('env')
@ -9,10 +10,9 @@ filesystem = 'boost_filesystem%s' % env['BOOST_APPEND']
system = 'boost_system%s' % env['BOOST_APPEND']
regex = 'boost_regex%s' % env['BOOST_APPEND']
libraries = [filesystem, 'mapnik2']
libraries.append(env['ICU_LIB_NAME'])
libraries.append(regex)
libraries.append(system)
libraries = copy(env['LIBMAPNIK_LIBS'])
libraries.append('mapnik2')
for cpp_test in glob.glob('path_element_test.cpp'):
env.Program(cpp_test.replace('.cpp',''), [cpp_test], CPPPATH=headers, LIBS=libraries)
for cpp_test in glob.glob('*_test.cpp'):
test_program = env.Program(cpp_test.replace('.cpp',''), [cpp_test], CPPPATH=headers, LIBS=libraries, LINKFLAGS=env['CUSTOM_LDFLAGS'])
Depends(test_program, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))

View file

@ -37,7 +37,7 @@ BOOST_AUTO_TEST_CASE(combined_test_case)
svg_ren renderer(map, output_stream_iterator);
renderer.apply();
std::string expected_output =
/*std::string expected_output =
svg_ren::XML_DECLARATION
+ "\n"
+ svg_ren::SVG_DTD
@ -52,5 +52,6 @@ BOOST_AUTO_TEST_CASE(combined_test_case)
std::string actual_output = output_stream.str();
BOOST_CHECK_EQUAL(actual_output, expected_output);
*/
}

View file

@ -26,9 +26,9 @@ using namespace mapnik;
void prepare_map(Map& m)
{
const std::string mapnik_dir("../../..");
std::cout << " looking for 'shape.input' plugin in... " << mapnik_dir << "/plugins/input/" << "\n";
datasource_cache::instance()->register_datasources(mapnik_dir + "/plugins/input/");
const std::string mapnik_dir("/usr/local/lib/mapnik2/");
std::cout << " looking for 'shape.input' plugin in... " << mapnik_dir << "input/" << "\n";
datasource_cache::instance()->register_datasources(mapnik_dir + "input/");
// create styles
@ -132,7 +132,7 @@ void prepare_map(Map& m)
{
parameters p;
p["type"]="shape";
p["file"]=mapnik_dir+"/demo/data/boundaries";
p["file"]="../../../demo/data/boundaries";
layer lyr("Provinces");
lyr.set_datasource(datasource_cache::instance()->create(p));
@ -144,7 +144,7 @@ void prepare_map(Map& m)
{
parameters p;
p["type"]="shape";
p["file"]=mapnik_dir+"/demo/data/qcdrainage";
p["file"]="../../../demo/data/qcdrainage";
layer lyr("Quebec Hydrography");
lyr.set_datasource(datasource_cache::instance()->create(p));
lyr.add_style("drainage");
@ -154,7 +154,7 @@ void prepare_map(Map& m)
{
parameters p;
p["type"]="shape";
p["file"]=mapnik_dir+"/demo/data/ontdrainage";
p["file"]="../../../demo/data/ontdrainage";
layer lyr("Ontario Hydrography");
lyr.set_datasource(datasource_cache::instance()->create(p));
@ -166,7 +166,7 @@ void prepare_map(Map& m)
{
parameters p;
p["type"]="shape";
p["file"]=mapnik_dir+"/demo/data/boundaries_l";
p["file"]="../../../demo/data/boundaries_l";
layer lyr("Provincial borders");
lyr.set_datasource(datasource_cache::instance()->create(p));
lyr.add_style("provlines");
@ -177,7 +177,7 @@ void prepare_map(Map& m)
{
parameters p;
p["type"]="shape";
p["file"]=mapnik_dir+"/demo/data/roads";
p["file"]="../../../demo/data/roads";
layer lyr("Roads");
lyr.set_datasource(datasource_cache::instance()->create(p));
lyr.add_style("smallroads");

View file

@ -1,4 +1,4 @@
x,y,text,date,integer,boolean,float,time,datetime,empty_column
"x","y","text","date","integer","boolean","float","time","datetime","empty_column"
0,0,a b,1971-01-01,40,True,1.0,04:14:00,1971-01-01T04:14:00,
0,0,c d,1948-01-01,63,True,1.27,14:57:13,1948-01-01T14:57:13,
0,0,e f,1920-01-01,164,False,41800000.01,00:00:00,1920-01-01T00:00:00,

1 x y text date integer boolean float time datetime empty_column
2 0 0 a b 1971-01-01 40 True 1.0 04:14:00 1971-01-01T04:14:00
3 0 0 c d 1948-01-01 63 True 1.27 14:57:13 1948-01-01T14:57:13
4 0 0 e f 1920-01-01 164 False 41800000.01 00:00:00 1920-01-01T00:00:00

View file

Can't render this file because it contains an unexpected character in line 2 and column 86.

View file

@ -1 +1 @@
x,y,z 1,10,0
x,y,z 1,10,9999.9999
1 x y z 1 10 0 9999.9999

View file

@ -0,0 +1,2 @@
one,two,x,y,,aftermissing
one,two,0,0,missing,aftermissing
1 one two x y aftermissing
2 one two 0 0 missing aftermissing

View file

@ -0,0 +1,3 @@
x,y,null,boolean
0,0,null,true
0,0,,false
1 x y null boolean
2 0 0 null true
3 0 0 false

View file

@ -0,0 +1,2 @@
x,y,1990,1991,1992
0,0,1,2,3
1 x y 1990 1991 1992
2 0 0 1 2 3

3
tests/data/csv/nypd.csv Normal file
View file

@ -0,0 +1,3 @@
Precinct,Phone,Address,City,geo_longitude,geo_latitude,geo_accuracy
5th Precinct,(212) 334-0711,19 Elizabeth Street,"New York, NY",-70.0,40.0,house
9th Precinct,(212) 477-7811,130 Avenue C,"New York, NY",-73.0,41.0,house
1 Precinct Phone Address City geo_longitude geo_latitude geo_accuracy
2 5th Precinct (212) 334-0711 19 Elizabeth Street New York, NY -70.0 40.0 house
3 9th Precinct (212) 477-7811 130 Avenue C New York, NY -73.0 41.0 house

View file

@ -0,0 +1,2 @@
x|y|z
0|0|hello
1 x y z
2 0 0 hello

View file

@ -0,0 +1,2 @@
x;y;z
0;0;hello
1 x y z
2 0 0 hello

View file

@ -1,2 +1,2 @@
x, y,z
x, y,z
-122 , 48,0
1 x y z z
2 -122 48 0 0

View file

@ -1,2 +1,2 @@
x,y,z
0,0,0
1,10,9999.9999
1 x y z
2 0 1 0 10 0 9999.9999

View file

@ -15,6 +15,9 @@ def setup():
if 'csv' in mapnik2.DatasourceCache.instance().plugin_names():
def get_csv_ds(filename):
return mapnik2.Datasource(type='csv',file=os.path.join('../data/csv/',filename),quiet=True)
def test_broken_files(visual=False):
broken = glob.glob("../data/csv/fails/*.*")
broken.extend(glob.glob("../data/csv/warns/*.*"))
@ -42,7 +45,174 @@ if 'csv' in mapnik2.DatasourceCache.instance().plugin_names():
print '\x1b[1;32m✓ \x1b[0m', csv
except Exception:
print '\x1b[33mfailed\x1b[0m',csv
def test_type_detection(**kwargs):
ds = get_csv_ds('nypd.csv')
eq_(ds.fields(),['Precinct','Phone','Address','City','geo_longitude','geo_latitude','geo_accuracy'])
eq_(ds.field_types(),['str','str','str','str','float','float','str'])
feat = ds.featureset().next()
attr = {'City': u'New York, NY', 'geo_accuracy': u'house', 'Phone': u'(212) 334-0711', 'Address': u'19 Elizabeth Street', 'Precinct': u'5th Precinct', 'geo_longitude': -70, 'geo_latitude': 40}
eq_(feat.attributes,attr)
eq_(len(ds.all_features()),2)
def test_skipping_blank_rows(**kwargs):
ds = get_csv_ds('blank_rows.csv')
eq_(ds.fields(),['x','y','name'])
eq_(ds.field_types(),['int','int','str'])
eq_(len(ds.all_features()),2)
def test_empty_rows(**kwargs):
ds = get_csv_ds('empty_rows.csv')
eq_(len(ds.fields()),10)
eq_(len(ds.field_types()),10)
eq_(ds.fields(),['x', 'y', 'text', 'date', 'integer', 'boolean', 'float', 'time', 'datetime', 'empty_column'])
eq_(ds.field_types(),['int', 'int', 'str', 'str', 'int', 'str', 'float', 'str', 'str', 'str'])
fs = ds.featureset()
feat = fs.next()
attr = {'x': 0, 'empty_column': u'', 'text': u'a b', 'float': 1.0, 'datetime': u'1971-01-01T04:14:00', 'y': 0, 'boolean': u'True', 'time': u'04:14:00', 'date': u'1971-01-01', 'integer': 40}
eq_(feat.attributes,attr)
while feat:
eq_(len(feat),10)
eq_(feat['empty_column'],u'')
feat = fs.next()
def test_slashes(**kwargs):
ds = get_csv_ds('has_attributes_with_slashes.csv')
eq_(len(ds.fields()),3)
fs = ds.all_features()
eq_(fs[0].attributes,{'x':0,'y':0,'name':u'a/a'})
eq_(fs[1].attributes,{'x':1,'y':4,'name':u'b/b'})
eq_(fs[2].attributes,{'x':10,'y':2.5,'name':u'c/c'})
def test_wkt_field(**kwargs):
ds = get_csv_ds('wkt.csv')
eq_(len(ds.fields()),2)
eq_(ds.fields(),['type','WKT'])
eq_(ds.field_types(),['str','str'])
fs = ds.all_features()
#import pdb;pdb.set_trace()
eq_(len(fs[0].geometries()),1)
eq_(fs[0].geometries()[0].type(),mapnik2.GeometryType.Point)
eq_(len(fs[1].geometries()),1)
eq_(fs[1].geometries()[0].type(),mapnik2.GeometryType.LineString)
eq_(len(fs[2].geometries()),1)
eq_(fs[2].geometries()[0].type(),mapnik2.GeometryType.Polygon)
eq_(len(fs[3].geometries()),1) # one geometry, two parts
eq_(fs[3].geometries()[0].type(),mapnik2.GeometryType.Polygon)
# tests assuming we want to flatten geometries
# ideally we should not have to:
# https://github.com/mapnik/mapnik/issues?labels=multigeom+robustness&sort=created&direction=desc&state=open&page=1
eq_(len(fs[4].geometries()),4)
eq_(fs[4].geometries()[0].type(),mapnik2.GeometryType.Point)
eq_(len(fs[5].geometries()),2)
eq_(fs[5].geometries()[0].type(),mapnik2.GeometryType.LineString)
eq_(len(fs[6].geometries()),2)
eq_(fs[6].geometries()[0].type(),mapnik2.GeometryType.Polygon)
eq_(len(fs[7].geometries()),2)
eq_(fs[7].geometries()[0].type(),mapnik2.GeometryType.Polygon)
def test_handling_of_missing_header(**kwargs):
ds = get_csv_ds('missing_header.csv')
eq_(len(ds.fields()),6)
eq_(ds.fields(),['one','two','x','y','_4','aftermissing'])
fs = ds.featureset()
feat = fs.next()
eq_(feat['_4'],'missing')
def test_handling_of_headers_that_are_numbers(**kwargs):
ds = get_csv_ds('numbers_for_headers.csv')
eq_(len(ds.fields()),5)
eq_(ds.fields(),['x','y','1990','1991','1992'])
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],0)
eq_(feat['y'],0)
eq_(feat['1990'],1)
eq_(feat['1991'],2)
eq_(feat['1992'],3)
eq_(mapnik2.Expression("[1991]=2").evaluate(feat),True)
def test_quoted_numbers(**kwargs):
ds = get_csv_ds('points.csv')
eq_(len(ds.fields()),3)
eq_(ds.fields(),['x','y','label'])
fs = ds.all_features()
eq_(fs[0]['label'],"0,0")
eq_(fs[1]['label'],"5,5")
eq_(fs[2]['label'],"0,5")
eq_(fs[3]['label'],"5,0")
eq_(fs[4]['label'],"2.5,2.5")
def test_windows_newlines(**kwargs):
ds = get_csv_ds('windows_newlines.csv')
eq_(len(ds.fields()),3)
feats = ds.all_features()
eq_(len(feats),1)
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],1)
eq_(feat['y'],10)
eq_(feat['z'],9999.9999)
def test_mac_newlines(**kwargs):
ds = get_csv_ds('windows_newlines.csv')
eq_(len(ds.fields()),3)
feats = ds.all_features()
eq_(len(feats),1)
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],1)
eq_(feat['y'],10)
eq_(feat['z'],9999.9999)
def test_tabs(**kwargs):
ds = get_csv_ds('tabs_in_csv.csv')
eq_(len(ds.fields()),3)
eq_(ds.fields(),['x','y','z'])
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],-122)
eq_(feat['y'],48)
eq_(feat['z'],0)
def test_separator_pipes(**kwargs):
ds = get_csv_ds('pipe_delimiters.csv')
eq_(len(ds.fields()),3)
eq_(ds.fields(),['x','y','z'])
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],0)
eq_(feat['y'],0)
eq_(feat['z'],'hello')
def test_separator_semicolon(**kwargs):
ds = get_csv_ds('semicolon_delimiters.csv')
eq_(len(ds.fields()),3)
eq_(ds.fields(),['x','y','z'])
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],0)
eq_(feat['y'],0)
eq_(feat['z'],'hello')
def test_that_null_and_bool_keywords_are_empty_strings(**kwargs):
ds = get_csv_ds('nulls_and_booleans_as_strings.csv')
eq_(len(ds.fields()),4)
eq_(ds.fields(),['x','y','null','boolean'])
eq_(ds.field_types(),['int','int','str','str'])
fs = ds.featureset()
feat = fs.next()
eq_(feat['x'],0)
eq_(feat['y'],0)
eq_(feat['null'],'null')
eq_(feat['boolean'],'true')
feat = fs.next()
eq_(feat['x'],0)
eq_(feat['y'],0)
eq_(feat['null'],'')
eq_(feat['boolean'],'false')
if __name__ == "__main__":
setup()

View file

@ -262,8 +262,6 @@ private:
{
if (node && node->ext_.contains(item_ext))
{
coord2d c=node->ext_.center();
double width=node->ext_.width();
double height=node->ext_.height();