aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeff Davis <jdavis@postgresql.org>2024-01-11 12:20:25 -0800
committerJeff Davis <jdavis@postgresql.org>2024-01-11 12:35:29 -0800
commitcf64d4e99f6480ad6d73af82bfbbf2b2e637a7ec (patch)
tree190daa0c75da5cb4f8f2dfaa9fd9fcaa86f329a0
parent29f114b6ff0c4ccad29b3a3707ab2c299647d450 (diff)
downloadpostgresql-cf64d4e99f6480ad6d73af82bfbbf2b2e637a7ec.tar.gz
postgresql-cf64d4e99f6480ad6d73af82bfbbf2b2e637a7ec.zip
Cleanup for unicode-update build target and test.
In preparation for adding more Unicode tables.

Discussion: https://postgr.es/m/63cd8625-68fa-4760-844a-6b7f643336f2@ardentperf.com
Reviewed-by: Jeremy Schneider
-rw-r--r--src/common/unicode/Makefile6
-rw-r--r--src/common/unicode/category_test.c24
-rw-r--r--src/common/unicode/meson.build44
3 files changed, 37 insertions, 37 deletions
diff --git a/src/common/unicode/Makefile b/src/common/unicode/Makefile
index 30cd75cc6a7..04d81dd5cb5 100644
--- a/src/common/unicode/Makefile
+++ b/src/common/unicode/Makefile
@@ -21,7 +21,7 @@ CPPFLAGS += $(ICU_CFLAGS)
# By default, do nothing.
all:
-update-unicode: unicode_category_table.h unicode_norm_table.h unicode_nonspacing_table.h unicode_east_asian_fw_table.h unicode_normprops_table.h unicode_norm_hashfunc.h unicode_version.h
+update-unicode: unicode_category_table.h unicode_east_asian_fw_table.h unicode_nonspacing_table.h unicode_norm_hashfunc.h unicode_norm_table.h unicode_normprops_table.h unicode_version.h
mv $^ $(top_srcdir)/src/include/common/
$(MAKE) category-check
$(MAKE) normalization-check
@@ -29,7 +29,7 @@ update-unicode: unicode_category_table.h unicode_norm_table.h unicode_nonspacing
# These files are part of the Unicode Character Database. Download
# them on demand. The dependency on Makefile.global is for
# UNICODE_VERSION.
-UnicodeData.txt EastAsianWidth.txt DerivedNormalizationProps.txt CompositionExclusions.txt NormalizationTest.txt: $(top_builddir)/src/Makefile.global
+CompositionExclusions.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt UnicodeData.txt: $(top_builddir)/src/Makefile.global
$(DOWNLOAD) https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/$(@F)
unicode_version.h: generate-unicode_version.pl
@@ -82,4 +82,4 @@ clean:
rm -f $(OBJS) category_test category_test.o norm_test norm_test.o
distclean: clean
- rm -f UnicodeData.txt EastAsianWidth.txt CompositionExclusions.txt NormalizationTest.txt norm_test_table.h unicode_norm_table.h
+ rm -f CompositionExclusions.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt UnicodeData.txt norm_test_table.h unicode_category_table.h unicode_norm_table.h
diff --git a/src/common/unicode/category_test.c b/src/common/unicode/category_test.c
index 6cd7cd1a5f8..f1aaac0f613 100644
--- a/src/common/unicode/category_test.c
+++ b/src/common/unicode/category_test.c
@@ -28,9 +28,9 @@
static int
parse_unicode_version(const char *version)
{
- int n,
- major,
- minor;
+ int n PG_USED_FOR_ASSERTS_ONLY;
+ int major;
+ int minor;
n = sscanf(version, "%d.%d", &major, &minor);
@@ -54,8 +54,8 @@ main(int argc, char **argv)
int pg_skipped_codepoints = 0;
int icu_skipped_codepoints = 0;
- printf("Postgres Unicode Version:\t%s\n", PG_UNICODE_VERSION);
- printf("ICU Unicode Version:\t\t%s\n", U_UNICODE_VERSION);
+ printf("category_test: Postgres Unicode version:\t%s\n", PG_UNICODE_VERSION);
+ printf("category_test: ICU Unicode version:\t\t%s\n", U_UNICODE_VERSION);
for (UChar32 code = 0; code <= 0x10ffff; code++)
{
@@ -79,11 +79,11 @@ main(int argc, char **argv)
icu_skipped_codepoints++;
else
{
- printf("FAILURE for codepoint %06x\n", code);
- printf("Postgres category: %02d %s %s\n", pg_category,
+ printf("category_test: FAILURE for codepoint 0x%06x\n", code);
+ printf("category_test: Postgres category: %02d %s %s\n", pg_category,
unicode_category_abbrev(pg_category),
unicode_category_string(pg_category));
- printf("ICU category: %02d %s %s\n", icu_category,
+ printf("category_test: ICU category: %02d %s %s\n", icu_category,
unicode_category_abbrev(icu_category),
unicode_category_string(icu_category));
printf("\n");
@@ -93,16 +93,16 @@ main(int argc, char **argv)
}
if (pg_skipped_codepoints > 0)
- printf("Skipped %d codepoints unassigned in Postgres due to Unicode version mismatch.\n",
+ printf("category_test: skipped %d codepoints unassigned in Postgres due to Unicode version mismatch\n",
pg_skipped_codepoints);
if (icu_skipped_codepoints > 0)
- printf("Skipped %d codepoints unassigned in ICU due to Unicode version mismatch.\n",
+ printf("category_test: skipped %d codepoints unassigned in ICU due to Unicode version mismatch\n",
icu_skipped_codepoints);
- printf("category_test: All tests successful!\n");
+ printf("category_test: success\n");
exit(0);
#else
- printf("ICU support required for test; skipping.\n");
+ printf("category_test: ICU support required for test; skipping\n");
exit(0);
#endif
}
diff --git a/src/common/unicode/meson.build b/src/common/unicode/meson.build
index 02e07cf4f4e..df4f3a4ed1d 100644
--- a/src/common/unicode/meson.build
+++ b/src/common/unicode/meson.build
@@ -11,7 +11,7 @@ endif
# These files are part of the Unicode Character Database. Download them on
# demand.
-foreach f : ['UnicodeData.txt', 'EastAsianWidth.txt', 'DerivedNormalizationProps.txt', 'CompositionExclusions.txt', 'NormalizationTest.txt']
+foreach f : ['CompositionExclusions.txt', 'DerivedNormalizationProps.txt', 'EastAsianWidth.txt', 'NormalizationTest.txt', 'UnicodeData.txt']
url = unicode_baseurl.format(UNICODE_VERSION, f)
target = custom_target(f,
output: f,
@@ -25,15 +25,6 @@ endforeach
update_unicode_targets = []
update_unicode_targets += \
- custom_target('unicode_version.h',
- output: ['unicode_version.h'],
- command: [
- perl, files('generate-unicode_version.pl'),
- '--outdir', '@OUTDIR@', '--version', UNICODE_VERSION],
- build_by_default: false,
- )
-
-update_unicode_targets += \
custom_target('unicode_category_table.h',
input: [unicode_data['UnicodeData.txt']],
output: ['unicode_category_table.h'],
@@ -44,14 +35,12 @@ update_unicode_targets += \
)
update_unicode_targets += \
- custom_target('unicode_norm_table.h',
- input: [unicode_data['UnicodeData.txt'], unicode_data['CompositionExclusions.txt']],
- output: ['unicode_norm_table.h', 'unicode_norm_hashfunc.h'],
- depend_files: perfect_hash_pm,
- command: [
- perl, files('generate-unicode_norm_table.pl'),
- '--outdir', '@OUTDIR@', '@INPUT@'],
+ custom_target('unicode_east_asian_fw_table.h',
+ input: [unicode_data['EastAsianWidth.txt']],
+ output: ['unicode_east_asian_fw_table.h'],
+ command: [perl, files('generate-unicode_east_asian_fw_table.pl'), '@INPUT@'],
build_by_default: false,
+ capture: true,
)
update_unicode_targets += \
@@ -65,12 +54,14 @@ update_unicode_targets += \
)
update_unicode_targets += \
- custom_target('unicode_east_asian_fw_table.h',
- input: [unicode_data['EastAsianWidth.txt']],
- output: ['unicode_east_asian_fw_table.h'],
- command: [perl, files('generate-unicode_east_asian_fw_table.pl'), '@INPUT@'],
+ custom_target('unicode_norm_table.h',
+ input: [unicode_data['UnicodeData.txt'], unicode_data['CompositionExclusions.txt']],
+ output: ['unicode_norm_table.h', 'unicode_norm_hashfunc.h'],
+ depend_files: perfect_hash_pm,
+ command: [
+ perl, files('generate-unicode_norm_table.pl'),
+ '--outdir', '@OUTDIR@', '@INPUT@'],
build_by_default: false,
- capture: true,
)
update_unicode_targets += \
@@ -83,6 +74,15 @@ update_unicode_targets += \
capture: true,
)
+update_unicode_targets += \
+ custom_target('unicode_version.h',
+ output: ['unicode_version.h'],
+ command: [
+ perl, files('generate-unicode_version.pl'),
+ '--outdir', '@OUTDIR@', '--version', UNICODE_VERSION],
+ build_by_default: false,
+ )
+
norm_test_table = custom_target('norm_test_table.h',
input: [unicode_data['NormalizationTest.txt']],
output: ['norm_test_table.h'],