-rw-r--r--   include/llvm/Support/ConvertUTF.h       24
-rw-r--r--   lib/Support/ConvertUTFWrapper.cpp       55
-rw-r--r--   unittests/Support/CMakeLists.txt         1
-rw-r--r--   unittests/Support/ConvertUTFTest.cpp    65
4 files changed, 143 insertions, 2 deletions
diff --git a/include/llvm/Support/ConvertUTF.h b/include/llvm/Support/ConvertUTF.h
index 1eae6d6622..282036619c 100644
--- a/include/llvm/Support/ConvertUTF.h
+++ b/include/llvm/Support/ConvertUTF.h
@@ -87,8 +87,8 @@
------------------------------------------------------------------------ */
-#ifndef CLANG_BASIC_CONVERTUTF_H
-#define CLANG_BASIC_CONVERTUTF_H
+#ifndef LLVM_SUPPORT_CONVERTUTF_H
+#define LLVM_SUPPORT_CONVERTUTF_H
/* ---------------------------------------------------------------------
The following 4 definitions are compiler-specific.
@@ -112,6 +112,9 @@ typedef unsigned char Boolean; /* 0 or 1 */
#define UNI_MAX_UTF8_BYTES_PER_CODE_POINT 4
+#define UNI_UTF16_BYTE_ORDER_MARK_NATIVE 0xFEFF
+#define UNI_UTF16_BYTE_ORDER_MARK_SWAPPED 0xFFFE
+
typedef enum {
conversionOK, /* conversion successful */
sourceExhausted, /* partial character in source, but hit end */
@@ -165,6 +168,7 @@ unsigned getNumBytesForUTF8(UTF8 firstByte);
/*************************************************************************/
/* Below are LLVM-specific wrappers of the functions above. */
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
namespace llvm {
@@ -219,6 +223,22 @@ static inline ConversionResult convertUTF8Sequence(const UTF8 **source,
return sourceExhausted;
return ConvertUTF8toUTF32(source, *source + size, &target, target + 1, flags);
}
+
+/**
+ * Returns true if a blob of text starts with a UTF-16 big or little endian byte
+ * order mark.
+ */
+bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);
+
+/**
+ * Converts a stream of raw bytes assumed to be UTF16 into a UTF8 std::string.
+ *
+ * \param [in] SrcBytes A buffer of what is assumed to be UTF-16 encoded text.
+ * \param [out] Out Converted UTF-8 is stored here on success.
+ * \returns true on success
+ */
+bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
+
} /* end namespace llvm */
#endif
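
(Illustrative usage, not part of the patch: a minimal sketch of how a caller might drive the two new entry points declared above. The helper name getTextAsUTF8 and the RawBytes buffer are hypothetical; only hasUTF16ByteOrderMark and convertUTF16ToUTF8String come from the header.)

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ConvertUTF.h"
#include <string>

// Return the buffer's text as UTF-8, converting from UTF-16 only when a
// byte order mark is present; otherwise treat the bytes as UTF-8/ASCII.
// UTF8Text must be empty on entry (the wrapper asserts this).
static bool getTextAsUTF8(llvm::ArrayRef<char> RawBytes, std::string &UTF8Text) {
  if (llvm::hasUTF16ByteOrderMark(RawBytes))
    // convertUTF16ToUTF8String reads the BOM itself to pick the byte order.
    return llvm::convertUTF16ToUTF8String(RawBytes, UTF8Text);
  UTF8Text.assign(RawBytes.begin(), RawBytes.end());
  return true;
}
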
diff --git a/lib/Support/ConvertUTFWrapper.cpp b/lib/Support/ConvertUTFWrapper.cpp
index 458fbb0b49..e45335ddcb 100644
--- a/lib/Support/ConvertUTFWrapper.cpp
+++ b/lib/Support/ConvertUTFWrapper.cpp
@@ -8,6 +8,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <string>
+#include <vector>
namespace llvm {
@@ -72,5 +75,57 @@ bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr) {
return true;
}
+bool hasUTF16ByteOrderMark(ArrayRef<char> S) {
+ return (S.size() >= 2 &&
+ ((S[0] == '\xff' && S[1] == '\xfe') ||
+ (S[0] == '\xfe' && S[1] == '\xff')));
+}
+
+bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out) {
+ assert(Out.empty());
+
+ // Error out on an uneven byte count.
+ if (SrcBytes.size() % 2)
+ return false;
+
+ // Avoid OOB by returning early on empty input.
+ if (SrcBytes.empty())
+ return true;
+
+ const UTF16 *Src = reinterpret_cast<const UTF16 *>(SrcBytes.begin());
+ const UTF16 *SrcEnd = reinterpret_cast<const UTF16 *>(SrcBytes.end());
+
+ // Byteswap if necessary.
+ std::vector<UTF16> ByteSwapped;
+ if (Src[0] == UNI_UTF16_BYTE_ORDER_MARK_SWAPPED) {
+ ByteSwapped.insert(ByteSwapped.end(), Src, SrcEnd);
+ for (unsigned I = 0, E = ByteSwapped.size(); I != E; ++I)
+ ByteSwapped[I] = llvm::sys::SwapByteOrder_16(ByteSwapped[I]);
+ Src = &ByteSwapped[0];
+ SrcEnd = &ByteSwapped[ByteSwapped.size() - 1] + 1;
+ }
+
+ // Skip the BOM for conversion.
+ if (Src[0] == UNI_UTF16_BYTE_ORDER_MARK_NATIVE)
+ Src++;
+
+ // Just allocate enough space up front. We'll shrink it later.
+ Out.resize(SrcBytes.size() * UNI_MAX_UTF8_BYTES_PER_CODE_POINT);
+ UTF8 *Dst = reinterpret_cast<UTF8 *>(&Out[0]);
+ UTF8 *DstEnd = Dst + Out.size();
+
+ ConversionResult CR =
+ ConvertUTF16toUTF8(&Src, SrcEnd, &Dst, DstEnd, strictConversion);
+ assert(CR != targetExhausted);
+
+ if (CR != conversionOK) {
+ Out.clear();
+ return false;
+ }
+
+ Out.resize(reinterpret_cast<char *>(Dst) - &Out[0]);
+ return true;
+}
+
} // end namespace llvm
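
(Illustrative only, not part of the patch: a short sketch of the byte-order logic above. Reading the BOM as a host-endian UTF16 value yields UNI_UTF16_BYTE_ORDER_MARK_NATIVE (0xFEFF) when the data already matches the host's byte order and UNI_UTF16_BYTE_ORDER_MARK_SWAPPED (0xFFFE) when every code unit needs swapping first. The classifyByteOrder helper and its enum are hypothetical.)

#include "llvm/Support/ConvertUTF.h"

enum UTF16ByteOrder { UTF16_Native, UTF16_Swapped, UTF16_Unknown };

// Decide, from the first code unit, whether UTF-16 data matches the host
// byte order. This mirrors the checks made in convertUTF16ToUTF8String.
static UTF16ByteOrder classifyByteOrder(const UTF16 *Src, const UTF16 *SrcEnd) {
  if (Src == SrcEnd)
    return UTF16_Unknown;
  if (Src[0] == UNI_UTF16_BYTE_ORDER_MARK_NATIVE)
    return UTF16_Native;   // no swapping required
  if (Src[0] == UNI_UTF16_BYTE_ORDER_MARK_SWAPPED)
    return UTF16_Swapped;  // byte-swap every code unit before converting
  return UTF16_Unknown;    // no BOM: the wrapper assumes native order
}
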
diff --git a/unittests/Support/CMakeLists.txt b/unittests/Support/CMakeLists.txt
index fedfb3d225..2cbe730ef7 100644
--- a/unittests/Support/CMakeLists.txt
+++ b/unittests/Support/CMakeLists.txt
@@ -12,6 +12,7 @@ add_llvm_unittest(SupportTests
CommandLineTest.cpp
CompressionTest.cpp
ConstantRangeTest.cpp
+ ConvertUTFTest.cpp
DataExtractorTest.cpp
EndianTest.cpp
ErrorOrTest.cpp
diff --git a/unittests/Support/ConvertUTFTest.cpp b/unittests/Support/ConvertUTFTest.cpp
new file mode 100644
index 0000000000..13ea75b157
--- /dev/null
+++ b/unittests/Support/ConvertUTFTest.cpp
@@ -0,0 +1,65 @@
+//===- llvm/unittest/Support/ConvertUTFTest.cpp - ConvertUTF tests --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ConvertUTF.h"
+#include "gtest/gtest.h"
+#include <string>
+
+using namespace llvm;
+
+TEST(ConvertUTFTest, ConvertUTF16LittleEndianToUTF8String) {
+ // Src is the look of disapproval.
+ static const char Src[] = "\xff\xfe\xa0\x0c_\x00\xa0\x0c";
+ ArrayRef<char> Ref(Src, sizeof(Src) - 1);
+ std::string Result;
+ bool Success = convertUTF16ToUTF8String(Ref, Result);
+ EXPECT_TRUE(Success);
+ std::string Expected("\xe0\xb2\xa0_\xe0\xb2\xa0");
+ EXPECT_EQ(Expected, Result);
+}
+
+TEST(ConvertUTFTest, ConvertUTF16BigEndianToUTF8String) {
+ // Src is the look of disapproval.
+ static const char Src[] = "\xfe\xff\x0c\xa0\x00_\x0c\xa0";
+ ArrayRef<char> Ref(Src, sizeof(Src) - 1);
+ std::string Result;
+ bool Success = convertUTF16ToUTF8String(Ref, Result);
+ EXPECT_TRUE(Success);
+ std::string Expected("\xe0\xb2\xa0_\xe0\xb2\xa0");
+ EXPECT_EQ(Expected, Result);
+}
+
+TEST(ConvertUTFTest, OddLengthInput) {
+ std::string Result;
+ bool Success = convertUTF16ToUTF8String(ArrayRef<char>("xxxxx", 5), Result);
+ EXPECT_FALSE(Success);
+}
+
+TEST(ConvertUTFTest, Empty) {
+ std::string Result;
+ bool Success = convertUTF16ToUTF8String(ArrayRef<char>(), Result);
+ EXPECT_TRUE(Success);
+ EXPECT_TRUE(Result.empty());
+}
+
+TEST(ConvertUTFTest, HasUTF16BOM) {
+ bool HasBOM = hasUTF16ByteOrderMark(ArrayRef<char>("\xff\xfe", 2));
+ EXPECT_TRUE(HasBOM);
+ HasBOM = hasUTF16ByteOrderMark(ArrayRef<char>("\xfe\xff", 2));
+ EXPECT_TRUE(HasBOM);
+ HasBOM = hasUTF16ByteOrderMark(ArrayRef<char>("\xfe\xff ", 3));
+ EXPECT_TRUE(HasBOM); // Don't care about odd lengths.
+ HasBOM = hasUTF16ByteOrderMark(ArrayRef<char>("\xfe\xff\x00asdf", 6));
+ EXPECT_TRUE(HasBOM);
+
+ HasBOM = hasUTF16ByteOrderMark(ArrayRef<char>());
+ EXPECT_FALSE(HasBOM);
+ HasBOM = hasUTF16ByteOrderMark(ArrayRef<char>("\xfe", 1));
+ EXPECT_FALSE(HasBOM);
+}