]> granicus.if.org Git - postgresql/blob - src/backend/utils/adt/json.c
Use correct length to convert json unicode escapes.
[postgresql] / src / backend / utils / adt / json.c
1 /*-------------------------------------------------------------------------
2  *
3  * json.c
4  *              JSON data type support.
5  *
6  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  *        src/backend/utils/adt/json.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15
16 #include "access/htup_details.h"
17 #include "access/transam.h"
18 #include "catalog/pg_cast.h"
19 #include "catalog/pg_type.h"
20 #include "executor/spi.h"
21 #include "lib/stringinfo.h"
22 #include "libpq/pqformat.h"
23 #include "mb/pg_wchar.h"
24 #include "parser/parse_coerce.h"
25 #include "utils/array.h"
26 #include "utils/builtins.h"
27 #include "utils/lsyscache.h"
28 #include "utils/json.h"
29 #include "utils/jsonapi.h"
30 #include "utils/typcache.h"
31 #include "utils/syscache.h"
32
33 /*
34  * The context of the parser is maintained by the recursive descent
35  * mechanism, but is passed explicitly to the error reporting routine
36  * for better diagnostics.
37  */
typedef enum					/* contexts of JSON parser */
{
	JSON_PARSE_VALUE,			/* expecting a value */
	JSON_PARSE_STRING,			/* expecting a string (for a field name) */
	JSON_PARSE_ARRAY_START,		/* saw '[', expecting value or ']' */
	JSON_PARSE_ARRAY_NEXT,		/* saw array element, expecting ',' or ']' */
	JSON_PARSE_OBJECT_START,	/* saw '{', expecting label or '}' */
	JSON_PARSE_OBJECT_LABEL,	/* saw object label, expecting ':' */
	JSON_PARSE_OBJECT_NEXT,		/* saw object value, expecting ',' or '}' */
	JSON_PARSE_OBJECT_COMMA,	/* saw object ',', expecting next label */
	JSON_PARSE_END				/* saw the end of a document, expect nothing */
}	JsonParseContext;			/* passed to report_parse_error() to say
								 * what was expected at the failure point */
50
51 static inline void json_lex(JsonLexContext *lex);
52 static inline void json_lex_string(JsonLexContext *lex);
53 static inline void json_lex_number(JsonLexContext *lex, char *s);
54 static inline void parse_scalar(JsonLexContext *lex, JsonSemAction sem);
55 static void parse_object_field(JsonLexContext *lex, JsonSemAction sem);
56 static void parse_object(JsonLexContext *lex, JsonSemAction sem);
57 static void parse_array_element(JsonLexContext *lex, JsonSemAction sem);
58 static void parse_array(JsonLexContext *lex, JsonSemAction sem);
59 static void report_parse_error(JsonParseContext ctx, JsonLexContext *lex);
60 static void report_invalid_token(JsonLexContext *lex);
61 static int      report_json_context(JsonLexContext *lex);
62 static char *extract_mb_char(char *s);
63 static void composite_to_json(Datum composite, StringInfo result,
64                                   bool use_line_feeds);
65 static void array_dim_to_json(StringInfo result, int dim, int ndims, int *dims,
66                                   Datum *vals, bool *nulls, int *valcount,
67                                   TYPCATEGORY tcategory, Oid typoutputfunc,
68                                   bool use_line_feeds);
69 static void array_to_json_internal(Datum array, StringInfo result,
70                                            bool use_line_feeds);
71
/*
 * The null action object used for pure validation: every semantic-action
 * callback is NULL, so pg_parse_json() does nothing but check syntax.
 * json_in() and json_recv() pass this to validate their input.
 */
static jsonSemAction nullSemAction =
{
	NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL
};
static JsonSemAction NullSemAction = &nullSemAction;
79
80 /* Recursive Descent parser support routines */
81
82 /*
83  * lex_peek
84  *
85  * what is the current look_ahead token?
86 */
87 static inline JsonTokenType
88 lex_peek(JsonLexContext *lex)
89 {
90         return lex->token_type;
91 }
92
93 /*
94  * lex_accept
95  *
96  * accept the look_ahead token and move the lexer to the next token if the
97  * look_ahead token matches the token parameter. In that case, and if required,
98  * also hand back the de-escaped lexeme.
99  *
100  * returns true if the token matched, false otherwise.
101  */
102 static inline bool
103 lex_accept(JsonLexContext *lex, JsonTokenType token, char **lexeme)
104 {
105         if (lex->token_type == token)
106         {
107                 if (lexeme != NULL)
108                 {
109                         if (lex->token_type == JSON_TOKEN_STRING)
110                         {
111                                 if (lex->strval != NULL)
112                                         *lexeme = pstrdup(lex->strval->data);
113                         }
114                         else
115                         {
116                                 int                     len = (lex->token_terminator - lex->token_start);
117                                 char       *tokstr = palloc(len + 1);
118
119                                 memcpy(tokstr, lex->token_start, len);
120                                 tokstr[len] = '\0';
121                                 *lexeme = tokstr;
122                         }
123                 }
124                 json_lex(lex);
125                 return true;
126         }
127         return false;
128 }
129
130 /*
131  * lex_accept
132  *
133  * move the lexer to the next token if the current look_ahead token matches
134  * the parameter token. Otherwise, report an error.
135  */
136 static inline void
137 lex_expect(JsonParseContext ctx, JsonLexContext *lex, JsonTokenType token)
138 {
139         if (!lex_accept(lex, token, NULL))
140                 report_parse_error(ctx, lex);;
141 }
142
143 /*
144  * All the defined      type categories are upper case , so use lower case here
145  * so we avoid any possible clash.
146  */
/* fake type category for JSON so we can distinguish it in datum_to_json */
#define TYPCATEGORY_JSON 'j'
/* fake category for types that have a cast to json */
#define TYPCATEGORY_JSON_CAST 'c'
/* letters appearing in numeric output that aren't valid in a JSON number */
#define NON_NUMERIC_LETTER "NnAaIiFfTtYy"
/*
 * chars to consider as part of an alphanumeric token; any high-bit-set byte
 * counts, so a multibyte character is kept within a single token (used when
 * scanning an unexpected word for error reporting)
 */
#define JSON_ALPHANUMERIC_CHAR(c)  \
	(((c) >= 'a' && (c) <= 'z') || \
	 ((c) >= 'A' && (c) <= 'Z') || \
	 ((c) >= '0' && (c) <= '9') || \
	 (c) == '_' || \
	 IS_HIGHBIT_SET(c))
160
161 /*
162  * Input.
163  */
164 Datum
165 json_in(PG_FUNCTION_ARGS)
166 {
167         char       *json = PG_GETARG_CSTRING(0);
168         text       *result = cstring_to_text(json);
169         JsonLexContext *lex;
170
171         /* validate it */
172         lex = makeJsonLexContext(result, false);
173         pg_parse_json(lex, NullSemAction);
174
175         /* Internal representation is the same as text, for now */
176         PG_RETURN_TEXT_P(result);
177 }
178
179 /*
180  * Output.
181  */
182 Datum
183 json_out(PG_FUNCTION_ARGS)
184 {
185         /* we needn't detoast because text_to_cstring will handle that */
186         Datum           txt = PG_GETARG_DATUM(0);
187
188         PG_RETURN_CSTRING(TextDatumGetCString(txt));
189 }
190
191 /*
192  * Binary send.
193  */
194 Datum
195 json_send(PG_FUNCTION_ARGS)
196 {
197         text       *t = PG_GETARG_TEXT_PP(0);
198         StringInfoData buf;
199
200         pq_begintypsend(&buf);
201         pq_sendtext(&buf, VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t));
202         PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
203 }
204
205 /*
206  * Binary receive.
207  */
208 Datum
209 json_recv(PG_FUNCTION_ARGS)
210 {
211         StringInfo      buf = (StringInfo) PG_GETARG_POINTER(0);
212         text       *result;
213         char       *str;
214         int                     nbytes;
215         JsonLexContext *lex;
216
217         str = pq_getmsgtext(buf, buf->len - buf->cursor, &nbytes);
218
219         result = palloc(nbytes + VARHDRSZ);
220         SET_VARSIZE(result, nbytes + VARHDRSZ);
221         memcpy(VARDATA(result), str, nbytes);
222
223         /* Validate it. */
224         lex = makeJsonLexContext(result, false);
225         pg_parse_json(lex, NullSemAction);
226
227         PG_RETURN_TEXT_P(result);
228 }
229
230 /*
231  * makeJsonLexContext
232  *
233  * lex constructor, with or without StringInfo object
234  * for de-escaped lexemes.
235  *
236  * Without is better as it makes the processing faster, so only make one
237  * if really required.
238  */
239 JsonLexContext *
240 makeJsonLexContext(text *json, bool need_escapes)
241 {
242         JsonLexContext *lex = palloc0(sizeof(JsonLexContext));
243
244         lex->input = lex->token_terminator = lex->line_start = VARDATA(json);
245         lex->line_number = 1;
246         lex->input_length = VARSIZE(json) - VARHDRSZ;
247         if (need_escapes)
248                 lex->strval = makeStringInfo();
249         return lex;
250 }
251
252 /*
253  * pg_parse_json
254  *
255  * Publicly visible entry point for the JSON parser.
256  *
257  * lex is a lexing context, set up for the json to be processed by calling
258  * makeJsonLexContext(). sem is a strucure of function pointers to semantic
259  * action routines to be called at appropriate spots during parsing, and a
260  * pointer to a state object to be passed to those routines.
261  */
262 void
263 pg_parse_json(JsonLexContext *lex, JsonSemAction sem)
264 {
265         JsonTokenType tok;
266
267         /* get the initial token */
268         json_lex(lex);
269
270         tok = lex_peek(lex);
271
272         /* parse by recursive descent */
273         switch (tok)
274         {
275                 case JSON_TOKEN_OBJECT_START:
276                         parse_object(lex, sem);
277                         break;
278                 case JSON_TOKEN_ARRAY_START:
279                         parse_array(lex, sem);
280                         break;
281                 default:
282                         parse_scalar(lex, sem);         /* json can be a bare scalar */
283         }
284
285         lex_expect(JSON_PARSE_END, lex, JSON_TOKEN_END);
286
287 }
288
289 /*
290  *      Recursive Descent parse routines. There is one for each structural
291  *      element in a json document:
292  *        - scalar (string, number, true, false, null)
293  *        - array  ( [ ] )
294  *        - array element
295  *        - object ( { } )
296  *        - object field
297  */
298 static inline void
299 parse_scalar(JsonLexContext *lex, JsonSemAction sem)
300 {
301         char       *val = NULL;
302         json_scalar_action sfunc = sem->scalar;
303         char      **valaddr;
304         JsonTokenType tok = lex_peek(lex);
305
306         valaddr = sfunc == NULL ? NULL : &val;
307
308         /* a scalar must be a string, a number, true, false, or null */
309         switch (tok)
310         {
311                 case JSON_TOKEN_TRUE:
312                         lex_accept(lex, JSON_TOKEN_TRUE, valaddr);
313                         break;
314                 case JSON_TOKEN_FALSE:
315                         lex_accept(lex, JSON_TOKEN_FALSE, valaddr);
316                         break;
317                 case JSON_TOKEN_NULL:
318                         lex_accept(lex, JSON_TOKEN_NULL, valaddr);
319                         break;
320                 case JSON_TOKEN_NUMBER:
321                         lex_accept(lex, JSON_TOKEN_NUMBER, valaddr);
322                         break;
323                 case JSON_TOKEN_STRING:
324                         lex_accept(lex, JSON_TOKEN_STRING, valaddr);
325                         break;
326                 default:
327                         report_parse_error(JSON_PARSE_VALUE, lex);
328         }
329
330         if (sfunc != NULL)
331                 (*sfunc) (sem->semstate, val, tok);
332 }
333
static void
parse_object_field(JsonLexContext *lex, JsonSemAction sem)
{
	/*
	 * an object field is "fieldname" : value where value can be a scalar,
	 * object or array
	 */

	char	   *fname = NULL;	/* keep compiler quiet */
	json_ofield_action ostart = sem->object_field_start;
	json_ofield_action oend = sem->object_field_end;
	bool		isnull;
	char	  **fnameaddr = NULL;
	JsonTokenType tok;

	/* only fetch a copy of the field name if a callback will need it */
	if (ostart != NULL || oend != NULL)
		fnameaddr = &fname;

	if (!lex_accept(lex, JSON_TOKEN_STRING, fnameaddr))
		report_parse_error(JSON_PARSE_STRING, lex);

	lex_expect(JSON_PARSE_OBJECT_LABEL, lex, JSON_TOKEN_COLON);

	/* peek at the value token so we can tell the callbacks about nulls */
	tok = lex_peek(lex);
	isnull = tok == JSON_TOKEN_NULL;

	/* start callback fires before the value is parsed ... */
	if (ostart != NULL)
		(*ostart) (sem->semstate, fname, isnull);

	switch (tok)
	{
		case JSON_TOKEN_OBJECT_START:
			parse_object(lex, sem);
			break;
		case JSON_TOKEN_ARRAY_START:
			parse_array(lex, sem);
			break;
		default:
			parse_scalar(lex, sem);
	}

	/* ... and the end callback after it */
	if (oend != NULL)
		(*oend) (sem->semstate, fname, isnull);

	if (fname != NULL)
		pfree(fname);
}
381
static void
parse_object(JsonLexContext *lex, JsonSemAction sem)
{
	/*
	 * an object is a possibly empty sequence of object fields, separated by
	 * commas and surrounded by curly braces.
	 */
	json_struct_action ostart = sem->object_start;
	json_struct_action oend = sem->object_end;
	JsonTokenType tok;

	if (ostart != NULL)
		(*ostart) (sem->semstate);

	/*
	 * Data inside an object is at a higher nesting level than the object
	 * itself. Note that we increment this after we call the semantic routine
	 * for the object start and restore it before we call the routine for the
	 * object end.
	 */
	lex->lex_level++;

	/* we know this will succeed, just clearing the token */
	lex_expect(JSON_PARSE_OBJECT_START, lex, JSON_TOKEN_OBJECT_START);

	tok = lex_peek(lex);
	switch (tok)
	{
		case JSON_TOKEN_STRING:
			/* one or more comma-separated fields */
			parse_object_field(lex, sem);
			while (lex_accept(lex, JSON_TOKEN_COMMA, NULL))
				parse_object_field(lex, sem);
			break;
		case JSON_TOKEN_OBJECT_END:
			/* empty object: fall through to consume the '}' below */
			break;
		default:
			/* case of an invalid initial token inside the object */
			report_parse_error(JSON_PARSE_OBJECT_START, lex);
	}

	lex_expect(JSON_PARSE_OBJECT_NEXT, lex, JSON_TOKEN_OBJECT_END);

	lex->lex_level--;

	if (oend != NULL)
		(*oend) (sem->semstate);
}
429
430 static void
431 parse_array_element(JsonLexContext *lex, JsonSemAction sem)
432 {
433         json_aelem_action astart = sem->array_element_start;
434         json_aelem_action aend = sem->array_element_end;
435         JsonTokenType tok = lex_peek(lex);
436
437         bool            isnull;
438
439         isnull = tok == JSON_TOKEN_NULL;
440
441         if (astart != NULL)
442                 (*astart) (sem->semstate, isnull);
443
444         /* an array element is any object, array or scalar */
445         switch (tok)
446         {
447                 case JSON_TOKEN_OBJECT_START:
448                         parse_object(lex, sem);
449                         break;
450                 case JSON_TOKEN_ARRAY_START:
451                         parse_array(lex, sem);
452                         break;
453                 default:
454                         parse_scalar(lex, sem);
455         }
456
457         if (aend != NULL)
458                 (*aend) (sem->semstate, isnull);
459 }
460
static void
parse_array(JsonLexContext *lex, JsonSemAction sem)
{
	/*
	 * an array is a possibly empty sequence of array elements, separated by
	 * commas and surrounded by square brackets.
	 */
	json_struct_action astart = sem->array_start;
	json_struct_action aend = sem->array_end;

	if (astart != NULL)
		(*astart) (sem->semstate);

	/*
	 * Data inside an array is at a higher nesting level than the array
	 * itself. Note that we increment this after we call the semantic routine
	 * for the array start and restore it before we call the routine for the
	 * array end.
	 */
	lex->lex_level++;

	lex_expect(JSON_PARSE_ARRAY_START, lex, JSON_TOKEN_ARRAY_START);
	if (lex_peek(lex) != JSON_TOKEN_ARRAY_END)
	{
		/* non-empty array: one or more comma-separated elements */
		parse_array_element(lex, sem);

		while (lex_accept(lex, JSON_TOKEN_COMMA, NULL))
			parse_array_element(lex, sem);
	}

	lex_expect(JSON_PARSE_ARRAY_NEXT, lex, JSON_TOKEN_ARRAY_END);

	lex->lex_level--;

	if (aend != NULL)
		(*aend) (sem->semstate);
}
499
/*
 * Lex one token from the input stream.
 *
 * On return, lex->token_start/token_terminator bracket the token text,
 * lex->token_type identifies it, and lex->prev_token_terminator records
 * where the previous token ended (used for error-context reporting).
 */
static inline void
json_lex(JsonLexContext *lex)
{
	char	   *s;
	int			len;

	/* Skip leading whitespace. */
	s = lex->token_terminator;
	len = s - lex->input;
	while (len < lex->input_length &&
		   (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r'))
	{
		/* track line numbers for error messages */
		if (*s == '\n')
			++lex->line_number;
		++s;
		++len;
	}
	lex->token_start = s;

	/* Determine token type. */
	if (len >= lex->input_length)
	{
		/* end of input: token_start of NULL marks the EOF pseudo-token */
		lex->token_start = NULL;
		lex->prev_token_terminator = lex->token_terminator;
		lex->token_terminator = s;
		lex->token_type = JSON_TOKEN_END;
	}
	else
		switch (*s)
		{
				/* Single-character token, some kind of punctuation mark. */
			case '{':
				lex->prev_token_terminator = lex->token_terminator;
				lex->token_terminator = s + 1;
				lex->token_type = JSON_TOKEN_OBJECT_START;
				break;
			case '}':
				lex->prev_token_terminator = lex->token_terminator;
				lex->token_terminator = s + 1;
				lex->token_type = JSON_TOKEN_OBJECT_END;
				break;
			case '[':
				lex->prev_token_terminator = lex->token_terminator;
				lex->token_terminator = s + 1;
				lex->token_type = JSON_TOKEN_ARRAY_START;
				break;
			case ']':
				lex->prev_token_terminator = lex->token_terminator;
				lex->token_terminator = s + 1;
				lex->token_type = JSON_TOKEN_ARRAY_END;
				break;
			case ',':
				lex->prev_token_terminator = lex->token_terminator;
				lex->token_terminator = s + 1;
				lex->token_type = JSON_TOKEN_COMMA;
				break;
			case ':':
				lex->prev_token_terminator = lex->token_terminator;
				lex->token_terminator = s + 1;
				lex->token_type = JSON_TOKEN_COLON;
				break;
			case '"':
				/* string; json_lex_string sets the terminator itself */
				json_lex_string(lex);
				lex->token_type = JSON_TOKEN_STRING;
				break;
			case '-':
				/* Negative number. */
				json_lex_number(lex, s + 1);
				lex->token_type = JSON_TOKEN_NUMBER;
				break;
			case '0':
			case '1':
			case '2':
			case '3':
			case '4':
			case '5':
			case '6':
			case '7':
			case '8':
			case '9':
				/* Positive number. */
				json_lex_number(lex, s);
				lex->token_type = JSON_TOKEN_NUMBER;
				break;
			default:
				{
					char	   *p;

					/*
					 * We're not dealing with a string, number, legal
					 * punctuation mark, or end of string.  The only legal
					 * tokens we might find here are true, false, and null,
					 * but for error reporting purposes we scan until we see a
					 * non-alphanumeric character.  That way, we can report
					 * the whole word as an unexpected token, rather than just
					 * some unintuitive prefix thereof.
					 */
					for (p = s; JSON_ALPHANUMERIC_CHAR(*p) && p - s < lex->input_length - len; p++)
						 /* skip */ ;

					/*
					 * We got some sort of unexpected punctuation or an
					 * otherwise unexpected character, so just complain about
					 * that one character.
					 */
					if (p == s)
					{
						lex->prev_token_terminator = lex->token_terminator;
						lex->token_terminator = s + 1;
						report_invalid_token(lex);
					}

					/*
					 * We've got a real alphanumeric token here.  If it
					 * happens to be true, false, or null, all is well.  If
					 * not, error out.
					 */
					lex->prev_token_terminator = lex->token_terminator;
					lex->token_terminator = p;
					if (p - s == 4)
					{
						if (memcmp(s, "true", 4) == 0)
							lex->token_type = JSON_TOKEN_TRUE;
						else if (memcmp(s, "null", 4) == 0)
							lex->token_type = JSON_TOKEN_NULL;
						else
							report_invalid_token(lex);
					}
					else if (p - s == 5 && memcmp(s, "false", 5) == 0)
						lex->token_type = JSON_TOKEN_FALSE;
					else
						report_invalid_token(lex);

				}
		}						/* end of switch */
}
640
641 /*
642  * The next token in the input stream is known to be a string; lex it.
643  */
644 static inline void
645 json_lex_string(JsonLexContext *lex)
646 {
647         char       *s;
648         int                     len;
649
650         if (lex->strval != NULL)
651                 resetStringInfo(lex->strval);
652
653         len = lex->token_start - lex->input;
654         len++;
655         for (s = lex->token_start + 1; *s != '"'; s++, len++)
656         {
657                 /* Premature end of the string. */
658                 if (len >= lex->input_length)
659                 {
660                         lex->token_terminator = s;
661                         report_invalid_token(lex);
662                 }
663                 else if ((unsigned char) *s < 32)
664                 {
665                         /* Per RFC4627, these characters MUST be escaped. */
666                         /* Since *s isn't printable, exclude it from the context string */
667                         lex->token_terminator = s;
668                         ereport(ERROR,
669                                         (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
670                                          errmsg("invalid input syntax for type json"),
671                                          errdetail("Character with value 0x%02x must be escaped.",
672                                                            (unsigned char) *s),
673                                          report_json_context(lex)));
674                 }
675                 else if (*s == '\\')
676                 {
677                         /* OK, we have an escape character. */
678                         s++;
679                         len++;
680                         if (len >= lex->input_length)
681                         {
682                                 lex->token_terminator = s;
683                                 report_invalid_token(lex);
684                         }
685                         else if (*s == 'u')
686                         {
687                                 int                     i;
688                                 int                     ch = 0;
689
690                                 for (i = 1; i <= 4; i++)
691                                 {
692                                         s++;
693                                         len++;
694                                         if (len >= lex->input_length)
695                                         {
696                                                 lex->token_terminator = s;
697                                                 report_invalid_token(lex);
698                                         }
699                                         else if (*s >= '0' && *s <= '9')
700                                                 ch = (ch * 16) + (*s - '0');
701                                         else if (*s >= 'a' && *s <= 'f')
702                                                 ch = (ch * 16) + (*s - 'a') + 10;
703                                         else if (*s >= 'A' && *s <= 'F')
704                                                 ch = (ch * 16) + (*s - 'A') + 10;
705                                         else
706                                         {
707                                                 lex->token_terminator = s + pg_mblen(s);
708                                                 ereport(ERROR,
709                                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
710                                                                  errmsg("invalid input syntax for type json"),
711                                                                  errdetail("\"\\u\" must be followed by four hexadecimal digits."),
712                                                                  report_json_context(lex)));
713                                         }
714                                 }
715                                 if (lex->strval != NULL)
716                                 {
717                                         char            utf8str[5];
718                                         int                     utf8len;
719                                         char       *converted;
720
721                                         unicode_to_utf8(ch, (unsigned char *) utf8str);
722                                         utf8len = pg_utf_mblen((unsigned char *) utf8str);
723                                         utf8str[utf8len] = '\0';
724                                         converted = pg_any_to_server(utf8str, utf8len, PG_UTF8);
725                                         appendStringInfoString(lex->strval, converted);
726                                         if (converted != utf8str)
727                                                 pfree(converted);
728
729                                 }
730                         }
731                         else if (lex->strval != NULL)
732                         {
733                                 switch (*s)
734                                 {
735                                         case '"':
736                                         case '\\':
737                                         case '/':
738                                                 appendStringInfoChar(lex->strval, *s);
739                                                 break;
740                                         case 'b':
741                                                 appendStringInfoChar(lex->strval, '\b');
742                                                 break;
743                                         case 'f':
744                                                 appendStringInfoChar(lex->strval, '\f');
745                                                 break;
746                                         case 'n':
747                                                 appendStringInfoChar(lex->strval, '\n');
748                                                 break;
749                                         case 'r':
750                                                 appendStringInfoChar(lex->strval, '\r');
751                                                 break;
752                                         case 't':
753                                                 appendStringInfoChar(lex->strval, '\t');
754                                                 break;
755                                         default:
756                                                 /* Not a valid string escape, so error out. */
757                                                 lex->token_terminator = s + pg_mblen(s);
758                                                 ereport(ERROR,
759                                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
760                                                                  errmsg("invalid input syntax for type json"),
761                                                         errdetail("Escape sequence \"\\%s\" is invalid.",
762                                                                           extract_mb_char(s)),
763                                                                  report_json_context(lex)));
764                                 }
765                         }
766                         else if (strchr("\"\\/bfnrt", *s) == NULL)
767                         {
768                                 /*
769                                  * Simpler processing if we're not bothered about de-escaping
770                                  *
771                                  * It's very tempting to remove the strchr() call here and
772                                  * replace it with a switch statement, but testing so far has
773                                  * shown it's not a performance win.
774                                  */
775                                 lex->token_terminator = s + pg_mblen(s);
776                                 ereport(ERROR,
777                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
778                                                  errmsg("invalid input syntax for type json"),
779                                                  errdetail("Escape sequence \"\\%s\" is invalid.",
780                                                                    extract_mb_char(s)),
781                                                  report_json_context(lex)));
782                         }
783
784                 }
785                 else if (lex->strval != NULL)
786                 {
787                         appendStringInfoChar(lex->strval, *s);
788                 }
789
790         }
791
792         /* Hooray, we found the end of the string! */
793         lex->prev_token_terminator = lex->token_terminator;
794         lex->token_terminator = s + 1;
795 }
796
797 /*-------------------------------------------------------------------------
798  * The next token in the input stream is known to be a number; lex it.
799  *
800  * In JSON, a number consists of four parts:
801  *
802  * (1) An optional minus sign ('-').
803  *
804  * (2) Either a single '0', or a string of one or more digits that does not
805  *         begin with a '0'.
806  *
807  * (3) An optional decimal part, consisting of a period ('.') followed by
808  *         one or more digits.  (Note: While this part can be omitted
809  *         completely, it's not OK to have only the decimal point without
810  *         any digits afterwards.)
811  *
812  * (4) An optional exponent part, consisting of 'e' or 'E', optionally
813  *         followed by '+' or '-', followed by one or more digits.      (Note:
814  *         As with the decimal part, if 'e' or 'E' is present, it must be
815  *         followed by at least one digit.)
816  *
817  * The 's' argument to this function points to the ostensible beginning
818  * of part 2 - i.e. the character after any optional minus sign, and the
819  * first character of the string if there is none.
820  *
821  *-------------------------------------------------------------------------
822  */
static inline void
json_lex_number(JsonLexContext *lex, char *s)
{
	bool		error = false;
	char	   *p;
	int			len;

	/*
	 * "len" is the offset of "s" within lex->input; it is advanced in
	 * lockstep with "s" so that we stop at lex->input_length (the input is
	 * not assumed to be null-terminated).
	 */
	len = s - lex->input;
	/* Part (1): leading sign indicator. */
	/* Caller already did this for us; so do nothing. */

	/* Part (2): parse main digit string. */
	if (*s == '0')
	{
		/* A leading '0' must stand alone (no "007"-style numbers). */
		s++;
		len++;
	}
	else if (*s >= '1' && *s <= '9')
	{
		do
		{
			s++;
			len++;
		} while (*s >= '0' && *s <= '9' && len < lex->input_length);
	}
	else
		error = true;

	/* Part (3): parse optional decimal portion. */
	if (len < lex->input_length && *s == '.')
	{
		s++;
		len++;
		/* A '.' must be followed by at least one digit. */
		if (len == lex->input_length || *s < '0' || *s > '9')
			error = true;
		else
		{
			do
			{
				s++;
				len++;
			} while (*s >= '0' && *s <= '9' && len < lex->input_length);
		}
	}

	/* Part (4): parse optional exponent. */
	if (len < lex->input_length && (*s == 'e' || *s == 'E'))
	{
		s++;
		len++;
		if (len < lex->input_length && (*s == '+' || *s == '-'))
		{
			s++;
			len++;
		}
		/* 'e'/'E' (plus optional sign) must be followed by a digit. */
		if (len == lex->input_length || *s < '0' || *s > '9')
			error = true;
		else
		{
			do
			{
				s++;
				len++;
			} while (len < lex->input_length && *s >= '0' && *s <= '9');
		}
	}

	/*
	 * Check for trailing garbage.  As in json_lex(), any alphanumeric stuff
	 * here should be considered part of the token for error-reporting
	 * purposes.
	 */
	for (p = s; JSON_ALPHANUMERIC_CHAR(*p) && len < lex->input_length; p++, len++)
		error = true;
	/* Record the token's bounds whether or not we're about to error out. */
	lex->prev_token_terminator = lex->token_terminator;
	lex->token_terminator = p;
	if (error)
		report_invalid_token(lex);
}
902
903 /*
904  * Report a parse error.
905  *
906  * lex->token_start and lex->token_terminator must identify the current token.
907  */
908 static void
909 report_parse_error(JsonParseContext ctx, JsonLexContext *lex)
910 {
911         char       *token;
912         int                     toklen;
913
914         /* Handle case where the input ended prematurely. */
915         if (lex->token_start == NULL || lex->token_type == JSON_TOKEN_END)
916                 ereport(ERROR,
917                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
918                                  errmsg("invalid input syntax for type json"),
919                                  errdetail("The input string ended unexpectedly."),
920                                  report_json_context(lex)));
921
922         /* Separate out the current token. */
923         toklen = lex->token_terminator - lex->token_start;
924         token = palloc(toklen + 1);
925         memcpy(token, lex->token_start, toklen);
926         token[toklen] = '\0';
927
928         /* Complain, with the appropriate detail message. */
929         if (ctx == JSON_PARSE_END)
930                 ereport(ERROR,
931                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
932                                  errmsg("invalid input syntax for type json"),
933                                  errdetail("Expected end of input, but found \"%s\".",
934                                                    token),
935                                  report_json_context(lex)));
936         else
937         {
938                 switch (ctx)
939                 {
940                         case JSON_PARSE_VALUE:
941                                 ereport(ERROR,
942                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
943                                                  errmsg("invalid input syntax for type json"),
944                                                  errdetail("Expected JSON value, but found \"%s\".",
945                                                                    token),
946                                                  report_json_context(lex)));
947                                 break;
948                         case JSON_PARSE_STRING:
949                                 ereport(ERROR,
950                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
951                                                  errmsg("invalid input syntax for type json"),
952                                                  errdetail("Expected string, but found \"%s\".",
953                                                                    token),
954                                                  report_json_context(lex)));
955                                 break;
956                         case JSON_PARSE_ARRAY_START:
957                                 ereport(ERROR,
958                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
959                                                  errmsg("invalid input syntax for type json"),
960                                                  errdetail("Expected array element or \"]\", but found \"%s\".",
961                                                                    token),
962                                                  report_json_context(lex)));
963                                 break;
964                         case JSON_PARSE_ARRAY_NEXT:
965                                 ereport(ERROR,
966                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
967                                                  errmsg("invalid input syntax for type json"),
968                                           errdetail("Expected \",\" or \"]\", but found \"%s\".",
969                                                                 token),
970                                                  report_json_context(lex)));
971                                 break;
972                         case JSON_PARSE_OBJECT_START:
973                                 ereport(ERROR,
974                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
975                                                  errmsg("invalid input syntax for type json"),
976                                          errdetail("Expected string or \"}\", but found \"%s\".",
977                                                            token),
978                                                  report_json_context(lex)));
979                                 break;
980                         case JSON_PARSE_OBJECT_LABEL:
981                                 ereport(ERROR,
982                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
983                                                  errmsg("invalid input syntax for type json"),
984                                                  errdetail("Expected \":\", but found \"%s\".",
985                                                                    token),
986                                                  report_json_context(lex)));
987                                 break;
988                         case JSON_PARSE_OBJECT_NEXT:
989                                 ereport(ERROR,
990                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
991                                                  errmsg("invalid input syntax for type json"),
992                                           errdetail("Expected \",\" or \"}\", but found \"%s\".",
993                                                                 token),
994                                                  report_json_context(lex)));
995                                 break;
996                         case JSON_PARSE_OBJECT_COMMA:
997                                 ereport(ERROR,
998                                                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
999                                                  errmsg("invalid input syntax for type json"),
1000                                                  errdetail("Expected string, but found \"%s\".",
1001                                                                    token),
1002                                                  report_json_context(lex)));
1003                                 break;
1004                         default:
1005                                 elog(ERROR, "unexpected json parse state: %d", ctx);
1006                 }
1007         }
1008 }
1009
1010 /*
1011  * Report an invalid input token.
1012  *
1013  * lex->token_start and lex->token_terminator must identify the token.
1014  */
1015 static void
1016 report_invalid_token(JsonLexContext *lex)
1017 {
1018         char       *token;
1019         int                     toklen;
1020
1021         /* Separate out the offending token. */
1022         toklen = lex->token_terminator - lex->token_start;
1023         token = palloc(toklen + 1);
1024         memcpy(token, lex->token_start, toklen);
1025         token[toklen] = '\0';
1026
1027         ereport(ERROR,
1028                         (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
1029                          errmsg("invalid input syntax for type json"),
1030                          errdetail("Token \"%s\" is invalid.", token),
1031                          report_json_context(lex)));
1032 }
1033
1034 /*
1035  * Report a CONTEXT line for bogus JSON input.
1036  *
1037  * lex->token_terminator must be set to identify the spot where we detected
1038  * the error.  Note that lex->token_start might be NULL, in case we recognized
1039  * error at EOF.
1040  *
1041  * The return value isn't meaningful, but we make it non-void so that this
1042  * can be invoked inside ereport().
1043  */
1044 static int
1045 report_json_context(JsonLexContext *lex)
1046 {
1047         const char *context_start;
1048         const char *context_end;
1049         const char *line_start;
1050         int                     line_number;
1051         char       *ctxt;
1052         int                     ctxtlen;
1053         const char *prefix;
1054         const char *suffix;
1055
1056         /* Choose boundaries for the part of the input we will display */
1057         context_start = lex->input;
1058         context_end = lex->token_terminator;
1059         line_start = context_start;
1060         line_number = 1;
1061         for (;;)
1062         {
1063                 /* Always advance over newlines (context_end test is just paranoia) */
1064                 if (*context_start == '\n' && context_start < context_end)
1065                 {
1066                         context_start++;
1067                         line_start = context_start;
1068                         line_number++;
1069                         continue;
1070                 }
1071                 /* Otherwise, done as soon as we are close enough to context_end */
1072                 if (context_end - context_start < 50)
1073                         break;
1074                 /* Advance to next multibyte character */
1075                 if (IS_HIGHBIT_SET(*context_start))
1076                         context_start += pg_mblen(context_start);
1077                 else
1078                         context_start++;
1079         }
1080
1081         /*
1082          * We add "..." to indicate that the excerpt doesn't start at the
1083          * beginning of the line ... but if we're within 3 characters of the
1084          * beginning of the line, we might as well just show the whole line.
1085          */
1086         if (context_start - line_start <= 3)
1087                 context_start = line_start;
1088
1089         /* Get a null-terminated copy of the data to present */
1090         ctxtlen = context_end - context_start;
1091         ctxt = palloc(ctxtlen + 1);
1092         memcpy(ctxt, context_start, ctxtlen);
1093         ctxt[ctxtlen] = '\0';
1094
1095         /*
1096          * Show the context, prefixing "..." if not starting at start of line, and
1097          * suffixing "..." if not ending at end of line.
1098          */
1099         prefix = (context_start > line_start) ? "..." : "";
1100         suffix = (lex->token_type != JSON_TOKEN_END && context_end - lex->input < lex->input_length && *context_end != '\n' && *context_end != '\r') ? "..." : "";
1101
1102         return errcontext("JSON data, line %d: %s%s%s",
1103                                           line_number, prefix, ctxt, suffix);
1104 }
1105
/*
 * Copy a single (possibly multi-byte) character starting at "s" into a
 * freshly palloc'd, null-terminated string.
 */
static char *
extract_mb_char(char *s)
{
	int			mblen = pg_mblen(s);
	char	   *result = palloc(mblen + 1);

	memcpy(result, s, mblen);
	result[mblen] = '\0';

	return result;
}
1122
1123 /*
1124  * Turn a scalar Datum into JSON, appending the string to "result".
1125  *
1126  * Hand off a non-scalar datum to composite_to_json or array_to_json_internal
1127  * as appropriate.
1128  */
1129 static void
1130 datum_to_json(Datum val, bool is_null, StringInfo result,
1131                           TYPCATEGORY tcategory, Oid typoutputfunc)
1132 {
1133         char       *outputstr;
1134         text       *jsontext;
1135
1136         if (is_null)
1137         {
1138                 appendStringInfoString(result, "null");
1139                 return;
1140         }
1141
1142         switch (tcategory)
1143         {
1144                 case TYPCATEGORY_ARRAY:
1145                         array_to_json_internal(val, result, false);
1146                         break;
1147                 case TYPCATEGORY_COMPOSITE:
1148                         composite_to_json(val, result, false);
1149                         break;
1150                 case TYPCATEGORY_BOOLEAN:
1151                         if (DatumGetBool(val))
1152                                 appendStringInfoString(result, "true");
1153                         else
1154                                 appendStringInfoString(result, "false");
1155                         break;
1156                 case TYPCATEGORY_NUMERIC:
1157                         outputstr = OidOutputFunctionCall(typoutputfunc, val);
1158
1159                         /*
1160                          * Don't call escape_json here if it's a valid JSON number.
1161                          * Numeric output should usually be a valid JSON number and JSON
1162                          * numbers shouldn't be quoted. Quote cases like "Nan" and
1163                          * "Infinity", however.
1164                          */
1165                         if (strpbrk(outputstr, NON_NUMERIC_LETTER) == NULL)
1166                                 appendStringInfoString(result, outputstr);
1167                         else
1168                                 escape_json(result, outputstr);
1169                         pfree(outputstr);
1170                         break;
1171                 case TYPCATEGORY_JSON:
1172                         /* JSON will already be escaped */
1173                         outputstr = OidOutputFunctionCall(typoutputfunc, val);
1174                         appendStringInfoString(result, outputstr);
1175                         pfree(outputstr);
1176                         break;
1177                 case TYPCATEGORY_JSON_CAST:
1178                         jsontext = DatumGetTextP(OidFunctionCall1(typoutputfunc, val));
1179                         outputstr = text_to_cstring(jsontext);
1180                         appendStringInfoString(result, outputstr);
1181                         pfree(outputstr);
1182                         pfree(jsontext);
1183                         break;
1184                 default:
1185                         outputstr = OidOutputFunctionCall(typoutputfunc, val);
1186                         escape_json(result, outputstr);
1187                         pfree(outputstr);
1188                         break;
1189         }
1190 }
1191
1192 /*
1193  * Process a single dimension of an array.
1194  * If it's the innermost dimension, output the values, otherwise call
1195  * ourselves recursively to process the next dimension.
1196  */
1197 static void
1198 array_dim_to_json(StringInfo result, int dim, int ndims, int *dims, Datum *vals,
1199                                   bool *nulls, int *valcount, TYPCATEGORY tcategory,
1200                                   Oid typoutputfunc, bool use_line_feeds)
1201 {
1202         int                     i;
1203         const char *sep;
1204
1205         Assert(dim < ndims);
1206
1207         sep = use_line_feeds ? ",\n " : ",";
1208
1209         appendStringInfoChar(result, '[');
1210
1211         for (i = 1; i <= dims[dim]; i++)
1212         {
1213                 if (i > 1)
1214                         appendStringInfoString(result, sep);
1215
1216                 if (dim + 1 == ndims)
1217                 {
1218                         datum_to_json(vals[*valcount], nulls[*valcount], result, tcategory,
1219                                                   typoutputfunc);
1220                         (*valcount)++;
1221                 }
1222                 else
1223                 {
1224                         /*
1225                          * Do we want line feeds on inner dimensions of arrays? For now
1226                          * we'll say no.
1227                          */
1228                         array_dim_to_json(result, dim + 1, ndims, dims, vals, nulls,
1229                                                           valcount, tcategory, typoutputfunc, false);
1230                 }
1231         }
1232
1233         appendStringInfoChar(result, ']');
1234 }
1235
/*
 * Turn an array into JSON.
 *
 * The array is deconstructed and each element rendered via datum_to_json;
 * multi-dimensional arrays become nested JSON arrays.  If use_line_feeds is
 * true, elements of the outermost dimension are separated by ",\n ".
 */
static void
array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
{
	ArrayType  *v = DatumGetArrayTypeP(array);
	Oid			element_type = ARR_ELEMTYPE(v);
	int		   *dim;
	int			ndim;
	int			nitems;
	int			count = 0;		/* running index into elements/nulls */
	Datum	   *elements;
	bool	   *nulls;
	int16		typlen;
	bool		typbyval;
	char		typalign,
				typdelim;
	Oid			typioparam;
	Oid			typoutputfunc;
	TYPCATEGORY tcategory;
	Oid			castfunc = InvalidOid;

	ndim = ARR_NDIM(v);
	dim = ARR_DIMS(v);
	nitems = ArrayGetNItems(ndim, dim);

	/* An empty array renders as "[]", with no further work. */
	if (nitems <= 0)
	{
		appendStringInfoString(result, "[]");
		return;
	}

	get_type_io_data(element_type, IOFunc_output,
					 &typlen, &typbyval, &typalign,
					 &typdelim, &typioparam, &typoutputfunc);

	/*
	 * For user-defined element types, check the syscache for a function-based
	 * cast to json; if one exists, use it instead of the type's ordinary
	 * output function.
	 */
	if (element_type > FirstNormalObjectId)
	{
		HeapTuple	tuple;
		Form_pg_cast castForm;

		tuple = SearchSysCache2(CASTSOURCETARGET,
								ObjectIdGetDatum(element_type),
								ObjectIdGetDatum(JSONOID));
		if (HeapTupleIsValid(tuple))
		{
			castForm = (Form_pg_cast) GETSTRUCT(tuple);

			if (castForm->castmethod == COERCION_METHOD_FUNCTION)
				castfunc = typoutputfunc = castForm->castfunc;

			ReleaseSysCache(tuple);
		}
	}

	deconstruct_array(v, element_type, typlen, typbyval,
					  typalign, &elements, &nulls,
					  &nitems);

	/* Classify the element type so datum_to_json can dispatch correctly. */
	if (castfunc != InvalidOid)
		tcategory = TYPCATEGORY_JSON_CAST;
	else if (element_type == RECORDOID)
		tcategory = TYPCATEGORY_COMPOSITE;
	else if (element_type == JSONOID)
		tcategory = TYPCATEGORY_JSON;
	else
		tcategory = TypeCategory(element_type);

	array_dim_to_json(result, 0, ndim, dim, elements, nulls, &count, tcategory,
					  typoutputfunc, use_line_feeds);

	pfree(elements);
	pfree(nulls);
}
1311
/*
 * Turn a composite / record into a JSON object.
 *
 * Each non-dropped attribute becomes a "name":value pair, with the value
 * rendered by datum_to_json.  If use_line_feeds is true, pairs are
 * separated by ",\n ".
 */
static void
composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
{
	HeapTupleHeader td;
	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;
	HeapTupleData tmptup,
			   *tuple;
	int			i;
	bool		needsep = false;	/* emit separator before next pair? */
	const char *sep;

	sep = use_line_feeds ? ",\n " : ",";

	td = DatumGetHeapTupleHeader(composite);

	/* Extract rowtype info and find a tupdesc */
	tupType = HeapTupleHeaderGetTypeId(td);
	tupTypmod = HeapTupleHeaderGetTypMod(td);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);

	/* Build a temporary HeapTuple control structure */
	tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
	tmptup.t_data = td;
	tuple = &tmptup;

	appendStringInfoChar(result, '{');

	for (i = 0; i < tupdesc->natts; i++)
	{
		Datum		val,
					origval;
		bool		isnull;
		char	   *attname;
		TYPCATEGORY tcategory;
		Oid			typoutput;
		bool		typisvarlena;
		Oid			castfunc = InvalidOid;

		/* Dropped columns are omitted from the output entirely. */
		if (tupdesc->attrs[i]->attisdropped)
			continue;

		if (needsep)
			appendStringInfoString(result, sep);
		needsep = true;

		/* Emit the column name as an escaped JSON string key. */
		attname = NameStr(tupdesc->attrs[i]->attname);
		escape_json(result, attname);
		appendStringInfoChar(result, ':');

		origval = heap_getattr(tuple, i + 1, tupdesc, &isnull);

		getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
						  &typoutput, &typisvarlena);

		/*
		 * For user-defined column types, check for a function-based cast to
		 * json; if one exists, use it instead of the type's ordinary output
		 * function.
		 */
		if (tupdesc->attrs[i]->atttypid > FirstNormalObjectId)
		{
			HeapTuple	cast_tuple;
			Form_pg_cast castForm;

			cast_tuple = SearchSysCache2(CASTSOURCETARGET,
							   ObjectIdGetDatum(tupdesc->attrs[i]->atttypid),
										 ObjectIdGetDatum(JSONOID));
			if (HeapTupleIsValid(cast_tuple))
			{
				castForm = (Form_pg_cast) GETSTRUCT(cast_tuple);

				if (castForm->castmethod == COERCION_METHOD_FUNCTION)
					castfunc = typoutput = castForm->castfunc;

				ReleaseSysCache(cast_tuple);
			}
		}

		/* Classify the column type so datum_to_json dispatches correctly. */
		if (castfunc != InvalidOid)
			tcategory = TYPCATEGORY_JSON_CAST;
		else if (tupdesc->attrs[i]->atttypid == RECORDARRAYOID)
			tcategory = TYPCATEGORY_ARRAY;
		else if (tupdesc->attrs[i]->atttypid == RECORDOID)
			tcategory = TYPCATEGORY_COMPOSITE;
		else if (tupdesc->attrs[i]->atttypid == JSONOID)
			tcategory = TYPCATEGORY_JSON;
		else
			tcategory = TypeCategory(tupdesc->attrs[i]->atttypid);

		/*
		 * If we have a toasted datum, forcibly detoast it here to avoid
		 * memory leakage inside the type's output routine.
		 */
		if (typisvarlena && !isnull)
			val = PointerGetDatum(PG_DETOAST_DATUM(origval));
		else
			val = origval;

		datum_to_json(val, isnull, result, tcategory, typoutput);

		/* Clean up detoasted copy, if any */
		if (val != origval)
			pfree(DatumGetPointer(val));
	}

	appendStringInfoChar(result, '}');
	ReleaseTupleDesc(tupdesc);
}
1420
1421 /*
1422  * SQL function array_to_json(row)
1423  */
1424 extern Datum
1425 array_to_json(PG_FUNCTION_ARGS)
1426 {
1427         Datum           array = PG_GETARG_DATUM(0);
1428         StringInfo      result;
1429
1430         result = makeStringInfo();
1431
1432         array_to_json_internal(array, result, false);
1433
1434         PG_RETURN_TEXT_P(cstring_to_text(result->data));
1435 }
1436
1437 /*
1438  * SQL function array_to_json(row, prettybool)
1439  */
1440 extern Datum
1441 array_to_json_pretty(PG_FUNCTION_ARGS)
1442 {
1443         Datum           array = PG_GETARG_DATUM(0);
1444         bool            use_line_feeds = PG_GETARG_BOOL(1);
1445         StringInfo      result;
1446
1447         result = makeStringInfo();
1448
1449         array_to_json_internal(array, result, use_line_feeds);
1450
1451         PG_RETURN_TEXT_P(cstring_to_text(result->data));
1452 }
1453
1454 /*
1455  * SQL function row_to_json(row)
1456  */
1457 extern Datum
1458 row_to_json(PG_FUNCTION_ARGS)
1459 {
1460         Datum           array = PG_GETARG_DATUM(0);
1461         StringInfo      result;
1462
1463         result = makeStringInfo();
1464
1465         composite_to_json(array, result, false);
1466
1467         PG_RETURN_TEXT_P(cstring_to_text(result->data));
1468 }
1469
1470 /*
1471  * SQL function row_to_json(row, prettybool)
1472  */
1473 extern Datum
1474 row_to_json_pretty(PG_FUNCTION_ARGS)
1475 {
1476         Datum           array = PG_GETARG_DATUM(0);
1477         bool            use_line_feeds = PG_GETARG_BOOL(1);
1478         StringInfo      result;
1479
1480         result = makeStringInfo();
1481
1482         composite_to_json(array, result, use_line_feeds);
1483
1484         PG_RETURN_TEXT_P(cstring_to_text(result->data));
1485 }
1486
1487 /*
1488  * SQL function to_json(anyvalue)
1489  */
1490 Datum
1491 to_json(PG_FUNCTION_ARGS)
1492 {
1493         Oid                     val_type = get_fn_expr_argtype(fcinfo->flinfo, 0);
1494         StringInfo      result;
1495         Datum           orig_val,
1496                                 val;
1497         TYPCATEGORY tcategory;
1498         Oid                     typoutput;
1499         bool            typisvarlena;
1500         Oid                     castfunc = InvalidOid;
1501
1502         if (val_type == InvalidOid)
1503                 ereport(ERROR,
1504                                 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1505                                  errmsg("could not determine input data type")));
1506
1507
1508         result = makeStringInfo();
1509
1510         orig_val = PG_ARGISNULL(0) ? (Datum) 0 : PG_GETARG_DATUM(0);
1511
1512         getTypeOutputInfo(val_type, &typoutput, &typisvarlena);
1513
1514         if (val_type > FirstNormalObjectId)
1515         {
1516                 HeapTuple       tuple;
1517                 Form_pg_cast castForm;
1518
1519                 tuple = SearchSysCache2(CASTSOURCETARGET,
1520                                                                 ObjectIdGetDatum(val_type),
1521                                                                 ObjectIdGetDatum(JSONOID));
1522                 if (HeapTupleIsValid(tuple))
1523                 {
1524                         castForm = (Form_pg_cast) GETSTRUCT(tuple);
1525
1526                         if (castForm->castmethod == COERCION_METHOD_FUNCTION)
1527                                 castfunc = typoutput = castForm->castfunc;
1528
1529                         ReleaseSysCache(tuple);
1530                 }
1531         }
1532
1533         if (castfunc != InvalidOid)
1534                 tcategory = TYPCATEGORY_JSON_CAST;
1535         else if (val_type == RECORDARRAYOID)
1536                 tcategory = TYPCATEGORY_ARRAY;
1537         else if (val_type == RECORDOID)
1538                 tcategory = TYPCATEGORY_COMPOSITE;
1539         else if (val_type == JSONOID)
1540                 tcategory = TYPCATEGORY_JSON;
1541         else
1542                 tcategory = TypeCategory(val_type);
1543
1544         /*
1545          * If we have a toasted datum, forcibly detoast it here to avoid memory
1546          * leakage inside the type's output routine.
1547          */
1548         if (typisvarlena && orig_val != (Datum) 0)
1549                 val = PointerGetDatum(PG_DETOAST_DATUM(orig_val));
1550         else
1551                 val = orig_val;
1552
1553         datum_to_json(val, false, result, tcategory, typoutput);
1554
1555         /* Clean up detoasted copy, if any */
1556         if (val != orig_val)
1557                 pfree(DatumGetPointer(val));
1558
1559         PG_RETURN_TEXT_P(cstring_to_text(result->data));
1560 }
1561
1562 /*
1563  * json_agg transition function
1564  */
1565 Datum
1566 json_agg_transfn(PG_FUNCTION_ARGS)
1567 {
1568         Oid                     val_type = get_fn_expr_argtype(fcinfo->flinfo, 1);
1569         MemoryContext aggcontext,
1570                                 oldcontext;
1571         StringInfo      state;
1572         Datum           orig_val,
1573                                 val;
1574         TYPCATEGORY tcategory;
1575         Oid                     typoutput;
1576         bool            typisvarlena;
1577         Oid                     castfunc = InvalidOid;
1578
1579         if (val_type == InvalidOid)
1580                 ereport(ERROR,
1581                                 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1582                                  errmsg("could not determine input data type")));
1583
1584         if (!AggCheckCallContext(fcinfo, &aggcontext))
1585         {
1586                 /* cannot be called directly because of internal-type argument */
1587                 elog(ERROR, "json_agg_transfn called in non-aggregate context");
1588         }
1589
1590         if (PG_ARGISNULL(0))
1591         {
1592                 /*
1593                  * Make this StringInfo in a context where it will persist for the
1594                  * duration off the aggregate call. It's only needed for this initial
1595                  * piece, as the StringInfo routines make sure they use the right
1596                  * context to enlarge the object if necessary.
1597                  */
1598                 oldcontext = MemoryContextSwitchTo(aggcontext);
1599                 state = makeStringInfo();
1600                 MemoryContextSwitchTo(oldcontext);
1601
1602                 appendStringInfoChar(state, '[');
1603         }
1604         else
1605         {
1606                 state = (StringInfo) PG_GETARG_POINTER(0);
1607                 appendStringInfoString(state, ", ");
1608         }
1609
1610         /* fast path for NULLs */
1611         if (PG_ARGISNULL(1))
1612         {
1613                 orig_val = (Datum) 0;
1614                 datum_to_json(orig_val, true, state, 0, InvalidOid);
1615                 PG_RETURN_POINTER(state);
1616         }
1617
1618
1619         orig_val = PG_GETARG_DATUM(1);
1620
1621         getTypeOutputInfo(val_type, &typoutput, &typisvarlena);
1622
1623         if (val_type > FirstNormalObjectId)
1624         {
1625                 HeapTuple       tuple;
1626                 Form_pg_cast castForm;
1627
1628                 tuple = SearchSysCache2(CASTSOURCETARGET,
1629                                                                 ObjectIdGetDatum(val_type),
1630                                                                 ObjectIdGetDatum(JSONOID));
1631                 if (HeapTupleIsValid(tuple))
1632                 {
1633                         castForm = (Form_pg_cast) GETSTRUCT(tuple);
1634
1635                         if (castForm->castmethod == COERCION_METHOD_FUNCTION)
1636                                 castfunc = typoutput = castForm->castfunc;
1637
1638                         ReleaseSysCache(tuple);
1639                 }
1640         }
1641
1642         if (castfunc != InvalidOid)
1643                 tcategory = TYPCATEGORY_JSON_CAST;
1644         else if (val_type == RECORDARRAYOID)
1645                 tcategory = TYPCATEGORY_ARRAY;
1646         else if (val_type == RECORDOID)
1647                 tcategory = TYPCATEGORY_COMPOSITE;
1648         else if (val_type == JSONOID)
1649                 tcategory = TYPCATEGORY_JSON;
1650         else
1651                 tcategory = TypeCategory(val_type);
1652
1653         /*
1654          * If we have a toasted datum, forcibly detoast it here to avoid memory
1655          * leakage inside the type's output routine.
1656          */
1657         if (typisvarlena)
1658                 val = PointerGetDatum(PG_DETOAST_DATUM(orig_val));
1659         else
1660                 val = orig_val;
1661
1662         if (!PG_ARGISNULL(0) &&
1663           (tcategory == TYPCATEGORY_ARRAY || tcategory == TYPCATEGORY_COMPOSITE))
1664         {
1665                 appendStringInfoString(state, "\n ");
1666         }
1667
1668         datum_to_json(val, false, state, tcategory, typoutput);
1669
1670         /* Clean up detoasted copy, if any */
1671         if (val != orig_val)
1672                 pfree(DatumGetPointer(val));
1673
1674         /*
1675          * The transition type for array_agg() is declared to be "internal", which
1676          * is a pass-by-value type the same size as a pointer.  So we can safely
1677          * pass the ArrayBuildState pointer through nodeAgg.c's machinations.
1678          */
1679         PG_RETURN_POINTER(state);
1680 }
1681
1682 /*
1683  * json_agg final function
1684  */
1685 Datum
1686 json_agg_finalfn(PG_FUNCTION_ARGS)
1687 {
1688         StringInfo      state;
1689
1690         /* cannot be called directly because of internal-type argument */
1691         Assert(AggCheckCallContext(fcinfo, NULL));
1692
1693         state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
1694
1695         if (state == NULL)
1696                 PG_RETURN_NULL();
1697
1698         appendStringInfoChar(state, ']');
1699
1700         PG_RETURN_TEXT_P(cstring_to_text(state->data));
1701 }
1702
1703 /*
1704  * Produce a JSON string literal, properly escaping characters in the text.
1705  */
1706 void
1707 escape_json(StringInfo buf, const char *str)
1708 {
1709         const char *p;
1710
1711         appendStringInfoCharMacro(buf, '\"');
1712         for (p = str; *p; p++)
1713         {
1714                 switch (*p)
1715                 {
1716                         case '\b':
1717                                 appendStringInfoString(buf, "\\b");
1718                                 break;
1719                         case '\f':
1720                                 appendStringInfoString(buf, "\\f");
1721                                 break;
1722                         case '\n':
1723                                 appendStringInfoString(buf, "\\n");
1724                                 break;
1725                         case '\r':
1726                                 appendStringInfoString(buf, "\\r");
1727                                 break;
1728                         case '\t':
1729                                 appendStringInfoString(buf, "\\t");
1730                                 break;
1731                         case '"':
1732                                 appendStringInfoString(buf, "\\\"");
1733                                 break;
1734                         case '\\':
1735                                 appendStringInfoString(buf, "\\\\");
1736                                 break;
1737                         default:
1738                                 if ((unsigned char) *p < ' ')
1739                                         appendStringInfo(buf, "\\u%04x", (int) *p);
1740                                 else
1741                                         appendStringInfoCharMacro(buf, *p);
1742                                 break;
1743                 }
1744         }
1745         appendStringInfoCharMacro(buf, '\"');
1746 }