From 1e2f38b39b419a9a38536b4ca2b768da5bca2669 Mon Sep 17 00:00:00 2001 From: lxg Date: Wed, 26 Jan 2022 10:47:58 +0800 Subject: [PATCH] init project --- .gitignore | 8 + Dockerfile | 32 + Makefile | 5 + README.md | 29 + cmd/main.go | 40 + context.go | 69 + docs/assets/sip.png | Bin 0 -> 84998 bytes error.go | 19 + go.mod | 10 + go.sum | 140 + header.go | 559 ++++ header_test.go | 14 + method.go | 38 + pool/reader.go | 41 + proxy/conn.go | 41 + proxy/context.go | 42 + proxy/process.go | 36 + proxy/relationship.go | 8 + proxy/reverseproxy.go | 417 +++ proxy/reverseproxy_test.go | 10 + proxy/route.go | 14 + proxy/transaction.go | 73 + proxy/transport.go | 24 + request.go | 168 + request_test.go | 49 + response.go | 140 + response_test.go | 25 + status.go | 157 + transaction.go | 39 + transport.go | 25 + udp_transport.go | 180 ++ uri.go | 270 ++ uri_test.go | 22 + utils.go | 9 + .../golang/micro/helper/random/int.go | 11 + .../golang/micro/helper/random/ip.go | 21 + .../golang/micro/helper/random/string.go | 28 + vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 + vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/null.go | 118 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 294 ++ vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 76 + vendor/github.com/rs/xid/.appveyor.yml | 27 + vendor/github.com/rs/xid/.travis.yml 
| 8 + vendor/github.com/rs/xid/LICENSE | 19 + vendor/github.com/rs/xid/README.md | 115 + vendor/github.com/rs/xid/hostid_darwin.go | 9 + vendor/github.com/rs/xid/hostid_fallback.go | 9 + vendor/github.com/rs/xid/hostid_freebsd.go | 9 + vendor/github.com/rs/xid/hostid_linux.go | 13 + vendor/github.com/rs/xid/hostid_windows.go | 38 + vendor/github.com/rs/xid/id.go | 380 +++ vendor/gopkg.in/yaml.v2/.travis.yml | 16 + vendor/gopkg.in/yaml.v2/LICENSE | 201 ++ vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/NOTICE | 13 + vendor/gopkg.in/yaml.v2/README.md | 133 + vendor/gopkg.in/yaml.v2/apic.go | 740 +++++ vendor/gopkg.in/yaml.v2/decode.go | 815 +++++ vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++ vendor/gopkg.in/yaml.v2/encode.go | 390 +++ vendor/gopkg.in/yaml.v2/parserc.go | 1095 +++++++ vendor/gopkg.in/yaml.v2/readerc.go | 412 +++ vendor/gopkg.in/yaml.v2/resolve.go | 258 ++ vendor/gopkg.in/yaml.v2/scannerc.go | 2711 +++++++++++++++++ vendor/gopkg.in/yaml.v2/sorter.go | 113 + vendor/gopkg.in/yaml.v2/writerc.go | 26 + vendor/gopkg.in/yaml.v2/yaml.go | 466 +++ vendor/gopkg.in/yaml.v2/yamlh.go | 739 +++++ vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ vendor/modules.txt | 12 + 85 files changed, 14618 insertions(+) create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 Makefile create mode 100644 README.md create mode 100644 cmd/main.go create mode 100644 context.go create mode 100644 docs/assets/sip.png create mode 100644 error.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 header.go create mode 100644 header_test.go create mode 100644 method.go create mode 100644 pool/reader.go create mode 100644 proxy/conn.go create mode 100644 proxy/context.go create mode 100644 proxy/process.go create mode 100644 proxy/relationship.go create mode 100644 proxy/reverseproxy.go create mode 100644 proxy/reverseproxy_test.go create mode 100644 proxy/route.go create mode 100644 proxy/transaction.go create mode 
100644 proxy/transport.go create mode 100644 request.go create mode 100644 request_test.go create mode 100644 response.go create mode 100644 response_test.go create mode 100644 status.go create mode 100644 transaction.go create mode 100644 transport.go create mode 100644 udp_transport.go create mode 100644 uri.go create mode 100644 uri_test.go create mode 100644 utils.go create mode 100644 vendor/git.nspix.com/golang/micro/helper/random/int.go create mode 100644 vendor/git.nspix.com/golang/micro/helper/random/ip.go create mode 100644 vendor/git.nspix.com/golang/micro/helper/random/string.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/rs/xid/.appveyor.yml create mode 100644 vendor/github.com/rs/xid/.travis.yml create mode 100644 vendor/github.com/rs/xid/LICENSE create mode 100644 vendor/github.com/rs/xid/README.md create mode 100644 vendor/github.com/rs/xid/hostid_darwin.go 
create mode 100644 vendor/github.com/rs/xid/hostid_fallback.go create mode 100644 vendor/github.com/rs/xid/hostid_freebsd.go create mode 100644 vendor/github.com/rs/xid/hostid_linux.go create mode 100644 vendor/github.com/rs/xid/hostid_windows.go create mode 100644 vendor/github.com/rs/xid/id.go create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 vendor/modules.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ead17dd --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +/.idea +/.trash +/bin +/pre-commit.sh +/TODO.md + +session.go +session_test.go \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..2d0cdb0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,32 @@ +FROM golang:1.15-alpine3.12 AS binarybuilder + +ENV BUILD_PATH=/app/siproxy + +RUN go env -w GO111MODULE=on && \ + go env -w GOPROXY=https://goproxy.cn,direct && \ + sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories && \ + mkdir -p ${BUILD_PATH} && \ + apk add --update --no-cache make bash git + +COPY . 
${BUILD_PATH} + +WORKDIR ${BUILD_PATH} + +RUN make build + +FROM alpine:3.12 + +ENV BUILD_PATH=/app/siproxy + +RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories && \ + mkdir -p ${BUILD_PATH} && \ + mkdir -p ${BUILD_PATH}/bin && \ + apk add --update --no-cache tzdata && \ + cp -r -f /usr/share/zoneinfo/PRC /etc/localtime && \ + apk del tzdata + +COPY --from=binarybuilder ${BUILD_PATH}/bin/ /usr/local/bin/ + +EXPOSE 5060 + +ENTRYPOINT ["siproxy"] \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..5bf24fd --- /dev/null +++ b/Makefile @@ -0,0 +1,5 @@ +.PHONY: build + +build: + go mod vendor + go build -ldflags "-s -w " -o ./bin/siproxy ./cmd/main.go \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..84939aa --- /dev/null +++ b/README.md @@ -0,0 +1,29 @@ +# 简介 + +`SIP`是一款基于golang轻量级的sip协议解析和代理的库 + +## 功能列表 + +- sip消息读取和解析 +- sip消息改写和编码 +- sip 信令代理 +- sip 客户端库 + + +## sip 信令代理 + +### 代理流程图 + +![SIP代理](docs/assets/sip.png) + +### docker部署 + +1. 编译docker镜像 +``` +docker build . -t siproxy +``` + +2. 
运行容器 +``` +docker run -it --name siproxy -p 5060:5060 siproxy +``` diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 0000000..842e991 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "flag" + "fmt" + "github.com/uole/sip/proxy" + yaml "gopkg.in/yaml.v2" + "os" +) + +type Config struct { + Listen string `json:"listen" yaml:"listen"` + Routes []*proxy.Route `json:"routes" yaml:"routes"` +} + +var ( + configFlag = flag.String("config", "", "") +) + +func main() { + var ( + fp *os.File + err error + ) + flag.Parse() + cfg := &Config{Listen: "0.0.0.0:5060"} + + if *configFlag != "" { + if fp, err = os.Open(*configFlag); err == nil { + if err = yaml.NewDecoder(fp).Decode(cfg); err != nil { + fmt.Println(err) + _ = fp.Close() + os.Exit(1) + } + _ = fp.Close() + } + } + serve := proxy.NewReverse(cfg.Routes) + _ = serve.Serve(cfg.Listen) +} diff --git a/context.go b/context.go new file mode 100644 index 0000000..41fdb5e --- /dev/null +++ b/context.go @@ -0,0 +1,69 @@ +package sip + +import ( + "strconv" +) + +type Context struct { + req *Request + sess *Session +} + +//CallID 返回当前的ID +func (ctx *Context) CallID() string { + return ctx.req.Header.Get(HeaderCallID).String() +} + +//Request request 对象 +func (ctx *Context) Request() *Request { + return ctx.req +} + +//SipFrom 获取 +func (ctx *Context) SipFrom() *AddressHeader { + if !ctx.Request().Header.Has(HeaderFrom) { + return nil + } + return ctx.Request().Header.Get(HeaderFrom).(*AddressHeader) +} + +//SipTo 获取 +func (ctx *Context) SipTo() *AddressHeader { + if !ctx.Request().Header.Has(HeaderTo) { + return nil + } + return ctx.Request().Header.Get(HeaderTo).(*AddressHeader) +} + +//write 返回一个Response对象 +func (ctx *Context) Write(res *Response) (err error) { + if !res.Header.Has(HeaderVia) { + viaHead := ctx.req.Header.Get(HeaderVia).(*ViaHeader) + if viaHead.Uri.Port == 0 { + viaHead.Uri.Port = 5060 + } + resViaHead := &ViaHeader{ + Protocol: viaHead.Protocol, + ProtocolVersion: 
viaHead.ProtocolVersion, + Transport: ctx.sess.transport.Protocol(), + Uri: viaHead.Uri.Clone(), + } + if viaHead.Uri.Port > 0 { + resViaHead.Uri.Params.Set("rport", strconv.Itoa(viaHead.Uri.Port)) + } + if viaHead.Uri.Params.Get("branch") != "" { + resViaHead.Uri.Params.Set("branch", viaHead.Uri.Params.Get("branch")) + } + res.Header.Set(HeaderVia, resViaHead) + } + if !res.Header.Has(HeaderUserAgent) { + res.Header.Set(HeaderUserAgent, defaultUserAgentHead) + } + if !res.Header.Has(HeaderContact) { + res.Header.Set(HeaderContact, &AddressHeader{Uri: &Uri{IsEncrypted: false, User: ctx.sess.Id, Host: ctx.sess.transport.Conn().LocalAddr().String(), Params: map[string]string{ + "transport": ctx.sess.transport.Protocol(), + }}}) + } + _, err = ctx.sess.transport.Write(res.Bytes()) + return +} diff --git a/docs/assets/sip.png b/docs/assets/sip.png new file mode 100644 index 0000000000000000000000000000000000000000..731b5006c451a238a9f1b615436698eff8eab957 GIT binary patch literal 84998 zcmd43c{o)4A3xlpXpyb#j3rCfCN;KU?35TnN|rK7LSm3%P%8VHZG@SuV~Yr7PlmBY z_OV1UjGdXWjrAGb_xHZ<=XX8Nbv=JRf8fkq=X^fry!ZF}^*X{#jP;pM^PWC(c{5gtu%X@g_$U)9^9Zho|+r>S%ZZAu(@W3q^-WTJ~7_M#84&Ou&)_0Q@Q8Ok_s{m3W+M_ z7qIi$_pr7@`UJ0lomt;2VDO9;Y^iS9YsG(Mp&9D;K^e~v@2r}6=l31)(|O_`*Se>TUmj7+sh>f4F^rj z@erC@I}&F6M!tTZr3SN`dVtvMJNO+q{1dU%AnO`=_f^TB;=#{=G$7UdP~W!js4;N@K<44*pI)Q{v5yjc=y2V;AcYvqe!1HDC7kuaBJXT1#O25DlBl8 zEkH@D!ui(}_99}P|YaRqm?^{}lX&Y(yn=1heH!(NPyv18FCw^J_52frG204rh5z@*cl zTqo_K2zaRXG0Yapr`KXqn78sd$8NwfH31(9DWfq6>=rJjnNI>45pR5 z7aZw!7mN?r+*ffWfj2YkL-C@MUJ=t$p~-{+Z*_#|g87PUBTC%X{sZN~6Bp0-EUOvL zU$Pf1da4=*wf4lj99%q6HS$-+8VGnl=?&(w?ESkO+fxTr$o^D);F#hcT`I;%dY=AU zua3bWjp5cQRFnxwb+})ZXe5gK?wa@9Mdc$II%a{sQ&?Ovi$h8Y85md}4YOt+=az1V zb;jbcy;lPaurf}j3E}qEjO}K(-JjVG8)*)k9_V8Tnw4Rzj|4Zu^<87^wL-eo@&fww zR6z;CCuu#YLE)MrDdPt7*77dbyNC3&^14)ACDb;By(2G0tCI8hNu~Rd6rNOyVPNyV zvTpx!+>2fwKEtnE{$|sz*Gc zM&l0sbwRg+iTEiv?TQ?Tg77EKYpS=8tayuDjcw@_$nET74UFb$R;J6Tgk)G^-vK_~!fVn<5Mww2{`Bu1i^8 
zvIK70$hQ-@$sJpvPL~I~dkeXfaiRwoh;7Du{2h8k3Bf*y+1|$!174AyEhm5E_SCYq z?Iop9vg(6iB!Epx&)z^fhsu4-s0;)s`&li*s{eW?5r*B+pCC3bC|@fk&3_99yS!d( zjhKt$hU&nriwmXTj}LA(pQTl6*Di|;CF0@J5;2X&;LAhBXDe~m(>#BSpDOl`XLs^s zd?!>ft1}3LPH4JCH5>h8LJ%wvnJ|T*JE8YVKaumOwaVwryn3EioZxefoQyyYG`3ty z%_qhOf9U-k+NP)YqzbV6osyen2gH9FF_WjhrTU@y##wzOz)>AcQ0?k=|A*cj- z`yE(-jJ$!w0l^R9ns;wVCJRMsX&I|!ODS;c>V8LMaUesg8LQmGQ9)9q_Mv-e$d!=7;9%D@4u3FyXgI6##4;_ zG4(Q5Uj-yuE_~P2RqL&KK}6Kfm!q9}k{crrWAKB}yziD#E1xtVs8q zC!vtrFSkCWQ}hEg*3A(b4OenYe=EeK&SFsK>XL_otEm;|;r=4k)RQ*{K2PtyimBR> z#z4QHGBXL@4rN(Q9o3bwP`zeb2%7QZF;DI)bXKxV?z&|8y@GANrMNxfrj2OxO+o%% zhzcfxClwqrDVpomlChgtJa6SWIae;Nt!|F^6TD`S|M6`-PsRS>XF57PdYIR53arOH zi)IO)N|kaH$|DKazDKzrQzr?&tWf){TXr}Po9Hn8XVI=V@135Wx_9kDKgYr&{%6KF zd(P#y+!~m1DS?Oa5a711w>!N1W8z%67II|Qj^?QkPgiPJw#_{;%%*U_!pS_OB z)X^l$E4z^<;8bF@a&%=+eSzhat(S@8c_9%qSBZ3wP)7daFv-zIbMWQ)owHN=R1OR; z#L`{9S$MKl#^lpe7gy>xMt)l<_2RgJaw+mMczFUHlj8U8yMix z9*!p8d++Rm1PoiaCdYF3y(pt3Ql%h3@d}4|!SiL))$Y*Sshd^Mywv(gCm}nF zP7URCh*!^s%W=A_1oG_slczY~WqpSGtQe~{_5?? zTpgf<2j$A101Uf%1$vQQU$ReK_fHzluk0hS)ioD5H#*93?Zi_F#RX&5HbFb75S91e zaW|D`K0NNLlGk(POn_7d;;cIdyt-XYY) zK1ZyP14yM12|6I0Cz?4W-VrN*iRu#-NIkdJ#+R0oUrqQDDZ-VSHH=1f4gboHDeqCS zTnWubsgjSZ#|!o)Y1vrs^ms8=T+59r_P=JV^HjmB*LXh0+vR2lBSthspO)@yG8`BD zMQd*jSo%>|aP-}%obY>g>(!DkaNB_p<=iSi)_zCcGvUf3u`ON$RrbX#2mR~EicdJK zD`bk+(lo_zQS=$a?1ab$ji?^TmZHlYRa}%nJPzu#V5c2n2cJ%^r?64>tsEyI7 zilw6T!^X7AM?B+{+;&L+sF=uYBqKfc5^`y=rENfWGS8IoqbM%QT+QOWO@4$1&w!X+dbOwj>)nTq zUd#1ICUL*>inXU)mHGDxlkrrymCq7HT=5SR1z8`+>7x{wnOShX1W& zjnqNAgjNoBg*f_=?{&wiyQxJQwrjd*kmO4vJZhq1&SU_SQYc-?tO`&j0v72pN*$PG5HhX(07aI^(@Acs%pE^Ds4%Dqx6)n#K!%_qy`7AKoD$W(Cx^}67r zZ4zojqhTSX`6!{5=JhZHzvZ4Tx_2PBz*r~F6ipA~u>?hm_V#0!&}A>kBlZ4@2%=Io zj;!rJuqW2P@r2*|y^G+Petq8}h3*jC1(M3LLbDP2b=54N-|%W%)vj($RW)5zEuT=; z@_8ZU`M?|dynB6NDe6ai9rUYY9>&K6(!!=#i4wUF_!bs6ep}bPUVgqi13oN+xZ(Rc z8D@7@ul`9mE&P!>uVK0uf1LChmr1e9?D)H$dmwzRX4A~o)Lx3$jnuuCn=gN~Z3)!P`CKnj zmarDXc^3ppXsMK@zcoQd2TAPV0pGXk|J+8^@PBVfCddde?gA6#Z^V|(1+nNbsgqL! 
zrFyvG+kck9%J`r~tf7CG8h=HTU(iNm$9|E$|Ne7}*Z6fo5`T7wT<+pO6PI9Q>#lEoaaeOj&B4{mPGS>wwI>jA;4u1>3w6 z)tpFlP^eN6$lb>CyNr)5bC#dhW3YEi2C5ob1FLBI=@#&O^RNw;r*^9BX(M)VSI3{d ze$1iC+NmYI(R1?m>h#LNV{+$@=NV1W00ug?Jg3U%@VZ=+ar_lhPiQ_xS zQ>j|?evLFg84nkdANktEvOhQ;O=;Llg^n4<;*;GCiTQz44(CLFN90J{i+IC z$NH&$1=O?NPJpU0$q-C!gC3U8{Us5-#fRFp1f04_szF2mYjP$a7Jw6RQA&<#pRUeC z8Tl3&f_l31Grttp%Jx*`QMxWfof**uoN1BE{IaMr#i}HswjH^t>saJn`b+tHQji3R zpbNZm+M6gk`QSMFsVvO7Cw&oiH8os<%Gmnuz`15W5Py8Mn+w|&SvpCDDdm-H{mwhjHets+EldxVFmG4<- z_Ir4-85qm2#o_;<{H(oX{Z#7vYTKT3eA|X3{to%6my|SpzLA%J_8Udh3U-QJ*YVLH z;^$kcpr6%8Ql)H&)ssFend)i0)Zt*2EM^Rix6gRd9@3rRUf?TPI0eqFb_M*z7D#WD zw7`p{Z$)X-M8*kjZ0iqhnx&vlhFXQHo8}$U5$=_4Z3@0}LmRAR@|HD3Qo^q1=~@y% zi_65gvWL+mxSA}?7pi4FjV)k-WGU{cELn{}E)>MT64{>MTZ1=P6(wbpkFGbdyMLr8 zXzj(~S!ALZf5+AOs0ojHL<_(abGtR|bb*QL#Of>Rndyi1>h^yO6Mq^_6unPL5_h`m zNi@1SUfr+!O>%m_*%AFES=?B6Ri`}G=WD6P*H~se69>CQ+18P4Ve~IXRoVvzRt5ki zS10#|c^hKub}gA%U+A2UfX?}5!BKNz&0KaccD~&_;L7l$e%tLt&vbph=9fn2I~kU8 zVyfh5D$k4$gw%WV**|A;=fGW*PtJ!AVPLx^ES2Gkm9M~wV(C6g*zbleDqdlxDzU}L z-34%E-SPGAD4I)FKvL{kdf{g2pm*&4iF$Iz4hpt?-T$KMG+B9QqsPDf%PmAszRf@# z@Y*@mO((~q(>Qco>+8@QWb_*bNe0m_wz!K=h52vvi$LpHTkg|EO=7x*w8uL}8&2}7 z6In8a#32NehIWHI2Bg$UN-4>j>^)ME9jGTM*v=8srMYfAY)~}F7hZMNBxW*V&I|{# zps}sHIn-PgoR2!T%ycTt@C39yj|GOAR=jc8AN1k>mj-5B?r5*-Eb6RXl#PfcKEPtq zzmwvwF?8?7SF+nD3u$Ic7vDEbNi*in)VUdtV(Zg6Zz$D@m3y6-Cee624mxSVsiA}o za8cqk%S^&yB)@q6k#Hijzpg+vXQ+Pm11voHk~CWxdxgdxBhhltXq1TQCWudpq~B@n zyBD?<^!ODZv13x;$Mdz+E(^Kv065)}agqXg5`)a;VPy8cnfGV?Ir=Clym;lAJk{hr zpC4Z7HIOUhfF9r+MY<-(b)_Y)1Y;tjp*9SdOW*N)QuHYIwd^S?3$(`7Z+ar^L3Zbp zudvUvG9(7Gj8~fRVunNJ!=|rLIQm#2Fy3R9=LgTr2c`DD?<+Pw=#29}=GBy7#K{8Z ziS6q1Wbicx zXkA6y!D45SV?dTd1DPkKz_9Zany70pW5asx;uRqSp;#d~i%OxIG-E5{Zu<`Q%op+0 zQ2CZn)(`^w39fdL%+}(vFazsOxA$K2;gP#KdZ5r`oNOp$!+DY86wDezQI4)>@Di4c z12l^dyaDW9e;pq+KdF?|48n8lwX54Ic71Bit@LuO_WGV9T_V!bcXY=(ScP1H(=%90 z3oHTv0u!m!<2iN9-NN5ehh^xmwbCk zKB-Ck^!-|g>%8|3Z8__tAXUu#j_&a&59KacRXAO4nlyrO>r$SJR3nElm#WDwQ+=s5 
z7ek{Y@^)ta2!tJ6LyiTUSYcyICNwL0tLByD=?6(nT?bk?9F|TBG5@PII)^_7|7rtj zi9c-QP#QW|EZWCn>E;R6`BkLhqEYl?)G72#mSSFOAudkrn{kNbFLPST4MLlPKG}q8 zNzbPZaRE<{q`$51ALloNp8s0l-LnEO)rW-M7?yvwCwA4eVWd@p#BcVw_>)V5gh2$* zBj8Dpgx7EBAW6&N7@&UN?l8xbOEjP)tas*xS3$O!$RSC0#)4_OEE*YJg0iN^LO?0j;j`1BtjU0=6lFI z$e~^R8x`4{=@umVh_dmBQvgvD4<0{~&VChoeKpeHzJ^I2rLE)Gx!B-XuD&(#+c!Bv zWh|yZ#ft~|y#A%HW@?~C44V`WC~b3%ntfMb zTNjtS1FAFxQM!G8DV_Ey8=jPKt?>$5glMS^a0>!?3)6vsoip}Id+ljPXTa|6`PzuiuhD3v*Fe>d+q@72(iB?$4Qbq`(7P*!3uwZddNnTXSIR*MwzbksNL@&r3CGG1EF&X~G%(N{`%&d0 z8M68^R=fIj(hX0MC~!c*6;hnu-Qi!!)W?Rr_Kd)m>de?o3NgxMRg-r~7$TBtY$E^> z;L}m>@?w^Z-w9P}ZHo4pb?Z`&2GwR5TJ5%tKGx=^07fsAB)9*#9evc!gq9)K%rUBq zZhzaQ6t^MUQUCh=#7R&PCdf zgPP81w!~KbF|GIJ?{tc9@HYi-dqxv-xR3a`oM(yEE9%$uC*;cw8{BV7LM|#>0z5~= za<{Q(N-wn#8hITc;;Swe-Sr>Kgb-wVurI7A+{RxLlJ4VKR{%D-S0&ib z)>b*+L6FxvP`&N`guexKsG3h_|JOC3K0UbTa|79_L%ximic2`%U1bQ%*mfE#QrMva9VHHFHBBz5>sUAWM5@}l06dg&g$Oq=uSxIDRDhgVRfXizLYF)ELmFNNqx%eC&=NlQJ9iT`(@1aY<2cEc?!1juf>t>GNoZ2OrB-G z?+l#<5Zu#t9?Q){8-11=o_vK2J;wX%US{5Ivv*ik>FIuX$eQJ%c5|UhYs}Dvdy_|U z;$mcn^quU_$F!U)oke*zbGi;auK95%!nF$#RNp#ub;54Ie}t#-7@4t00;|s&bMbT# z?owiu`7thU)AF|lOza<|++}87;bes4fJt!}ZoX-sRQZu+)G9|Hr()6tiyB;BU^Ocr zu*JuthNwCi_oljlh2`ZPdq2HB{wcPeVvd-7`Gtg8)I#XPdtvWH1I>nh9yUH5rGEiu zut;UTnSkCw{%9OVsyj_0)QssSlO21qRNN&Iwzu648)@c+Bk$WMvVy;|)`W#8>A9)(7EU+9vt=VzR z3*1@R-$UQ~jJdQ#hXB|={z7DeDTMf%mD)8*oxDtaZ5eYQZI`kcSnfBK+%Ph_dOhWF zQ!;<*$?e`IK%laKtBU6&TbcjTI%dUn%I>ix5K4O&jMwQh!Br#bxX(Qk0X5>&mmo z5XTy^?o(!-dnOH&Db=}VL&vHE-m`-t`_uRQj#6FS8di+vY8$U_b9Ae3?yrq%O&sOgJ2f6z$P>EQy|-9frr*p(9>P-*gY zUkWOaD0{}mp;^+k$qasnez#){G8+%| z)$`;ykqJ}a?Wwf2S*ld=8)bp3wqM4r4ZA)uBjB(^WH%q|BgWZM;!$s6OX;0zat}F6 zUHly7JfPdDXK1WqcAG8nSH2?X)5lA~S98xzpHQd%3mBSe-hMuLECsK!MDJ-+vxv!+@e-?b4= zw2B$T{IP*+R(@SC>+BE4!l(6{hNx2Giyju`{D5*N!=QJDyJuG^T6Kh4L!( zWA^GS@3Kh^FD4D;0(zR|pQQsRIci+c&M8e{`Nh}u1lYTFT)Fa^iFSHg;nVczce8AM zIt$P4m=>jb%ao>6b2<;BsNdinsHdZR1K`r~(LL)bjlPrsB(4WKLZOy3r2&`V6FY=` zBja=k^&8{nvAi$TCG5mQ7GLS}^;@r?i|XqGqizVe`D(DFcgVi{kNJ1e_isiPU!M+f 
z5A(0z)PUE5hHzG!bG^|1K=);Ef!rhiCtpLik zG;OlEdPzk7PUh%FRlssIZ5a#&zmw;Fl@LKo$*;g7&EaR}fa(+~wZ(e$!P7`h33RlP z_-7mS>M7+>URY-itdWY#ub<8Br3HzrNWcl7}w}LGG+hM>5+H z32cn}liEg%yEtztIrS`?WZ1|xDb;FD@M^5B+G-ouQWF4o!geSZE<0ETB%K;6r+sYT)4y;*7GFR0Tz2K@ZU#@79pdJQeLAL#yNqzChuyK@1p_#}5^ zAnfjRX)~kwrV9VgP4+2sLDv{QXu5L`nShTD$k62GrU_vWXrp*WSxG0l0l1WFAbq%=J|qWkgCG|D@wJ-WELl==Yw z=i%7QmNMnPtgMEx*gS8Kl|LB!TphyAQQbv+p#SrULVasfWkCdVajVn)ck0!}k3C-^ z;jeaCMb}9i&FcBFfW-?6KlFAVcfvZ95mLv)RngGqCJ9pMa(Gu=R<+*H86`TV{c~HP zAn*y;u>eujSKQgk0()>(-|4JgLs?3M-)sBtu=~z4%Mse6=p97=ICw^N=sbX;TixYV zCxYu%%2C_uLL|KD8O!uly6soWqvrNi7;o#y*KlqNtq z9yvLwFyf_LX=7N@ENQHUGvV6zS#9(3a#6xWAY}RA=$lcroGG5C?PDGWqgbU%O!A@n zudL;8wy`yz=}+B@t3UK)eCd?(6e8^?>l)fdOBn( z-??Y~Q1_wHp)pKJ1BgdAS|ev=yL#@SL+XE?isV~o#+28U9sj8?a4n0jYo{q2nAx-L zICWka+dvgj-Nx4lJDJ}LzbDQ2Q-8itY_IRZyt>*$3k2O{+!|8Ptw&@ zJ5u@^DK}H$B9i5OsSDNOuL)?ry0{8Ga939h$&;U~d&9CT71h1?YJ(Ljqo1A#NN+Ni z5kO&9+1_Z+Mq;XbwjwvI&-A|TC2Q|w2~1B`G(B4OH9e^seaTP|rc0x!YI&!V(@gJ8 zfd`!|b1BCSNs>_#>CZ>eOH*FqbE1RgFT4EX+FVQWwo?M8-+hmwRXN}INejrd=8s_A zd9gd2CDJX6!z;VascV0;f)%}6xFVXGhhfn?v6&h$ehy?Y`XB zF=B-4+H`J6;#RV@VM@3@a5m$myk~vvRC1ZU_(|mKQUB!*wO27)M{>R-ZL>L771NY@u zMT?>pTRgWBQjbQ8yqhv6e!>In({N2dKJD^USmKlqoj9RGc!%aMiO)ts+jw0+D$=nAH?^G;2H^W z5c?)|+sy(wm&uB0`Y>g7>u%6;!xOnbYV!!3x5iz8ZyH-zZgc~uG`h*LcrkT)t9zwt zte>0jJFLIqp)oh@u8RI|#+@(YbCou+!7Mh0J>*YG%y~amFj-Ih9u;GBugzS?)a3T) zUyQNS1P_eBpFTDHX)U^_g!T&tKZ^94sg5>!*uUYTy+8ZzZ8AO|*kVnum7!@{Cv2yw z4<tB3<_;oyZR64v@dFEwtc#Qm}ScHQAF!e}g^#2Q z(6glZRzdZZ;am=?6=(*W!b^P?29LNywM9SS@|?F)5PUDAzG^Y8?_pzcub5(m>a&j` zpC$T@b1OU<0s`@s0PPL4=KSyJmr}LqbF3f8-JKyD^&r-TB?h=whj+0R<{Rr%-z+;7+8OP()$df>_P z6FbRH(k|&4zfn1dt4%TrLCPQXkKdc!4|CGm{J4DS{@MFbOh<9S8NS6S+Su4tl0NK_ zj*P_bS37Q3)k!Vg)?kDjJ5d_FTxT{wPlkAn)C8`bEpGcxw#g4Or8)3#tt{RNZJSH| z{5h5I?*+T(aIa!2wpUF0Lo95Ytw;6uWO7@Oo9`a+Y0~akiVwai=^FcnsMi~lu-K7& zKFhs;>x-(&EU@deF{5|@=nazCVKV$}SW2C4<^tG_AgfyBHS3>UQeB_sN!ejXhlhC9 
zcfxo{I9#b4D_PELJk8oRG?nS`g-44R;e|D6?Vp;@fR`-~4@=9^_R5bcwISIgB5=kq(o?6d+u)>I!?FaJ={5#5po6{H!sn$;bV2rJ!}Tnzp0O0mNVdsqOm2U@OAf>c$-u2e71oHoJ+}~JpUog zYG<)B=(!xxnzsbsOUpl0&CJT&Ia~-!bMAgJG;bzp%yyR*o+diP4NLb?0bQtCN~t2> z99|H_(re5chc)KbE#etNmOQY`GvjI(pIMRsU z6;eEPE9V2Fc)KoNdpo`7iq996!Er9lTb}`5V41$uVTX^JpC@?Q+jh|V>vUjX8@$76 z^NQ0}$`ea$am6ItF0Q*ZSosZdTYmpME2+#>09W2pahY^<=_Ff;DlO}TJo1XfJfdfJV%cG&;(hWQ=Y5Yl*n^4rPjkG!;>Uc%F;^!|4#H-o zZ_(R6eb=0yK*4QOko}XozLy?bL)S9pl_32SrF6Z!+?)2=u2PtxCu$3R_f9GgUJ52X zlW$C$>yk|;MO|yTBY_qJLMzm?)DdPwFLw!?u*fP;I(9C({jDQbj<5b~Vheq~LR?6;!} zo8fhB)TM(>rkUxV}8(I!HVw+2_cx%LyM>ZKDRw$FH#=a zqJC@Gp!Q^saj6jR<37N7u4#{9lst-SHG{&cxLp%4^Hsk{TL+R(?u!Zm;kO^J`i@}fr0~aF`6G!PdJjK6j23z7Qc!!Y6eC(?_xa&+J*$~( zjDtd1&w9oOI6OD6_!1FS^{A7%-Lfc1g$h3gGaf=-{rpgMrfE^%LF!>2&mZSauZ-yO ziLR%Q7ymOOhX-NvA81E?|17nlqEz7%SrzsSBYfm zh0UIXg6R$E?v!191*GAy8MUuD(O-zV5;GnEy*p!_VKgM{qrTHddVa7mv9}|PsiS^V z8?BkxU0#l|+kb-D|72H7&i!Y3(tj|T%VItO(}^T+%=Y}!9*tDLS-MB_I*qp5ZIsy1 zx<9iYGGURVY*~D76?r&WJM-??0}fL&Z_Gh&gr6!!lG+4lsD?Cn?W8Eu6#kYSy#622 zq%oTXksO!RE4X+jHa2#a!=9P062IT*J*tW`bxG_7YF(0Kc4z;6e&WW0Mx8)uA~(4N zwH7Re^T|IL#7+cuCn?^sKvR8gs+!++i|?waGvoRJD39=@lujv~MsH20`4TxkCPw+p zcX8~1o2wY;Wn#(=KVqrGrqcvsZraZ&6jM^SPxT{h{yg|S((Oen)Q=Ep`%`b{Y+_=`l;9K2N$hH zzP1B`@&byJ!KV@lZ=+dySFxVR?}vBjoozUK(XPJV?tOKeS-MHxaQJGtt5|T?%P;w# zL5UlOgQw}G?~>tC>7TW-n4bJU-OUUGR0qVMJkC!9hx_u>*`S(lw#T8H)nmUZX?gcY zY)O_vB>}8lr9H|)Lh5UuYLD4vi?-;g?p_j9FU^vf6h;8ioq0=UUUR6jvhHK;UNKA@zyaBIB=NV<8g>wC5N-qTLyc>Gq{FP^^4UtyP9Q!(TZ zhI?sGI9D~k*>QqvOl-Ykz8;hYNyOssrEe?W8~4aLiy_K8(YLp0qT+f|xRT z9=Tq8OfmZmOfTOGRBwbh(VL+^_N#d240=W+57a^T!qeS+EvOuyaYC^6iS8RkeFr}} zj|om2=r0ygR830e1z&!tB1*WztQY_Zjpy9Wyb2*n<^Bb7d&s3R=*Xir&H@HPr|{mr zM(FTukw?Z2EBlPzhy0$Jr)BwlDa9aGc$`pPseb$v!d=E*EG@Yn*^qS@1p(bkqcDOZA>9> zQ2Upw(X!_FcoBt76;a5+H@>AK^kMOD4YZ_5aDu<<{$e)xvoSX;X%YeB)B6u%Ajfs6 zqt%nyz(<$RijhD=a&77V8NKC>xoPkoUfRU{>3lce=>Gvk{Qux`}TpH8Hi+jNX|7+BZMJiJ^lbdhxPZzcX$zg7PBQJ~-4wlq(%mO{5ydEhlWfFE;!)y3XW$=UqCsKJ8p`fLw{G#O>x+E+837pSuhe7zPv{x6kYSel-*s>d*69 
zaafXBQ!eRvbi>|O$Of%pz4G%!2jk-PgZL1^8cV%;k%EYBDZ9M=IDpAd8&<_j_aLmoOtAzqoiyiM1n@Dtx^aPsV!QSUpgFPt2=KMDOmrJ;`kw$E+*s8)ze|TeyeE8? zXljQk&%3*JwXt{O&Z?n+#rTNJ-Jy|>ph?+oDK};nXQ+^kbNLg2q0+fQUFG4`lqHn_ z6=y=%cDeRvo68mIAMDYTYbU1loV(>Lhl`Q2Dv^Vor8WY5zzft75d6pIgb$QM0w4_> zY)5N@ZtA2mQMCnC3Hp4?EG?VDe?fpV{a@G{<3lcU9`%ha;NJMS3C^VgqJ0ys&+*8BQxCR(CL8-(vBDhC^~f$uU}=iEogSiwJ@`z=TW zk3P216EQo33lx+rm>{4;^Qwx~fbOOzUs*4UjDSk(s+4VJ^7Q)@0Y!cf6Wnj_tb3{S19@ zEdGr!j!q4fhOD|rf8eygV7By5Yw6c-_*$|zUCfBZz~-X;S;ALdFB=J*X-=vj`v#q| zYe0(9Ru?-0&(w1QZxtnQX2hjKg*H((Hae*E^?D zFwAfY!@`N-SO9fWuUd%tj&v)X7-;Gkb^C)pLupF|z)Oh1Wa3aKYl-bYB#MqJ$M`Fe zLaMK0?$KYs0At7cx63p8SA?T zdmnsXRDY>wpR_-OH}Fr?^Jb!uu9Dh6&EV#L*oWa40^RMfeZ@I@W*OT`w|O$WBV8V| zxWDT4;4y*BqD@slM(q75-{AAsa|f&aukHm%MyzTyA3x7>pQdTC`YQ}GuW88gNX0T8 z*+QLfyqv^g=H)=KL+hz5y$fP|BzOg(k;D8x1PUDTUUd50rFrJUUXBO;R^G)X-TmUA)X|w%e4;6=4M17Kmd_yak@MM~x&x{0S{WM;r*g`3n|9-0C(95fX zH?|s$0}3n59HVlpSL3Pn)q~>wfD?t$QK-T*raB7}U!enxqL3_4WUL^4h-nm1p}ur^ zpm=%vJl{x6`rs?WgRSRJm#Px}24l=%7WIz-`$8p&s-W~kAd0)dDyMRo28p*b| zj&+5NLNs(02T>^gP0G}h7y>zEN+kU%vK4@!U$;A!lYXC76}+fS(u}#f_>dzdTr@Le za`y+8TV;t?^RA$rQz}x^TE=?>-H2vfmx7vuEG>p^vpOd}_4QH?Lp1+vvc7}$%$TkeUFKV|mJEEiKRVsHey*@)Al%IbJ zJO3&UaHl%>#BOY0o+N#|GzVWW)37#8LmEfru$BAGjljcUqZMmbk&_9lIyPJJFE|(G z3D4BI=p);>{QV(9A%$V7F?fH6hw@g#j(vF#YW$dSjF>q6O!?|8v5T+scPMIGBxdsyvt=096<^Et*647P3w`6!nV z#*#*RaBeLbp%oFb8~d3-)??D{$fIaiF83#;?XM(QM#(SvQmZQULqm|9=PcACUFfe= z%L*q+{an-d7;(%-CkQ=0Lj2Gy(=x57LgmB6ZAE0`vd)bDa z+06YP=m3Q%#zF;;^ymV5TL2<=zxG~}8FG8-uYxxRhd+4YUw{fb3Mc}EUElk00$@0Z zh8GRUxz7e1^TPS%6cobx!Vt2la%-~t1z^oUqhVG?n7+TE3jx1iyRHf%k9g1IwL|G| z^SSc}75m?u0w)#Du66S##4NoYi*6U6fDqtOrN^7QYXKcc_gc{BHV8lVT@KxWKB`Q6 z{p-?4n=*~~ycTUqub2C$W1)fshmKe8T$F6BEExbYW7RBvA^o~3V{U2jUF5fwXY7R_ zkVSo|RQh}LhM_q(6xDKdS|w+NA8LuUxXT?a*~Xyl3*_QXG_}6d`qg_7pZ~=^g#giV z`yv%EIc;sQ%U}2N6jPydMPL8FBB#bz5?q9RD8ab_?>JRMA%@a5Qs{752593a_3ph$ z$3d{waUDK1`7YyZl%$Kk+nYEpMWXI_UdWg6yQhJ-Dy{r(&-Duo!Rhr*f#DoKH9w%K zc>YyB>SaL7Nu`!bE6cYH5B*i_|HddD8Qp2dbKeztFHp9THihyITdufs!@^R0^l-wO 
zYl!9F{Ms74(9RvLl{O>1ZSB3)x_N6Lx=Oi#5(2)ud-yG)IB z(Z#G6RJUzPKD~~p{Vb_H5m&NublGT{?@-EVQWBy*(VPY%>csHfvn=)CY|4)o z_pXove%35O^O}fMZF=?8#xj(~p@ss2OZD;q3A_LK4Tbci3d-r}<6Jc$2ajLBdJYaWB`D-#Sh zC0vu4&;tj(MdL~X2P{!eIF3N|=Qwkll-UFBJ9r`PRcnmmmZ;T=xf|nAgax9vypqkm zE}rK@Dg&+G;)5D>KC3k<-f@ zE|dIkV(5bOE>@rh9kMt*Fh&)IH`iInJuPfyqLpJCUi5DmmM(B*ficcWNwon8pCL} ztNZZN?V5xTw{}V!tzWD1NsUmFsZAzKH2Q_6+aHCYNBIWA%lbogO->Tb3)4MZ# zXZxfkieCMvL``@f8rll{HgU*;63@5=VtSV?8t{m%OxlOi0J{#nE3X zdP#da&Uj~w2rh<-G2D_HF}V$*yUW@7k5NNfEhCgtswjr1@9I zH8}GF&z|*3g-Yq@Yh4%Q_ueaujKs32l$Zqm&j~56=ezM7aQe$Gh^_~9T2L9Ef&^O^C zhqCuv-H7=3@*7{{xvlox+Kp}OjaNI>bt;#>@}-6*SlEP?J>f8T90e*3c)v8v1+?6= z$k?}`nAOZ&2D|+;29OYvyK(3I-C7+Po%;zdLT9%5|rLth~F`UlX zWraLu#D|{6b#5!178Zb5xy6C3nr0|*E`BZm??Egs^u{I_NtipvUp6^?y_dI50eznY}EPQbtij`lk>*BC{}XiclRky3v~1UOY`gXk>t_`h(ZX8+`zk2 z4edXy1Rpo}x-;dhRdL#t+!{BSj9+jCz2_YoZkG36`@b!`$rpcYX%VqSSgiWCS1~+} zAZ-n&d9BqA%gJMK%GOPtT(BBql+}hSXZM{I>mM#RH@;){8ZII{#-vDDXbhgt!ZW(v zarB=%VdAwb@$B;Nw;HWG8C+M_-AArW@eoHNEQV_)jVIqya4-Jp&s;8Q{rEf+Haq#F zxZkU~vMzw(AKoz>cx$$^LtM0$ICqfRXf$Pi%ykg&4-)Qr`W6*)iE#`Q}1cH$i6rWyPl_ z4&9ioLdp3E>VxTM!=qjkSqx%=31q`4yG$WH_Fb8Ti+Wyn7*xuy#^VQ#(~pWDW_LzX z{#;)SeMNmmQbj#xb4oV^M6sF`d!vpuf=8*D#>A;Vp>Cz|C<51WOg?|@B}Cpyg6bx| zW_4`j=u&{B|FKB}hJ$oY&tmyFZDfBg29w-?I%q(>ghjFKJTyxK&PJ7kznqO@IKGQt zgcGLmhWcNPMxIaKFm%NSH&QR8Mo^>J^);Q6>}WyB;Mus9dhTjPCOXEdrr@IKVwkaev){H$Jje!TP*CDcHSZXwZ~eCQ<=a;|weu=e6YWiRrPP`_ zZ@`LBy}UkghoNC-X;XN{V^_9SMID<+c_AqFqm6MrQ?Zbp@nLemZ?TWT(_!QB2Hbq` z@2~6DqNoiMB4cD$<-kts$<76veeq(l%X&fgVwRlV8-kPA7|)pZNHTI4cx=x_|iKT|&SGB4)2;$IEUY%B!FdXaGY@X6R^1=o%=`h^b+cZ9zX;`6xi%opw| zhrYJ%c}tg{)qN@VWcY@}Y+V)85k@MfCoq{20&= zc%P_^cBtygEEQuq5*v&;j<6n3Us8Ef&o;*WQh`_L1R@~heM>^pv#!qZ)__^&-b-%U z6-UPeK5*EdQ=Hx&V%Y97-vleaJ`RzceXI9-Ykh*E>NLS(_T+zblwwEdu(Zj7CRHai z;}iRR=Qa^B9_6945uoU$AuE0|iALj7iw?&42R$~rMy`1_#wM9oF}q6^FS3 zY0-7{=qUDdz@E)DG{1NMnxn0DSX~F%K!Fsu^f+nZ*g#^X++f~0)SI%bm+aAn;lmK} z!`5cF2!IA8Be7PM8y~n33kie71nmUsH7LlhC!QS9t$4oLN!y_8I`&y6bL2z<{K+HE 
zm%I%b=LhrVK1ipS6)@~yJWCpVtym7b3Fa#bO>lHE_(Y;GBaT_q{bZe$dZh}La73w+ zOy}Mli=2ucK?u|6(JdCi*DAC2Nq*3b2JU@rrZFyoMnDu{ms2W#GU4c-Swa(SGIndE zXoP6unqQu zH5U8i(eL7PJF+0$;SGis>e_(`lS$=gW3bwHlr8iFa;drZe)%X1eA_Xt&8nGu-rLzV z_Y$^OIEPeBU$M}+((?0+)1}g53?)LG6~1-Z%3hQ4a0326U{$@W_lEl}%eZiQyCNq^ zBz+PZp@U|>>@1P79NADT@&D?hM0V`xD<*N#L0KYVrBr zwG6MRW_h3TTdk_7OZ}3u6odXceO5&@ST0eSeb>L`$x|*c#d_scD$G$fCdu7SEwoUu zst=R?8M;t=hwV3aC~J0`A@g0p!m~>Cn*Z6ALx{~Z2F7!J>vsi(UizYQLzH>TnKS`D zK(Er4NzP(>2k~{M&MPjV*?p%@ujYi;Ve>cKKZicyOz@L-EKJX&{3rxkLA&$pz4eNHvc?O|n=dQ*^ zZlx*pr?d+4E`AfM@517GBHrcX%E8Kj)WmjDwA5$3Y>Q4hVp2RE(2$~}l$?0*PE$^X zFy@lJh~R6Pon?m#{_<3l-%&&GoO`~%fqePki>FZk>kXA2)dw*V0XBk&kgbZUoIcSz z99xVQ*S})>@DYy}m-Fpt#rap}gz=%YerkqMqjKn!rT1szY=C?@yL`^J^3v~1uyg>n zs6Aifjnqwos*B1&%7kVkd2%I>dl-HXEssCX)ZlZ(CKs<`$9IvAuS^2{jyBTfZVqfy zO~4={Hyj(TL&ZWsd8;* zwvP8)XN8vTu~ zBMvm(%{j0vjo;OGSeP9$)bSGkrO6iVf$x6j2bD1@py~rtJeVo_zp;7>tNt%oJ+oDk zvkEld*rRy4k^B(IedRYNL)-`UoX7r1`c=$dm?RhTOypwz!&*Dp-FY(GZeQ47KMXv42N`!5w-|R^>YnZ;7!PQK zhK`|Lja$)kkC1wYUf{?!HNUu^>s&UBDkn~I4=VEdin?YjS!R5vsMeHfjAQ!8(-FM< z5L^go>gAL~A2EqnET;&CB&nNT_gkpXOXWucY>g6C? 
zo2M)Bs1BG>Me1H#v!2m432tMgve!dh6DA7P1Y$2_=y4qD%F2ZXi;`L?0ZlZ!W8>Uo z1z*W&|vZN`FP{r~oINJ=_i!hVzH>^e- zcTF_%yu1Gu93>aV6buBgLo|l|d5z;(;F3(Xy5Hc8I&lV!)k67(0tPAqLRh2S7ii;a zkI4?oHHH03Jp#HvBU!2YuO+A-b!X{V7%@S^!~QSA1yywYAr~z=ZmJBKTdRz~5W4Tf zT)R$E9Xl}&{W&DV{M#y>c(oGhRXB zr(E|i0@kf_f3yStrwaG~k!FId35mcr78zKIFfpnP>#UL3lVY zeGWwQNnwaNXTO;u0=rBF?SKH{^g_u*40hW=57c}3sHWsq$Z73|Ej!}{x4=6Qr!}QX zrN^6Jz0&=u0=qL7HhM7Z@j8Ro_ADmlEw$90cV;x(b3VEub(`MK-*y=0&3AVr!gIBt zh z$`;jow2umS9&Rr*A&NTp-j-JFn9JYPBVpmIxT>>+WBO(uSBXk%5!>Fgi@xtQWJa>z z5bm)oD0|n3>!DF5sGSc9sRem_O`)0bJG9;AdrLiA<=GyEyvb4}!0SCiT<@u{tz7qL zNh|$k@Vv~>B^Uow95JWJXEqmd-=^=EI6~%~9QAcXL3$5qkri1LA$EPHcd9AUE3RARcrZ z2#-R{mw3kyMn|4EedN0N;9{mDrRy8Xq6aB9WJ--bj5Hd(Qu$owlB&%h*IODuNxn3$ zG_dfrob`C7mZ3qUB#-K$oWXN$EOLLdN}V!MIe5- zu_dx8Te4uS$UPGcpCX#xr+k;SsOZtQ(C>(DHrVMB!TeB>$GdyB` z`RqM6vnBXz^T<3VN~kVzw)awEkp_O1X-q`yEkRJxWtFAeEujdAJ-!2d80dgz61+>{ zYmNJY(+cvnp_+@LExe&a=N)7BnNw*W3|;$t}*9#|DyUEyNjyF(ToQFpp3XB9pM=W z84PEb{Se+cA7S5JAXt6B{XDpr(W%-nm*u*&S2*Olm!x#ut+O&m0f0 z$&n#ja$E+s+Rw~Q68fSOT9kyj@?sF-SmQiY|g-b7AZm46i@!tM$zTau!djO51$IPg- zWO)k9$}`|Hm=jo!hBi7ZRod~``-&0+{oKaS(7Djs>wi~XP2;gu71v38`aP?}$n~X^ zLI(1!$L9y#YhKbhaT$ZO)+OYL<-Qmb>}%Ck)0<^KJm-^7E2YnXBkgX_1^FL-uyR*k{PlV(yYRKpaGMctnr@el zCeT_jc}p)l2M|2iMD`xzYs(rtc4=iO!Kqo~PTCsFchPl+UG0t5oCT;^;lwg zESTdkr#bMM5trSJtgB9BdM%b)albvuN%O9YbbUFyO`S3GgOvRF=Q8QoUbS}yyjXki ztc?~A$6R|c_i4=i&m!;tIg!{YloG`~~m|M0Q>mA&FF3v+z41vlnYarv!7=Vi;m{LPgE%Hmj6t z+J}F{>+X{iWRk6q!DATU3V;t@D<+9>X$DFSMJhR7RcXAU-!G+xQpHjf1kI|PVJ0Z{ zNtx9qS~4&sMR&-DM!Yz-^S)wVkZZ@2Rd86*{W+}0r)iS|lLC`p-GPR~=T}RR@)2+N zU8b-_7gsYZ`bxwZ7$2-TS+jZw#zgRV37dE9Bt9@|d=moXkJm}jB{9bjP5^_`djuZ; zKqmIIuNdZbo`=6yhp-DW57jNxZ>Nf!xw5+5UbxYF?g9~auj*U?xl?mpvi`iyeB*ij zx10jOh&-C&Wv%l1Bd~(L@LZkShV0fCx@Q21jpG9p(@XO%h`Xx*^SYqpR+<@JJfJfd zfJLC1G_ygDls0288j57Z&N&Z*8tC9`$7EnkTpI1M6?|a7e$%T@dGpUps<_pvF-ipo z)^2;fa{lr$skkgISY|_w+74N_IcAEoJ)ayHwk;iT7cB;yp~`FZk2r}z_gq6A7bT~Q za(WQ&qUVkw-gJq@p$qM=L0R`dD zb8-BCgVF={yR$Hy%I#QAEiCM$BWzrn7dyF~5%H{iuhZi8BnzP4UD 
zq3Q`qkxWMi#$0!>aU4d%TSF%EQ!q)+Q`lA@al#JkuGF|s06a4gi!|CC?xkY2g^|^OpgSv7W`1hm{{X~Ne_g5?t zXe~C1R@)zxAT}|iBEt#Jod}6S{jCv!vUSjv#HvPrJLUe z@5vcDt~``>V`MQZ(=HkH5TBZxU`3t*-n3+?aFnKhs4uUQh#`^(&%BhqH*+F=mf84B z=$jK%5<7_a;FHgTb_7?S>5h)&M&_zrxOM~FzHoMn+BP~$-Y*e$y=PL zH_AnaKqV)JH->Pd2QtH8zLodSl?~pOpK?v+IFZm#d-=72vndJyKQbj=+qY$q=;exy zV67|z5WKx_TjI}W(6ILM=G*K4faJ<&3xyq&>yFL!@!9ONJRY4qKI#%yqSnhB#9?r= z^!90ww!un)4YkL)$}wv2HkRHn$(M+N49J~pX3l7^p5%u?=4NlOKji&n1oR0Dg0_E< zTteToAjk49W1DuwTh;4(c)X^sC*OC+m*;qmNgwF${qg{!WF?u4isl3z0NB-UEXdYF zHTN^VbEyusKC$mcaXF0b&#c@ML%+-1I6&($S+pH{ldKofgWp~HQ2Sdif}G;5uIFDk z*g6s5*%6d5t~(?gJhdBv5uZXnX??Pi6_-a=-2VDrrpX9IW&s@7?n>oa#_2ytNlFXX zviLZyip&O{Qfb_!P|^12X4f>~DjbOm!?csghg=SD`p$^joP+xbisu_2ml{6jcGZfq zs#P>kT<-?P3G0>&d3$GH7eaGS`Rf4@%h{m0pztt`zSkF`TDwJgdakbdpr?mSB}js7 z$nG-O6bep;@u{ps;<;(!yVhn+H*gk=TL$+OxC7%I1+WIFSFItx_OLJa-Gw4bnTpJ8 z47u<`*#>zYv+=K;X1$QKi}WwOkl0JTr3Du6F4KTKg1+!Tl4v-+M_X7y#}2i@0<*!6 zvPY4f|9l{bHi)SB%D&kMP=8zVqJ9wc`$I;%%z*5_)f}?W)?al_&2j6D65JF;vONDl z4t5!c7P1!7s@{a_B-4l~!|iyk$VYsFC(AjKjS`lgxhhK&K0%UB)l0-j_}Z|Q&85aV z7O&-|y35-^fwQCPKxE=Y7MWa51tJsW=3gR{0LVX!O#H6&IbA$;MQzp33tU7OaXic` zR~gk}9)v;#;kG=7Yy||e`5XE3m!|~q`ghwtawB=~8GnfWtl`GGrLvE;nx${iE=dxs z35rjV*nY~ugM!RmNzfR7dv#Ekt2zzxnaGnJApe%oF8lMQqUW#WuSTWguMxiH1s5!p z-`Vw)uV#cYA@1l^t90ak54x-=i?JVp*0wP91qGOCgTAiNMk(m{FyIt4g zxKQ;9TqP5rbn79*-^?6zh=2rx^0#l$LOk>=q$j>y3BLa|{H4U~<(k{+gCkXoUmxnJWaQhi^z_v~W}JuDf3nVrY}`7XA`w z%U2am9h`nYdc8l7UV!K04G}uGs5>(j;RSlJ zH{Fg+Sgp;|x?Q%+S&*YkGWm&ycot{+u(8@+4d)<;l$_|~09d~4{PyB?+ zxIV2$(p?WWLkT_)Ax3Ux+^^WK$BD%b@uFHcbpKYV7?4#ec30O%p_!0uIJ9pa*OWG= z%LR>o0=)GX9xvYV2wNWmI0*MMQ&!J$3iE03qoa`pOhHcMA%=16$sG+7lrA;G@TWV6 z=LiCFlF10sVlrTM!EK0Db#JeC|E>qHe^V|vl_%mq5^oj0*kT#gI;bUl_EJ%8r&Uai zh{+ctJA=F}nYuLv6~I7aiYQonA6{TLZ2yeBe{27fL8z^O&p|-8`bV{M4DlMh?c?1R z2q_NkKpmqInK5VyuSsZ_K|&h2p@Ay@**Bd1=f2^;cuDsiKKpK_pRTlj#drT# zMb3T@)BZ&eSW*8Mems11VGsXpcqfEm8{t&rycYnN-uwUFP$)j=zy8K2IjnF<)lTxT zl)3vZFWE}thi;qk#Suvvd-&l}EXgh&(O|KYvG?PmQStz<%G}=S9wGiG_8TZ+UCBM_ 
zksT|Gk*bX?uTuAIHLn`?)jZ65_ogPS|_%K( z(0X$(8o#@>lki3}?XP=;<*z?#mePMmN39z22!VTSy!Nc> z(#y!5`}o^!IAysK`(x+BqUT0cPMaLrC}|%{E(|Gi*@RY?94yfM$gMu{bmr=K%Sz?WjFX4BN(coi-S)c9f?yc2GtfcDA^9`x&PI&G;vdwepT^H+h zdOiZ-5D8*Bbi2ziwkH_Im+8S|X-q2k3l2>?ZL4HGgQb~037NcAv zYg3YoAz$g2*L;YV?=~Kk+@*)K5`e1`R_1gj-JxQBi#zawDdjTiHAxrHD zyxU2Ze6^cx>jx9S{f!#1RI1A2=lIN#0e5N0&8F!Lx>Pm+ZY3tZNDUr8&DFZ2&=ctt z0xYijOUz||r5;a>^_M*CSnwc{RwPRAVBBo>*a3n-S2(I$I$gfwWQv|m?B@U%Rd^q_ zAHlfz;ub}C8vV`($L8wsGEyjqbd$UVH?d3R4y$vKU)J2EcpgCvCj?3lZd`81#-F-O z)cC>&vaqOu!0mbz$toN2>r2iEq3)yvpM{1f7FBhxWhhcVZvOMKy}B9EM)$kueT^Ph zazjNRUBKz-QG445R~^mR|ETz+ApRXSJ0BnmkM*cN+ah*T(nd(Dz>FF5{RDrsl!(2# zztlx021SJDr=jQ=8#bV%)IrrHN#&s>P=^jjss&gP?%@R#Vg zMUPHnyp5! zOauUwR{d35-t3W%gursk@~cGI$cz$AczU&%Z}8UNc0?8(IqHYxC~=FN zFs00`At*8#g4>OQe4^`a$vC|VSrF7&jy8|^Q2L)DM45B@e zaP&AL;LGESMEy5y0{4Vn9)(W_9dAO;dO7^l62(&fqrjvDm<~-zmTCTVv!^>X@3w`a zSvv#KHHJ%x_kVQD2lDyylT=dF#?m^3I}Xn?Rf*fF`(6>22C+`Mx812us=z1b!aixz z8V<}C3JhtSuSX!${ncMbiMmjk?^~Lp;9eLz)30UOL7knQOsvc<8YZRDR zo=WY`AB|oWQxc$+I0l!)9L=JOdRBVAAYumBgsqLSE!KhsCNWFSFkr5*-)7=elbY5%QPL9?xq9mJ9rq-y%%_YOgmW#i#|9+6 zXqik?Hh;YKK<;)zVs42Fh%rj%&@19hk>dQB`2-+E?|a+rio({7s+jK>(KuQzUnA#Z zwQ9V2ta<)%mdnlF4bDbAR{6rupJhiMcMt zoSsf~vRioRaMcBl1>tbJ(CZgAU^mx|qZWibX2Ntmx>V7rnb(TVl#tq#t*zHOa<=Q^ zdY^BrA|IpI((kb`q**8qYf;~SnMlHx-*qqX0Rcoud-JM4EeDmk80y7b$l~;o*1o3RxWyP7iY3+=iZFYlUt7$Ht{g zee7p@fQPg{=(jAmt^H8S3R#yZ31&_Kx&i7(Jl1Yn7 zZ%|F{o^26JQ{cNU!9PR)yz#ojYc+kq%%$a7Rb1Aw%K!sUC7Z(Yz;aMriX_N`sv=YJ?qn( z44)@&hiEyw$g5S8GVBqvoK^fc6fNpWeO|A+q{?Bg9)bF&qQdvZA>^u)thf>2`)Iya zb7IX&h=^PEl1zKZp?F4!p$m#9YiAjSG?Y0SBm@%vnk}_E<_*(BOq(%eH!}&SMr~q@K=U0?Yf&@8_nR zZPNJ;wJxue2m7sv-FT{7y)EeQRCQpQP7KjH_fm?7shDw((UUJYG*B^6r8Qce);KYN z`HgyPz7XWq9Yq#sh=aely!~<9xbnw%k8pqe4@VQ?L~cbA-i^YK1WrV$x-WG??bK>@ zmT!hIjE%w9DO|JFCgPUTIHc1nF^`O3vr=n8ByJ#wJ#+Q1qJR~Sh|5HA1|bJ#xwWZn z-611$e0g=u-LGMy;Z2)`)xBvXAJjZC6t)WWY>`_~#P}xOpi@Eu1XC7540@-m&tjb?oZP~-e 
zekLJJGZdR;CX!&WwerC)z3K1adUC}ARsVZ=NFAFcb{#Co_YDgDlwyx+HQySWWL8watBz9$gI;uaoP_e(4Ce%;cvD1?$YB$JWe6qY=7>0Ei}$#JOA)CbJyH9ix7hl zyzJ7y;33MGYgfRW{;MZBafci~v=xRW!*Wc}4~8+88)T-7AWQG# zJJRN^v~3<3+!&CPdtahZC|Fv1eJsY2E2%LJoUp730z!Q%G!1A?KYN>LJJip2BRxq6;!>x%%`oL7X9 zlHaHT<;A50K&>;S^DuPHFa9pWt~UX^NrN>;K=j{`)o_lcK=plR`0*9;xdN2Su2IXc z=UPo0afb5!8-N?+XlrgZUT$$Asn{sMYX^&{+OpWo>8yO5x3|LZwNQVsr<50CAoG1Q z!D7>DYkO}(%4=m`IpDRvnzqr}d`-o1*?5w!k9=A98BhogMjv4N63!=wLLCK|k6N;- z-rPZ1tnT>=T8{dl;H3HX;P6fvRizk7pJ6#uVoc_C;Q(Da}w*;vnwY75LsmRfzDpn}!> zsSE4Dy@^QJd=_?xaUh=w{k^b75^;Qq|JDJ+{^tirjA|1C?tkjW1bvnSnDhgCChPsC za1;Cc%WWyp6llcsr9o)wd+ffjfuP1~s=WKb2Q{UhD{3#WasbAaK{CG6(PCBIzN<@p zF<}MX!4cB=O=}a6_3)59uc_iOV=*#UDNqYz(QR6tFlsG=TuoF3?v zh%aSglHgCTH8j&l_sWzQ_>LG>q+ga}WJ>ck9xyC8qVfr+>rrsam+@1XN4@ncr>xF$ z&t{RoRJe=z)6Ho9UMe9=&F{}#vg{QSQRC+LN}pGyQAkeM{2f8N2&uSdUs9OomV6|4 z3{Nz9b7*eYU+EL3@PQ99emnn~zUV{=-5zc{)_yI_-;7`J{!GGkTJFu*=ER++Y(vR; zkExyp<2GiM*SHP)`8V{~ZcJ@EjJG%ygfZntjCfP$D$l36ECj{QG3M0{F5i^mLY(PG zn2E8EQ-&49l~5so_(6TCH!&U`=#sn4vT~9j-L`Q;epy-G=>^M5+`%cn`STg4CL@dc zz+O)q3UV-C?X6ZohP-A^cZj)s+*{29Kq+a*qtA$vHMH}Z!!(o&ACyR8#!lqaD5U1iS9q%P)Q7&;`bsD?oUb~9Oaz%bquF@+pbN5{ z0^ARC+wBiuM(1mtQV|nEJnw3BV_TcbRPp9uN!aqZQi^6u?LFGSSMbyWsVo*qrjsr15f%77Q=UJOGhT^Pf>L-G%U!%4Wi5^bm>*=64uRXZoN;#uG=Y9A&}>wrv+ z=7hLP$1Hn){nnk3&dN2b?9+d;=LUlJ$H9SC`VOB$38^kL1kQyumdkDi%>2J?`(PLh zWfqVlyY^M|21P6w8d~I%RI;awL8{1pj65bq%i~H({P6k z7A`c#1%s~}cPheG=6<1h!je8~uc~aqB(t-jgkPuFRmWQL0Jj@onx-E8GC1(Hv`pvP zNr;6e)3!Gwga)6*$$Dxnp@y&*PovnEMLbUo1~oav_FyOK{@CuxX>4EOS!e8&E5lUdsHy4kqDGw3e~erDeZIyu7uSp)4zwceMQAlBwoc=X z6&+O(fgZ08ANJ=oqCrL*gc;s2gJyd>sH6p53I`If ztjdyb1`^5F>K`2^FOpC{Q%}xr#OG@`F$uHM*E;~tVD(^IEc~u=*;j=9Wsy1;3)w~i z@}YwQW^vP(1#MQ9i+0*7=NDBSqe>ijB>3frt`W`BRn)s%`b`qHJ3CIfO#Nm9fY=rd zbD_rl?09lxF)$hk(Mt>tB2~wDK5{wwjJQo0*a{x}`0|$(p4K%GV}zNCEC;bHVPIT$ zLuAMh!+1iiK#LVv%IM9s(&$?E=M)=AFWq9UPRriw)_?( zj(P|}{RkkkfL8vIoRKp;qTg&f%y@d!r_2S6^{|?q{U<-J;cZ&Qp5*8O2>)#jxa+pV zmR0z5)rG{_kfZyngXxB)Jz5@|Gu_De?3sPmBRjP9^zi_#@0^LyFJ>lTKoh|ucw31L 
z?qk2x*)i+o77UW7=)g|%v*9XB9!N5`QB^;>ftfpF5Tlr>Jg=PpM&)8=D+N^Z^8Asw zfFe3EmT#uiRZvcwci+%{IF1ILF@D4r+dy^dta4B%@dn-{_tzA^-l&7y?bAblSQk@# z>U$Z{*4tach5}cpVfEK1Db_#;*;-G8jAamtnXmBS0cIaziBreUdhe#9MS?RDn@XD8 zVh#nn3W;6w4D@7Gm`(sF`JXFcwDzcKth3ItccVb^Jchgw;>l&pWcKKzc+~Dqh0R|z zU`Isb3T>kqkAeNh%(M+D%P#Imtn&4DB`;D>x;%}DYZ*!aS#b-x1Q$vN&Cf-@Q&FIn zC&3I*CGa+XIIbZzQR;|{s2Qx%r=>IJ-NOh(;f=%I1eQ+`pH&rB{lwIEvMDJZOju`2 zv_=Ks0%`(*>MxeCq+={E6LCRbbcP876Lku6lt7?DZ^)UK9AhRyN=T0CK<-h#rc(t99}kqo zsPh+SIZ7OZ8Zv;Cv#eN$q#O&TN^ciwwc81Fq_NCr?>uqX`>6mb|Hh(FhkJ z^mI6(U*l6nE^SZ<)M@r@A6;YDWdwbj*DyywKTq1F{6~a9wMYr{3FQ zreLOE@`);y0wIbhRyajLbkQkU&1s?Iqvxg+_otgvd!r}bz5UtwX^i_smaYaNfjKpe z+PzKrVF<=wr9$)7r<>6>F{;M{ebS`lZv8QUi-=SUy(3Hsb8{QOJS@5SPLntr(s0WO zE3%&IT)y{1X@*+AePR4LZz};C{TVCe#Ll>msj`tPD%-buE>6+eWJ(^Rz9cMgN$7}* z>be1x1%jj!qi3RUucv-}ILO*)gol~$QMtcNLxYTse!?>b<4T46NLm_bZZ^Y7NUzq` zq+f|a2B89TQ-b%kRcZtegUT%tbG`ZS83_50*DWqg#jbX;8gUM*>Emwr{=E4eva!w!KhOcbVxWNWj zL}p?JVcl8eFt~w$=91qZszk!C0T=9X_RrDK`1#1dhnbgewmm10giqA2$O4}E@4p^e z=Jt>DzxVQtQm%Abw%6=T2RQBTgCF>hLmoR%0E}RX+BUttea{G`d;Gt_xXTd#bCyG= zNMO43w+Q#26TyH3V?ZL0fy_TY%QOy(Kb~N)ujoeX zo^`Gv4!grN-Pfb{R{zoi1(KX(Rb?lc<+wOWx&F!v5bge@JaV@&ci+Lg?@4;?sojw; zAg2nAgp6C&xQw6n%-m(ZZ>wz*Qsz=`SYhboFx+5jmNaKXtP+0&moX3jwmF&iWSqES z;b!MGa(|rG8#AWsOL4ur0<~*RX90vqqc#9!k+NdWLwk=ea)e;M@RgqYCKowgD^lXwzLmog z(b^*Uo!g;$!tJXA&-6M%tj?s)^;dKhBQ+T zCs1~EfL;SNXo-492E5!zfVi%ocKxy4iydLvYL1Nz%#5?yw{;(4TG^;xlX4%r>*rlo z&)~Z0wz;UafsSz=-V7N8ZQ){k7-)b!+yHp8RyeAy{R6p2ChRh@w^XsNGzms=Ra;{W z^7?}cYHe`QT9#2C`cpPHK!};^*GM5VSIs(J>1u$U^{j?{rRH|FLwEKyh~gE@i+t30 z-5=Six!P3TG3s#DI zU0GVY8M)Wi-D4&|HYokz87qUhO_YZ?JE{#MQ1F$@A@}j3)GR@ z=nY0u%rRv&7M%_x1*bSWlU%+WtXVDgu$O$KyYp_Qz71NsN&I-1 z<28sA$?9~ba5h*EAQ2FG18UqJGp{3hOOS)@XS>>3REnBH;ko2}P67AE(6GX-!+xlkQR7pCY( zSd}Jvhg3mN((s-Y;6RcOZXmFp`kGfs__`aR9p_AZGY>#pEol<-jMvEHLaE6p-iWnz zu_ljJw!jKaiz5jJp>^*Egw?I=!#dw{(!aY^7^b5Rl6G;d;rGrM#q!zSj-IQ&kNcYD z$?QZ7C8F0G!rYiTPsF0qO}~1ZhUmi2^hZm0f~f{YD@4z^tje$~oIbsPbND2x>9!0UX!X-f7f!tv%B$U$spOUyh0op$QcI 
z_M^;nQWQjToh`Cklq(IkI=`oFmRF2~HOAS01kG{n!^!&7Jwp+d%Nz|K!9d7*;C6nR zDJwj7lS~&bJIr&=Hvd+;-iddv?UvF_O%IUVC|$_Us-gw*d^$plkzdVMcxB~T++hRQ zPkE5-Q>3z)e*Q$Xy-EjJyDpC{62qu^kvcxuYFo7SrpLnG<8JpP9CD2gX)k&|kg2n< zDdPPzxp=_MFD%T}rlY6v{XmBFI5)>JTzy;tcFs?Ex{=ag;o!Em*~SB~;q(<4)6MF; zE8_KwZf-vnQ7jM6)2tAm%6&XSY8ekY`zr55vqHu~lDsM1nx6J2?(HNJKnK+qvSrix zMCl=WGSITnlhs+!F7kT5mfLVdn)`v+g$CvL?0K<^0(%4L+zd^x+C3#Z8wy})PKu&a z9?8bC{a}k>=*x|}T5P@OZj~$1-GbffcsmyLBks^_X?BNh+bhLWAZr{SO5G{KysUat zFoiKCV((516hR$wMvOddvmP+KsaHLMGo2+T-cE@;oUTmKc>t>&ACd<~?uef!x`4ph zy;*!66O(J5{1Al`7%OdtA>wne-FZA*Z?k5 z=(O#ldn_j0w}|kwMzNn8&8=8;3+_~0X#RLQ;mIszT<|&HFcO_f{o%t0t-Hgla?KQW zdmVp#=_@nu`eCGe5hS{XVP5K`CN<{EuW;ulIlZM``{d<#y17ooBBblWtZ7A*UgqG@ zfd{z^{RaWlOuDG76Rcx@P7m;X>cXvkEM{-Y-!i{-%KqoJv1x@Kjp$6QseiKF^;Kzj zVlHv{*!UGU(Wwg`B$wNU1?yS1SRKq(zD9kz8alhsSXV$&*-1dno8}dBHZUZ`$t0K4 z$mDM0ca1}IA=zfoxA)ndd|^{RlFG`H;DU`@Q@K@%u=OumibY}Sm;e< zFFwtOJf3efq>{^=DDVdTRVLT)ZkKAV>EwpM>~rZiY5PN6a>9_UZN=87mo7ShrFoS`crXq zsnb^);CIeRmEJXNF~=*-Xd&gFbV+w^&^oH!Qd>6idjIs-@z#wzYy9`>k1galHLPU)NYXbOh9RRl+Ytu+APNFsmV2Gtv8VraiU#Dzu85~F z+**pm79AS^%k8_)=XBLoYsYZC|F8vazk4k0G;%ZA1fkF2_2s!e7^fNx_uKzm=!_{- z|4Tt3iPsP;Co2+pcmLEY_;u>VfSaVn z&XSG*pY0O?0m9Y0MJzE4`=FWg#V4u$ab}f9ZQ(Odlc_(oJ!=O%?}Nm|a~msWA4Ee{df#HYpQ8u^a8#j`?W5%?(vEYu!;%%apQ z_bIT|#mOzqZbQ|L*sJl$QoWsLE|8Bi-L@Fetx7ycmK7;7K=t;<&wlLusySb`=v=e= zHbpx4x;pP3!#ObTd~?U|p$3<)h*Oj><}MBKKD({=BlBWg>&>%dI*(<&Q5$z(r#Kht z!G6yJ2yXE8Ogt-lx~ir^7t<+xg=Z?Mvr83Yantxm!Z!sTJmS5iMOaFCMEmY?m0j=| zO`>=nWx_SIOTn!MX_dfn_ybH7uT&=vEeQ5zjcK7IoicLRFV#IdKZ0m?^cLpm<5m)v%S)1L&)SQ z<45>;11D9#qgeIuyR-GtojEc-DB^)|dzq9<@u{N#&xs1hA4}S0dKKk-w?fu4)1}vP zjlz?8r+!(xjp&|Xz%H+m;64(F@7wMvGartt*Z?_DiIi;cyaOuoK#9WHvRCnDUPwT- z>rD4@;S>59!PMbI2vvtEFOZb>Q&{ZoZybND$eG=2Bvz;&iXc zYuu!)OW`y4ytLYZ$x65AXp4g!la8M)uZcXkI%&U?C*3NpbCMa!9e0hCEHnhQm2dxpQE$HIM0gdr zMNgdVOZ;4xPA0Ndf>oYe5w{El0vVc#s5X-^Yqyp66T8beY2J;| z<*&W+tGF}ZIHmfxgnCH^nB2IDGX8^1W2JC)x>SVLA|xVg+z!Iqsk5LQK@ZAZt$$mV zuMobEWMh~YpUE`EmDcyKPT7(D#dh$;y796s&uOyL*zz!Wj{AY~$}dIpFJRvP|C3z% 
zXmTJ&g_>D$zyz=O7Gs@%$1MF& zN~F8#KBG;}Zf^aYd&RzU+v_y1>wL@ez%nCvJlT*v49wW~<4gT@vA@RjpoIe<H&rxlI>PPGPxu4na(DfI znET4GD7&^@Q4o{|DFvld1PK8hnvn)+5D5tZk(v=<96AIkrKKfC0i|O|>5!5dI);>n zp}S|_gZjMB_r348f9<`G{mbLPnS0i{*R|HV>O4;_K^AnEw+3~)*3da?IzV`@i$>6% zQxUDEsOoqU<`GC{i4PlWBnEM8#aF7YP6GJt!Rh|#__unjW0ozUhdlFuTtjCA(2Nd5 zCgSRw)oJUOlD!YqvLRs>Kb+=}bWda3!dg)|^SK3srb%BO=qb3H8?^$=`4a|Y3QlVy zEXhO|HM4e1pUmQ3`R@)e2cTZAeUTPyBxn#9PKc9!IVK>JzEzrNIm)>cn`Q9L8jprr zr(Z@qOF5|F3-N7tCh@kgMs-=G|61bR?UVgiiT8Gb8aBrQW&~g_YN%Y>Ra4Y~u>WZ1 zc;6Z#+|()D)+`VR87A_j1z~2>FG&{H69QDXcK!o6ObRw#GDcV!zf@3Tr4z?I$UawTs5O*HDs*DBQ^{eR@g*S@(nQ?zb%=me|g{Y zS)Nyg25&PVjX6K`imn7exB2on=zm%NF8Jx}ws~#BmeVsfFY399yk!y7D~BVBuMs0z zaogMY9VJE^kO0-t^q@i6I&=VB%^1vFg=c$X|BP>w0IM{jjYK9IVe7G8csc0FiJ6q(wBLLUD+(-+^var%ABF z_*cd>u6^neHXxnK+TQeoDgXD2fPSS_ePaUw*aeAiFtsQi^d|LY@tgNRia}RC=-Z=> z6`;aPRmHugSI~30)h8S6?}a9`kN69@+LpRX!7O%K6w?WJ7~r@1uCOp5PqBcG>y@WS zkfR_bzxpH0q8KkqD0#kH{;^6BKa#nL1Ku9fDOn=JZ#5I-Zf0J@HGc^i@wL=6C<#=3 z@VqD}BQy*hoo#>yS--L3cB;03Yc6Q(2cR;(XD&@!3&FdiC+BAG&m>(xgD% zjO60c1KL5R9)H-N#ca{bdaBBAYM5c7-t7tnGSayc$u{I0&*kW&4H^2JQ{mW7!^+2D zgZWvM(-la{FnqbKj4m=wRKD z?^n{c!{Sx0Px_WOfwG~7g}h5tkR2I7^lpjtO*%tU{`^xgPLb0wO!PKeEVjkB(64H$ zJ@Pw(`Wc=&AFogdFIcY4T+m_e5PQcDpn3<}|6Wu-) zN%Dou?&^uOO5Y2u6O7vcpHy^+VABtmJd;BgR+R)jT#2~I`zvg>r8k7fEIrXv^kyw` z5>SoXbLa6j3$-S9Z;Zop+a>81D(AZsD@3qKNielSI6wXF8TII$*VWSxzUI%k$fvlL z>I*U?>CU8&f?;q1XQi!zDV7j$8jtz(;KLwW9gQDcsn|UCMxQ9?A~yBGOMJ1D`PYMXIpb!* zIZ4wlUu~$ZRB8I;JDO5qgJr`k23)GAsYi#6UzZ7^^S3r+CNgrlXt5c>dA8A|v#!7O zgm~ZDdVQzVt1A$$cgz%4jQ>WaozxR{W~QUaltbla#5S*bO0a}nWl?*V$xWp1JMWjV zbvI!c3&0{|N?IS6|2~%0fnynh-=vyU6_Ex5WwTw~qF#fXyvlJPhu%{fdwS7x#41Hn zL&7D}ItwT-+dV;QDwfB6eZ4YcvQJ!Ml3;-@?wdEJuCC7zqS>HEd2in|si88nnIYSV z7=0%(H|-+FJ+yfVwkhjC&tJWaCGzkKneK%@MS~+Uo#&gwrUOAy?Wcl-76QThU(C53 z1^~j?`T3tfN&#SmL+WEHt=B3)RJ||3Ojm|FhPCdc{j}w>TaM3tFVfOtFUMPUIVV65 zV>Mn+l60O&Zt46;K+^%wI z`bwGr;a63>8DeHkeWLY5@uXuqY{ksLw<>Rn(}cA?>V^i5R 
z;jqmR(C${V;bov$pCy*ug5kYMS$JMa{6{%@HRxaEXkAX{tYEar05yG{13Nu$BK94f=$gytwd=@XakDSi}Z%9!<~yfi_SLes)w(6C)DlIz=w8hJ-#$KUXjCOdXvEu*gj_C%mhO*2m(o^SwSR&~j4$=35(%Fi#LCHWfW}E%H-d#mVoV z#mb3vi@qz-?|S>P@*$?L>!U3f!|5ZkDdN##`L?;j;n|X!>6JXZ5@Y$3SwpXW_f~au zXkP4Zk4yk36C`&nh@6U!TSYGzr*EoPx1pQJJvfGQI1G>Ut92SQ^1S z#fQv`9OshF{Y)rycVHk;$O~2A#6|vq^X=J%eYmM z5_sG-@D9x8?`CYqEh1YI3yf5F?Pup^R}s0q!T49CuQ? zEK;3^U!|>XrP`;7E)uszKwTCUlw3$L>WcP;&p!E z?HXNqD#5{Rx|2ww$@m0*=Y`p0Zuf=xg~9StV&A7*>C^End2q5ewhKOk@;T~nU~|aO z_jY=~kqlS7jz1{b_B{ajbwXQ#E9ufc9Iw&rK(pXGA9JB(J(H3bkFkySA?Dms%((`W z@2iaS2Fr_e>?tW%X9Url4860qud9C}nPTgs&(d=F(=QTFag9TWY2hspt6N3ve-qaM z@+0~InUCwF&MEI*{_lN>lLy7McUJ+TjP{V=&C-V!<{g~>kf7WC{oV98 z|NSff|FEL}YR(qy3^J*#1ghSsSeNFw?3Vqh%|vs?qY7Hs>3Le@56I(}EZsNR`fWfd zw8RadBX6rRKLDu7*OOg-zO|STsq?4UPx^M;xgO*A!tEOsuG@M)uQPWP73Db8c^7=R zl+3jrh2u@nhswZn-Bus3jlgbYcUP|!dzV&iwwpvB#&#GoCVx+S#kJpcUFb6+to~nP zq50n+MoFIrb{rn=Dx5vr7c)6nmL#QJF_%1nySdPT zfwHJQAA=;!5v|`WcZH$sM@R z{Ly)~+k>@dBMEWT=fMD)RlXqmS4XKfpe`M-aj-ESyEY=v$W*^w20m^$)05?=e9SmS zYC`m>P4{$IGuizxngq!d( zg_JRiDD2?)IAiw~d_;PmkzY1pyflxSeJMwo5Z)@G?B6g1_{6Bg|g0RfE({8 zH2hk>tNUGa>HR1iJH61m1PUoI_6zl-89+}te}bC`#WC5g1B2ZImZK#)Q*!3;GFCu! 
zl^#q%@$RkjS9}!L6J}ccHg4amvKoQbT&dgK5Qk9J>vH))52y&L=<6<{&-Lc!y8*RX z9rP^#BWv3M+!gJ(Oh_<3N^H{8RXv)+**^$Gl$zFr2#RK$pkgQ8N=Tun4PSh8hC>36 z18N|SeCqH^sA!_1!lwMT_S=zuZHBs3u7~=DX17hGU(FFZ&$F+GBYFnpSvGXnAa@FT zT165HqhUM%GF2fbXSjc{MvK<5CpP{DR znkCFLGcQ?2jJ1TDe@$w8y6t*pOJV5ZSD=e}X-9CuUdE{A@Ug*~%rZ?#*n>D5F}T24 z%7qq`w3)g|hPcID2ZZ_n0nqJxIvgIr=T4`kJA}HvAo^tJjqqdL+s%m8#qC7}e#}S8 zve4L_pD|Esk!0a8yd`Ay`v~=}N{>KNjam)M2OM;?q_Y)F`;#>LYO!3&V|&i4)=pjE zVgz%U2T!0n(xHn^d3+@A`xs36rI!xr<}(GA9?*pOt5!iJh5-7CO(68?x^h%Lv=WPv zu4vXwC;b_I_5*H-J(OH6Ca&P8H7~`1$ABzUnzD2MXdIEBX4OhTqq@SzY`V>lfRj?r z6m~N%GmgFRCgnEs@9O<#0fFx(GZ8so7$99n8_!b%fL@Pl_U^6oVL)^x`~vV=4R8N4 z#$^}ivPg(KfaFzv9cRwlyyVUk>u4MRL$jxQ*XSC4ht}@R2OLI<``< zR5d)k;xQO8yCi-!=ODIQNfenK4DnrVPq8Khpeo3JKmC7KS|!JBR2Z$@>SriN@j z(Cl#6itU;?=<&bjl6Y4Vil2=rk?KGO0!1M9N%s#Z4v_sxc(cbt8Vfk~j^yItg9Xb_ z7=5-aa7va#8?T?;{hzq{h8T<*Tgg&c^QBNfL9VF$Kkk$_svWNPjAPjVl`ZE3h#^7GoAG5)zWOr zBK{X*084npE8&Z@SKI^N6Zz6@Z|4z37cLb50Hi5SQ;N?~TSs09&5T*P9!lC!IV!V{)c`6}^Di700fOanLtN0(59t2!*>C8-gS+PSl_&24 z&~jg?q)XG!R4~J2mjJ?c@;jNt{F>Hp_<9dxuk`Ra2Zjo361&@jp4 zpg5i-8(#UD9R8;61=#{b*uLxQwuZbQ*H&TO zbDjthIj-UA2Lr03W282*U2ys=$8gWZ-ClV!0!OaCC7Capj@QGs(vL)jazhlM8W#_d zUT+Sv_#L^&OSAd5{cS8F`{dGjyV9G(j>OOS1o9M$y~_di;nz_447#a=V&K-ZO&QLv zQh7<5f!Itn?=70twso^)m=Q14U2We-88S|@4ifZJF8G#AGkb-&z2dp9t}^pcy)$57 zpY!#R$Z>U+;}x{lz-L=QX~(1mUm}Z{n`CJbLR>(_Y`)pF%qYn@!!krfYSK1KdB%n9 zb)@dDX}TBCFoYfOxC&c+9}M)gKmvf?i_KL9&nHyOq<9+E!?Hg$rP))awCZ7*2rI-k z8Exv6q8OQkly#KcN|cdBDt1#zykCZq$u(>Bmh96lWd2r}KVKr3$2E}8&d>qm=3I#2 zpN0TBQ6Q{7OI5u6v_LRSE}7ICLS}ReY|xLj{%w=9G{#P$&2e44QnvAq=IA< z9%N4cfUdhEp3Dj880il1);~xMy~8BqboO}#IBfBc$K)41z2M18UvaB8?#$(IEX*PC zRgUeI7RN%d{UbnmTEjKniz}tpU9C2p>X>jIedC8@2>+-#ptG_wZ^pi-)4(b{2SoRG zq>cX6GP;Au=8aYiV@xUbIQTOQAwQ=$VB58=dX}*1t!R845`JK!) 
zj7Vq_ps5@i05lo{#KnHe11XEGF4yVqGKZgVKT3W(mATLtI6*UalU5Q|j=3&Ak^xi$ zKh-7^y3eYn_T1*O9+2mx^@M!gj6Iuu3zC@ekC}dFA(#~B9U={Sy!+y>x#IG&+d9}; z&kS_5(7ZPcVs4N&>NxUbNZN%S4BMh3d76^#biV69|g z&ywCFwRHyokO$fnC4yk3O;R6dvTsqR-c^kzgl#?_@u5(sJK~EfA2{SrJ>4y4B{@T7ZuP*=4UWuj&1s}B4#~7Dd<0?Qw zuP3eNmsfBlEFV|P2r(S-JG3T}O;+VhGz3p&+o%uhsc_7%Al!Qo9s><}?1k#)9e`@E z_=R`g(Npg(-1L)dlx_r11O_{r?fB19fPu^)`{U)+4y(vh`Gf*AX`Q6UUsu9i# z6BXEo{{=8X^HYRNQQJ*ek)AE?t^>pX45!naoT0$ zt;kY-9@cunv&pX3v7$`Rnmtj^-qN;9pz?W;Z>!}D&sU^Nbl;nbtj>I!?MK*suCGX? zKoijG=$NI(#Ck?}6GG!DvDuGJ4c)POUyT}bVW0=uH4UUo{5*wet0Ml2LnuS>_$-7- z7#ta|Y*4i}t3V4~G-=vhE;W+&mW`>}q=CvO>?Kt=jN&0G93fX>YPFmu)|*FN0|UE# zeie6~&edmz4VcGOu92@eoO~q3ooI8qj_HGYUE<9y!Q~Z`rg@~BLiNK^IKgcW)N-sq(&-8BA&`*Qn$yv>czf`k(OrSItcqkz;M@6 z0IE(ty2x=gnx59vAohNfpTq(MMP`_}ujUs_NJns^lKT{Kit`WbM-GIua5FVx(W$HsnhJ7$S!X6ey!U|RSkWbQY zBdETMjPOsI2=1G|XR%Q~&wUp4_2@b&t<@IZCiW5BEYw0O;ko^H13+NX)4`a~eL?lX z$xqz{bZ~R%>m$Lz^7&l~5&zai#7prc&p~)I)pN>xG3$OgiJcoQ=)h=3DT()-3-CBw zvFH4655B!{(bjbYbehaY#o#|_UtdGM)bQ#Siqb{M1A@W8PM#v2gqf#OhQ+-%?NS$F zkdi3O-V*CDa@yJ?9VSoc`k^a2_gbaGEE1s&a3zXgrqR~k7wgz%p6j90@E6G5cG=kXyogjU-RlO$JmP=+0! 
zHiJktMjs?st}Zu1f=8|qC+l9ghyZqQ|18$-i6g~>)WBQu+o{;4cxC`|0?!oU|1l#- z96sV-QAje)n_|23mg$lHNXFUwMU3baF`!)dPriKCdddj36tnAusU!`bWz{6v*?b^M zv5J}E?k!jXG!k!%{MHW+2H)nm^umh>q&xm5e<^9b!2Hb9X-*Bh>l)^x?Cx8cjtW^q zCY1$08>`7oFJ%^5%qS%&)-~&lDW=fLA|TQ_UOnm}&^5JBxgKGfkl#{t5v*ULGphoc~u4~oCK2@3trfup%$!g%4$igj6 zBd57b_Oq(Tfo9oT6#zqqy(&X%vR;9y_7iI#()dc_Vu^hoMzeLwo<4_3+#d?|TJLCa;AL>5hVup-T(pr9tNC8!hfC98_pc z33Zi-b>dG+Z9^ql;i~&0rg^-d#`=X+xG4b9Li5l7k~AChrLPZ1hxKx{I+lK7 zEJA;NXbD?Sf(n-+I7xLT;M1ExLEu-z#eX9y;C42?mR@mKK~>CC8*QmZZNdlpD^}do zeI4>0d8*=)v6V*|#`r(Cc{m6UbPkm~>&oy1btT@eF`mE4>vD21G2(B2eE7H)CgBNW zy10Q1O*xVC!>nKYl0dOr3+EW?01a)w!D7K8;D3Tje#`0td7D2UQ27rMcW(*Qe^ohv zpLqU%J znSI{2QB8t~`r$~wfd$0+CKyoJr2(@m#Ss#_Y37(MOG^{WH{%_*ZpzvQq1qKD2Y z0A)o*!TnM%CzOin7mVH$*)&aY&Sua3`p*9J(ApX*n}7~SXU~!>LnVIj@UJLFo&r6^ zAAs~Gp2Z#F1GWjhp%{?OT}LG8!1_b<7`zsj%NG)g+#lc1bMs-Be$j;mR31kcSl>GY zAH2JFgyXZS;RXTTis~E+R}cE#FZA-`)DPO|Mzur}pziZ1foJ5=#`9dV9@n?K+7@7+ z5<;sahs7geM%$cQVw|(!ghce+X?)IB$Fi7fRY`o3TbsIwBvSnyTF0|M=1efKJ z$vb{>zH+zu&dwOun%cvvwm(B~ zxvcdim*LEsvBAtZDG#aO;aO+S{2i4%;=qKe63?@kl3UaLRua8);k(put*@0Y45e@2 zmRJAR^1?FyDmeSjG4y_W%rYVya%_OxwYaS*6ma*HF*(VPAc%}g<)ov-3VDB5w<3yJ zeIY9n<(zEZZQ{~=UvG=L-k?s$$1>eWyCA&Z2LXC$+z+*up)D{)zNd$NT^*3WIF~J1 ztCnR8eaUX9f|<}P+tNzst&0tNA_U=1Y?enu>XXlB6n*kF0w93j^I10R?| zez#WaP~WITrtyrtu)SfKx!~o^T%HQLqBELY6M5U_Ys%B{2D)mwx$}b!MhPOtYO*H- zj13OCvtq~Sl-2l>livqZSL`{0ocrUJG^YTd{eF`D^e}jj2tJI|m`Q9RvmX9&Ti}L* zuj5bFpttWo_vMjC$pxlCw+QShpDEKm+XQ&v4sLDxp}igvKLTVLIy>K%-6y~8(jEz$ zXq||0^s1*lS2CN;Cp;Z~-5cfu!RF3>}-AgnX0iZpNY;e$QG&Y$X>_a>%P#AgMf3#k&P83 zJtecaLrL9|C3y5RUs;Kg`#{g#8(`rk*yuOOVHo9|?)M4t6Y^pv9x_dpOsh6jKT6!w zUG04!{Dpvh^YPOB{AVmG6Wf$2BnYno2ys_6WYt1bNnyACmcpcCb>3=bb$lqgcepAy zXPLq{mADgkdxvU~XoQoYsc!VcCf(C6^IJ?jrP15(Zwj`QlMDl2$-~2t4boH4g#C=$ z9n#6YwJ;t36t|v=h^NF(kcw{VsN5|9-SNHFf6b{=i75HvC?_8O_Xd9f?x_so9qnxQ zW;sE#S%4;r0CHW9;8gptO3lozv(sNll$_)6*3$z( zCJ~Lma6s6d9X)%goW&kXPOHy+ObGkO8gek*-fCvE1220SHzPdf63f_AA6_~1vYp=7 
zGt~eSE029bG!38{wfGj?n;p0)Q4400a`lnB1p6eI04}V?hD)n~UFQSy=d}`d?kVq6G&C${uH~1N!p_dI)&n@K_1WDq6&-)SyFtuUa&!ysIIwjumAeyl z%{kf{OF=Enu?8Gg8eK#D$K=X?4l9O}ot(xt#S;+&h_hQHqBg{WHR>JqyS8-J7i`7A)?(CxhaX8hhv z%?&<&y*>tf*TZHNW$SNNzS5i=a%Yi$_T{ERA8Y_l5q|pc*mt zWAtO-D_2`BAEV+3MGh|7hul%0t{g;Vyz|(>(^p*igvjq)(kGfTY{>Cd4~30s^ULUN z;4go&o4s3ucb81+hu98}dhQJY<)Le~xwV4^f{xSg4sVTU-TDvB(XnTd ze`BbFUGDz>!%6kqF9OKnmlaKoqYZ; zN$4KXkl%kTxdN#{i0lJ> z&z(rxx)(UgSco@TnmfMDlQ?74XTbs)J#`BQkvt!Xz7H^v@Zo@r*lp*xTR6z@ewPb| zoSclDAKlplsJW_M=8xK2Qq^dCl{`7vnD6cc$)o)B`*R9d=lw90c$wR-4CA=RPl>Qt z(7!~<0a{ZW6Wkh(V$ltt2HW?cLqw0$05G(EKYE{8pyIdpRs4#N=-_B7-8Sc_2$}fj zwo4_ugXxpludgRw=~O>z=U))W!r+R%Wq6_HE?+ELBkAD|YVBVI(}RFeS4`g@9d>|&?gZx#zsf0NDI$69od?Jw{0wz(tiQ9OR2*6GZX@8C zqNMdOm&7q0KZofkcnW~9hrNxH}UL$NdbohuCjuI%ByZX618)12({}?L4^z=;=>K^yv#>PWvir*+G`tzR-QoC#{-R+t$5+Tk(7g=UX;j{s-JOd}3+^m5 zA3{F`fO}Iri4J3kID5zng)@W%*wcW=P0MUvaaYicg86D4Y9i)616MP`HPAO4mx8#e zFPYtvex@R$(QZvKu;b6hVsX8nY}uKXKek?1JUw_qFK6+HS@JN#`DT*@zJYsFMW95o z)3O2aiBDJ2Gwy^;Tju&m$K|-!?+{43S;^lAOdRJgOxAXb;hVCLP7&%Q#AJ_@D65#?GwPYaj2;s&ije)g6mKn#n6=NYQN_L$S9pWq@(YR^ zv#N5kplZ3mVQLg>bYLuiG2{yh2>e9kL8p92wpGT84dl+OB2T-e(3;=)J=<>cDR(NE#z zY{TpO!eMsmPh_zXD{dqk@9G`}(`l=-m(Ewrl>E(!Uj7Kj&3TapP{YM1_@#!s%<=r3 zcHpazLtq+z2%_psXWiy!(2!Kgn`G7?E`UVB_xJ%ean%I8`Ey)h;;{Sa8it)t09Tiy z{Z*GDXJYSCGMueC%6vRBx6>L$;sW#xK{9CTDe)qOIDeqP5@HJM(oY(NgtN;;ZosIX z%wR=wm$Qz%ab!f7y(7z@*7;|4RwsvJC2^D^zOof@xI!8!Gw%nivY+YUzo=F zUzkQ8Sp|IAiTS8FVUFS#T;zq)c5tp3Ze9vhB7oI$55Y&RJxy?(fwS$gR6SXuCmY^E zWw2bFzkdawOKKPm($7hd0n5>PNduh2Cy=*@J8PSfrNzu=zR>ybI8ydz6_rN$(~%d; z26QaWUGHjsyB4hHl&b70mh7{ef@Ohzv*x;|;yK;wGh4reVpVB9mt%?#BFlRGOQ&UA zh~h=o{@$&Gf0$H%CuAONbbyz*Jsr*9G@xOmfyL2Ld46xo{iHx3{GLUV2(*u$x}Y1pXtS_5$wj-aqRWTGhK}= zD>lJrp>3c|VP*N)D0s!9Dnd#0BFhq;uHD1s1)I(9{EqI;h@3f&7n^(;2?AA{<&Ems zNeL`~jP;=uckr&~nKEBZ(~B%#DSZg?hYP(!N3lqY${9fCp=tr7y;o)x7C&a5!oZv7 z@~l9YAOz9aPNo$Iq+$-h`HFiMuX<-A=);U)$ek}G)w@DFhQ zy|4hUt~W^!cY2twMlZXT^mV~X5Rr>(_Vr3b!Uh(XVSC;lyQR@!F5L(ydvEB33cLKs 
zh0Q*-IVS@3Zq#m@uDhEy%h&LGH0{K)8jx!q?b_0bvVsqv^(92`8_E9U_0hW%K>}2M zh2Kvu655kudk8y#a~k=VIO7K{eF9sC+n-@J=qev9#||mQz+W_zSYSSh)Pd!vrWP?l7mdCznZz; zOKEA{k=NwDT-@u)D=Vi3c~p0s$eN7iM~Io-#qy1>*dJ%7$(2=-HARIl_6&|!Bo^Ge zJdMSN-3ASc20xC7MaG|s3L>D~61`7iGpQSw#)lPr1F9>uH;cmAls>#p`b!*6euAgf z%N<7h;f2fCu)lXYmrN=D3M<2d(QL6BU}B{YY)V5v>a?lr8JXBSx4to3$Fup z!KJh`eFh;m;Mqu$OX|44f9Mz3=$G3dRKz_-@#CDmIql`-^G9V`Ol1a5k%m{i=kwTz z829qrvUAG!S}v+YRbhX=On@EFD@fKbVka4och>kA4<43mYZY=d9J z%rqyzf3~h|G7%7hJwmD*KOI(FY&*bi$EQ01Y#{4-JkHf@zeg34K;bNT@RM{!(shA# zyh3}_A%}+^owwk5QeJJGbUu>-{fj6C1`qfBNNN-G=~0>8suz&L{f?-~?xb`|MpthI z4LR9WpH_O%8tbP+g?!EsXT5^Q`&GRrF`IR7Nmq?s4+rjsMT_(i4%b*B+-gSWuaEkz zvpOjg|IXhcbmL_humM~m1atk#iR>rNG+B&zlrF8xfpc&f~ zB)+^6Vpm&G`;bi~m8Ts#`oJ)*aDsTUw$fzH=(WJL?H_soLOV$Pi0QEkxSVM|1q4+q zF?vQnNZ=tbDLVKWG(ycuTKblFqm*=c*%4{iciUow7hRZmDv3tHO&O{ka%a}$eVXwd zd|z=7)$uYLkq}suPQW-=cCUa~rL1B|Lz?ckf66PBTB~~Qaq)YU?k(HL&H7iVmLiX+ z&!+R&JHwbhK$U2h<0;_}=yH!!lLS9o4Js$RM`q;DEbjcVMrH2>b9Hb9DXQkI2(>;* z>FGJ==Hxi(vD$ttcEt^2=;ysKFrHQin>DbTMGFo+HGHet;;?*fDGRVtF>#kpf~i(H zV_pU~FS&LC3peIlS4Omkj9)ugvb3)n#Ta&^#F#>JK#=G0r4M#&bD={YA%xWE>Pk@s zbXv_CwxfKFv;kGYw-^-kx&a+!UmwcNYHsP0hQKr!y3Nr15!$ucI}=6cFTX6)+wa_#o&z|#GH zO_jq5M&VeqP)%BuT4i+mIV(Q9l{tMwj{$Ij6HmIBOw9p*c?FPfyn?a*jFXRuDACg(SgpuiBlL(l3{@o*Gim58fS$lkw)*^?h4D zQ$VsoG;dRX|8U-7+wWv$e0m$)S;8G8_alAKG8vJ0BA%!H~&R~ zWM4yHBdG9DO0uJ`6}~OLD^yXQe$M*4?W!1djm;_S#dwC(hui%#(G}c7Z+0J>hkUZz zS)RTQQ%k^4%1Hnfr+dDx6rftQITn4AGeDo}l z_sGXXmu;4cN;xu8>80q~4Xd=|D~mG+=QWW=J3W^p;=i{#@|YT3R4wK4_)Kz>w8Xln zRI&?1y#-O`ow{NZFSL%SFkG>$Wj_)Biom?0oPPzTy&`@^?1}`Z?RVj2tJumLqjSsG zy)BOwN6i^eKSD7B-cul_vcbBt<=vjHT54kk*Tk20^htltadV!H*^=DsZX+)LU`v8wP=J1F?1`1D-Si`j)=6riajxLDLwgDWv`Ln{Nc;2+HTZI z_;yhQ)R~4kEYKJ1_l+vuwzEc?#9`j5ddeF5-`gTUI?_LmblY|XA89Csk6ex(2`K)| zbDX$}atzsn8NFeDEooVsS1{b*Ip>f2AX4a>HB;8%~`dN z2w|_ANr^;;E97o{6cd%Y69=k7&}XNR^1i{C?B|KR;2yNzr)`fy6=zb&4%TYP=Od1W z+E*0N#iI}7MlToGo*Jfq@krsfWqwYwLnP2cJfo-fX6RT*!u)(<%TZEOE47jFE(7r8 zvv*@ZKOPF2#IXKc=p9dD(vui)%tHOX5$&TD(7i9(>mrd9;+LJozIJ9d)AZ10!{3Ya 
zC0iKB0t>b(dVf%a+X&NX=;q_PjK;9yZqQzrvA8-I=VRg{n|8FA(9>!-(eXFuIhInA zvy?K1IP;55Jr}m)Rxe`uYZ>I%vJ$Pe$<_86T1pc|`V@}3nmmq0O3mq)7ex87#^uey zmy`P`DJX_ZOzE7Qv|8vtDSQ09O^}RVtn8G+YyGyh!s}t65@IDNsC>j#ohQLWe#h4WO-} zGu&eRW#%z06I*j61KihjNB2u-X6aq{PG?0`p&?(50vaEtPjlBRKGD-P?5CB$9 z94~^9i?Oa7$$P+760{f_Xe-s0pDbZ~ei$D7U2=5*^Iwi;CklUZo2}#x44@9_p&%Rs z^0kVh+&~(>E$j*BVP^2q2%y>G2ko95$5x@-ZN52WA?5F>8Pz^Y(C&;!y}9u5wDV}k zslzd3jgibi+i8(^lAU$Pl_w8|#?a@*>TwL&RNI=KFZxmv`-V#EP>MLz0Z$J@G-m^j zV1lvFb*`_TohV?DcH4yOZi~i7gSNXzBOm|sBv7nm*LVqB z*0F8i6$}N_*$5bGRePckHtC`}`hfK`F^goK^ElEHG0E2#YGK@Wd5M^@=6JeB{bF~& zrNq%skA97v3LyM=U3rRWkt{!MCzR~Y75j7R{Udq_d)Pfhj9*V5HFg?j(IB~&chU_$ zZ5Kn+3-qEg(QK_9dmHGNCT&D2u@^fgT!s9A(Uy&lB2FvEv68!U;~2!z%31>Bp>tX1 zKfQ$%{yCp#0wX-t2K*KJdwJWdr#^g&{1jCX>Cj&(hH>3# ziEE7;>SSd4G8*=28w@@yJD!TXu+zeb?T=nN+W9*)`?L59pL|`Etz^EnxcSuh298N6 z)y08W<&Q*kXp8ZzN~4FOkSdExViO9d`wv~Bcr1!PtFQ4wM-8a~!{8-k<_P=NX@sZ- zvHtM{{}qCXWK4{-&OE!awOp&CL~G2MkH>0iLVJ(l{B^w9-z6{84;w=JGYLAu3&BM9 zBS8HFm6C4Iymo%@Ly9QHio<2EGT$V=lO~h(vjr^JQ*(bl9lRfUbn7zppCI(_x8r>Q zNrL~@VcGT;o_Y0Khekzkx>aQ5!zmq_n>PvgmwL^RM zrP#CKbSH!h_WSzLx~qS^-+vDQ6%+%9h{ekjke5U8{ZB??cdxZ&4G{t_M~;@kzP*(D z$BFag%S-J$(H7`e97_fXm}%1R-c|W-K*vwAl2p=dw`Jqs-#0lC-W3VV%u`k;1ub>4 zv+;Hsx+lKg{p6yhaGW)&233@*&VTfDwkW@xI41Jw+mAS9XkHM`T4=)xSEEh*{ylI; zz4;V26;==?#(n4{=(b8B&+8X<{;UCD^n&-DY`-!dYx`W9(mYezoW((&vuNU$=|m7*P>0T zYTPiZldGc}ZKBXh@*V6>Z1PC$LiAZ-Fd>7x)qZb#zOhSAN4#X+p2YFk_BbegnD@C! z&E~w_-YJqN63U^QkxxW zQ?KBNJ^WESURIBFpI;iU^<0VEU*>c>)kb)1CHKsJ?fnw?ijrQ^mCIm@!@Np-;@}wTO?;a`d`7HgVQ0oKzvx` zI`^Ttai-bg5IF5#@fEPOLNM5~p9=fCzA054Cu}!8j*Gm?OkzRRb&;Uzv)5v{(UxCA zC7Qq{I?5qkha!w?C9&tbb7&pnU`(bM68M@+xDWW1c-l72% zGg`@gg;%q^0Q7_wRH!oQ`$occwB+_^Jt?=c_s`Fu%=}|8Ebo|g)pe9EU~gM%M793! 
z0lNI;J9A|;_MAT@U4N@=Y%B4Z-E;x#hH{(KG>&!i=D_fb^Jk5=0LJF(ZP`^WVwpR4 zI3v>)4a=)w!)ye2?}~cE58p~1J&HdcZH0c_e;-2DcWxgHs6m=EL__t76)rJ(vt<|mE8JW z>CmVW+zZuO;8g-nNJA`^Hq;#9o%mjilkt`4taZ`ZRqxfFKL$B%W!AK@nl__WhF_r{3HQ>H8nI0GxKH5Cu; z&R#g-4m)8se@@@E-N%T5y~HfsJ@)}S;j2#0K8#dEYgS^k*9z4nl*aY)~fRUAh3aOBg*@|fgENh7dmR2 z;>84u#3`K}obE7ltlKhH`~{@z0C#TQP{<3?;v%|$fmS{cY035GIK6f5wtskCG#*$d zXjMtYRJxUV8R(^DcDl9ov3BjUlp%V<(=q40X!1PwkoR@tGDSHu-ir|0Kf@7_z5DJ3iU(J#-!YtV&zU7b(t z=o=(GbC?PYub#PLyhU!;(URN?8{sVgtnD)QP^}iDwf1JJ`g*{}i4z-e>T&!&5Y=%2 zd5YLHb_O9{!mwmjJ^g`}o?ib-A(s=VQJGm1V7|8%cX?6mKY@IxEpULOh@cj>GqA-M z7l1|}UZu*t#Q5FCB8l~wbAuOZ(Z8sEQNTya^ZHc4E;h%~O`c zNFqF!kCgRlR|6QuZ-UJ?-ZuRXZcZ*pd+DGPZ7Q`CCUe^Fwm;CIv!?8@u;!j`E#=>r zImKVK(k{cqd6qVa|2a(#I*u790zQZjmd3y=Z_nv>nZGP|i`*aE{UN%0tqZilwwTu8 z)w1n?voV9I_)mfhU#dX?uXvRi0U+-lA_JFXp!QJ#tq&zExn@(S5-UxyMzYuGhb zut4`$Xo5g9ll#+(B1&6x%^EgbPVj_X#Do3$9AD>Qk45QukTF3=uzO%Q@e+uzC z+-YOCe%BJx9&tjAHRk(Gb(2vu-a_VR8_Gq*atT3 z8e08Yk`FhS4=??TBzvD!aQ-tr-u(G@PTPp#|BVa%|Kh?$2_8&=JuUZMu6cP@;E1cPw>@R?i!aV*LbMGD2 zGrBOpsFLqI`x0R<5SHycDYAs|FH31uj@B14oRBO+V&NZ3QDfQ*D0 zAYqnG2rDEAA#m;+w6)LE@9%lexz2U2^FIoo{eHjJP15ig)7HiOvrfO{J^pESkV=#Z z?1$ISEO7=_?Y+VSyEL~k;@(5+6_)f4I%8EXF{c8*O<#{g1=cqBm6Fuu?vtKU{N|^P zyg5}r<6uV#p8|X`;K^gp{({&eyKy2X<5o-2zIMsPncq|lDVCcb$;$$r=Q$rYdlTSxLNlFZ^j8fS3Yy0#+$RzLNM*`*t-gOxyLa!lvw56J@8jB($I z@LLfsMgq^_>5EU6$3|pI)!Q-X6YGYKv71H1*whGxj^rJuUTj9nuhMWihsJ8+fUpQv zQ{BHS*2^<5d<>_=fc3>Pc*C%;sXayq==o+(u&uJKvmGTVFTI_@HA^DAMZB+bohIzH zT<;a68gF6?MJJcS6vrd+}#+sl}_94A@XxxxUKkJ{SDFWW1 zhK{5NW4_!08Sm9@^$DZk;yg6IXkhHkdK`^39uCi_seK-WytRSkb1CWYD?K=wy4k0! 
z)tRwu=?1k=HuY+~&!{9w!3X9Xgu%lobqFt#cR1zM9XwV=GfGdxj;(*0_^lMtF2Ip(}dSz5@t-J@Jr$XrX^)3UIvY>N}FoggOrxw*YH#(!LAJv>n zF*V)j+8@i2ln}OMHqN*%ac>>2^IRv_QVvA_Dht;E8shf~x5^{^Vqt32tZ!H|oWlHg z=78n-^J6jCzNEKnwtf}RGL)198t?HXHAeLJ6LEXhU&3CD^2ap z*~*!F?&)uRANaHo>-+F0Sjsa@u^+Rf(8=&*tir{?8z%{5w?A&jAJMwh+&;MI-{v@m zec@uocR9i|k#jmU#6@E{jYA%yB##e8V^Dv@G7{FbNuC{l4Pn9J`KF@u?$pri^6T!W zMs2W$tAmPsJ`vy`q_l2p=`WO8JV*dqK$jAY3hp1|fkua?<)ys794!)ieF?j#4yL6#)SS1&sO-|6LOQgsv|qdF!&maz>FMK z4Oo(UT2+?uuZsumN4YP^5{em*;F4RmyFEvS#2bsstSeEOL(tkCoj&FeVfxratG9y_ ziMw&Na(zKJuTuhXHNMTEap!$1YNmsKE(G~8$$o4HH!V9y;x{sdn9tiWbFE`$ww zaMJ)OCGucVP0OnjxCNbioBTr4Ju4|;c5Mzsa~|ne{y;4~6+fxN_x@^C!slfSGKo^J zL7yLS0KeX_Q=Kxg*{l%3p~~GTnL`#cS5;U!NG=cJL|7YO7NdZ}vW_W(drH#93b-!h zzqo?#yV8jFGlEWc1ks{(v1@+)LRChFY;naNVUJ|G(o-I+4&sFfGri~Vz(>Nh=Dkp? z%@S5#v=c!dc4<45?$0j-^ z$f5dJ`})P+nW{3Lu9uIWwQ$UPiOdB6Ey~DIbnTW6_(m5;mWfs9{n`^l^9lU@(UZ!) zU&2=*pXAWDvKAz`r2FB=+PHOY;V*>;7BnF8$LP}a_Y3jbYeU!|v**jl00@2-**3H> zlo`F;lB!hD8Ma23w@*-7RoAquc7^vYbk4>z18@{)sr~r|*j`l@VX;&P!4cEdc(Y)Q z1*ZXjRhQ#OZnxCu4iR2}K#}S~Y{^zDi71Y^p#k8n+CZ&4@PwUgSt%;DS|L2*s@jG% zLjAa3H;PvXUlwr`&bHaSd8xO}kJWUYdwmR6FY5iNSQ;^otG1azyhp`rAg6HGv@+Kd z6nG`thj>c&uH#O6`lHU~| zKZi7w!kaA6m1gqHu4NbC`hD{C`rYfDV_6K3zZ89SI&X5UdJ$vRRHYUwQ%i>#$U>9e zp+j}0whR)zl&r~%*vRlJi9@J?;EId-)SSsIw>GvhKW}a<*$7TrtR6MjooO0L4-KIq z3K^Uo(FauFK6XoaW+ZF4dOn6(^U=$08-v#Z_xm0rZhn4auU^`x?g7eYA1RFc|1G%q zS7kK!bKPHUckNCNen}k&;;Dw?C;;U48MPf4xY%0_K(B{J6qKcaYxCbQ(1R=whw7`b|NMRs@ zuoH%^*4Un}CaSv6=8T-H_d#wDNcE)hiPxS);B4*A%czwG;?FVdc!7eZYbhhns&>sd zK_2-l#h+d|TMaiTwrH&jnCu)|WZ0PLkZi3aKMm{B@vRQZJu_A3(fhmHonK46vF@i2 zX2kUHUkVD2j_!V*L=y*g>o;mPEY%zhH^(%#=W0EBj9wS}Q2KS7trm{*`u{K?PIfo4 zD@=huWUvW2wVZ>Kv4qtR$?rU^QUPw;o@98F4uEgv&|IU!-qniY7l@-N0TF72%r(I3 z0B#P^`7L0$qI0J&OzIv)@y{K$>YLyjLs>&RPE>TqPGc#jSab;f6AbT-b^tPPk2h?7 zsIE^n)rN-tJ})sLp=kv+5)I0E>|3^Vo_X2;m~vaty%i8ae!A7Y*ju7Wq2FSUQO&9V4{^A0>hWk(fIXvJn|f%%aGR72S9-v!L_FzADJF@ zZ-oVG23MUToLDJ*z*wS@2pmfw{iJ6)=|1Uv>@dL39h-@0QfLr~&pilDg_7MJY}@_>IJzOM 
zrGUp#;LEiB$fA)~vttvBoxX5B_cqGiFD-MD{~GV3hz>^Urn{QMk`qIFD~UOI1w277 z%kp6ys&_b9uo=)(MR!~LXa;|*n~-K@b2oEc8r9nz;}rUds_(ZC^!!D4qw=gtJvj~zRo00KPoq*Ezo6nnMjR3x8~JeLob7nk2{!c+IYZfWmx*!KRjEO>e} zwncsGB8iZD=mUJoH9yF$pPZH)C{v@leraS>dGAsHia{mxw0x@)<=%88AM#cH-a%I! z!;DQpP}hYj(kL#M26xz^Wnmh_PI@NCw^l>gK*Zw%rdqME%A%um=ZdnX-9FsWomng5 zX}=pDkO@p=bHJAtx&QcJ|E~qt-o{lcRKkimCW$J_Bo*~sOz}I8@6R=76eug4v>m}T z$aB$qQU*@lrf)gU;P(5ax8IXD7 zBY!rTxguW`n7#bM>_yoSu}3``#2*Umr9$(91U1F|tohp^D}K9~4uEh3{=hg{jxCAl zEhk@CWiX-o|!-J=)) z`5nk9`Xk+tex=QRHc7=ERb(6#eSHUIzed z5`48$x=|4&*(#E8#*l9ZfR?Z`M-N*%X_K~WwA0a5J1@6?xy2B!u-OjRj4$sSAR6cC z3Iqu;?>QmfQ?3ahOQj1X2cVWZdz>)LcDLwyV)zi_y6!)2y5wh}&Y|wCF2j|r9vr7g z<1fxfmMApncj@_ZFdo|GT>=2GW1sGm=uvcoj-PD0WoYAS6teM=@cxvz72V*x0lGIg&S@Wt9|I(6+Z9~^fy?!Kzp_>( z(QrlNFx)TI`ndv}^h)-KhPmX(&~KRHGgdDZvWU_K$ZYV&(ijjS#e=J!&2Cm21U~s zUtBqp2-gZG5Hlka z{r>NoGgehs5Ox;)M1py{R+4_u-`&Xp)P2ngI#8!QeDbAq*f)fy(UmT}xzBN&0dLI$ zU59_%+ThgigHKwW{tqR4-y%(Bc>`9U^`(r#_FW9)bM9^}W3Ilo#D9z@ zWge?yn~v`OhEZJw2vo<7)WEGcedGQVweAWHzh#XS6PKj5+l&j&4&1e96>q zyL@zKq>iI7?+)o}5yTWgpn&pbtlR#N|1&1jlW`vC05r#LNA1Hg(!!HyCN+}Y9=QHF z*&735GFSOV@6aXiJ7b*Rm)TU})X3e?>7lwjd7?P-PBTMD@&-^wa~}BG^8Xt$g97oG zL)ok^Sg&22V)-Wm!}^=2B#Ta)<>#Fd=322PKa=~7ow{T>JUfj8krhZRgXc)R)lck5 zSSg@;wzdW|TQ_Rf^h-pkRBW$^@BF?VsX%r&!M&WW|Fgu`!`O?jR3RJIMMqk{% zxE16RMif9vEhxp3ZXAXj4VKpWd4bD7%87B!r;ok#jN5XBNPrf}3lL${qAH(9(joO+ z;dV?HSYiFec^Pu~2VT-OUwnN*ckq-R-Hdr4WF22O)N&DsnGKhp-qn8mNa1bLbua z14QTzf4TguES=&e6$+5yfLZ^1n_nt18-y%+3b2L;faJP!;2b&J`>tIO%cp=l0qrp0*iLsgMHeFh`j=XGWor7%A+ zLVGCNY~e5;pH>2+(#W#>+}9I9`Cc`$=qDep1B9C|i=tG7$riEq3ONDXF8jt(n|b3P zt-E6WXk|#uh6wzZM1>y!NJjs-3Y$#0s1NSuK zsUMLJiSzS(B@zBE#T|mj{peFbBaKcEEYN(}$VIMv6&SX`k}+Sj2i+6vBpRS{p#`&f zbM(lFq`btaM7GO7KHRi+lfM*=ue8X|*zf73K_rOnJIL+=C`Y&n$Pt!;uU8CW#lBCE zcun)ZdteG{U?_6<$*21KAy`~P$aKeHfB4^)OTlaHU+fkPNzl2GOp3v)VHtDDk&nrb1OJ;R>= z6z9H8J#avOLGQBW-Cp9&Jc$o>%Y`Wr`JA+Rth9z`y0ndbLN&XI>#~k0>t(4XqA~nY ziyWLAO9w=efbl>he0?eEI&!2u!El72jS!r7cnlQ;=iBm23l_k2noB{v$c6N%d}bh! 
zzUki%xK9o-qF$Z%$m|U72ph0eV!H=1MHrp+e%~WL0pr7pU$%3VEX|*153R$fojHWe zvS^0TtyyelUMkV(8Q4kSssX?xgb6*+5QhK-9mrMPKpKA>)pTbiQp5K8(6c+VJuULf zV=CnOi6Qo0(@ZCQ_%_jZP(i=ktqInjK+*L0eP!23LX(Q?kJX2m?Xq;@`%s-rhQ58OY(2C=dF0li^ z2O#Iprj3P0t12r7iF)c=pG%bltWc1jozTT|x!9w2c`fHi0J;Xb2m|!~9b)kBRSV;Q zhJBsI5T~TH(NP(IyF%V{RMVyCS>>Urqe+~IQ4+)E)5WX>-My0{;FTZbu<+qyATu=; zb9Z3JF-jfguXTyIDsr@ad;FK79)*I=w1?GmX&f}UFu(!PSjGCx#cZpQnEVcaLsBUw zZxH=t2V3YG$_rZcA-K!AGB-5HJ6991v%pHs2Bb^PR^%CKY&Konc@|9_ zPe2)dIToi7dx8*Fwze;V5XrmEdCZkb7CTL$mPn4Pi6)S3!qRBXf|o3Qi)TJGNDi*K2RTR?#+xiZEfRcO8t$W^a{mgHH7wNsFBF7;YWT!?R^Qt-0_U5L)@D2cCa(&DhG*ta|J z4f2tNv}mkiB(VR&SyO7M0XU%JwtVSD**s5nS0|;+3VOUp?cHlV)((I*F_6fnF@^ zBv9k&XtjAEcRH`Pu^HV#2ey>H4z!YolUjyK1j!DPL^~xo06#uZYqX&AMy_>Y2NZ}Y zz>okQ*q7ydYa5&|De(P7l&Y-wY+1_=_m6*{B;c-2HGaNUrg!N+nQa(QszcdzQ2cLi zkQD5~H9r#=&zFt)p0A04V9lR04fE%M>`-M+Bw?Ep1G6~a^)mRt7Vl7)JYv+;&0oyv zMav7SaEC||3?mGk5J(mD9PZRBG9v4e!mqKJH0hq#nK?-@&kc>NZ5fh=t2ifO0~cWN zdXL`JaRp%EWnaLi5723M!M53h!OfP{7n7O@2x10xQ~U4NWlvxJWXu6HJkA+IprrBj zBsu$9{re;;u&uPe5D!(Vpm!0427Q>RyrGGz?-V0T)aYX;1g;c)8cAvu$QYg=s%sgM z{=>7~DdaQNU7~95^}Je=oaPQOff#eB6oP2&$$eebzG8g;&-fSL<%S7Z(Cn2{e{msu z7EWaBzw1u-|J1LR(QlNW-~Nj7iz@*Df8cExIG^B}T+mXASE~o`t9l}cIE?s#kYP_J zSXYgI^ZT2b{61>}`xeV>Q2#+5^<#K1zwf1Og7Om3a9O=qf}|&YY@mLx{uvOkAQz{1 ziOB8#-BINFnB`FIGJ6X%gAOpeF4PvZuitKp5z(GEaHTHH3%ZpRSUJF1m>08kl9#Of z`J3^%^Bpogp64FtP`LC%h5jmtjD)@K2pJePKcmmbqOs(mYLuvLT>ef2E9`A9r$fqY z6DS?dFlkRPq-2EwUowBc=jn`4ciuF#3~;g3Ozr<48wdMb%=WQyK*-~q_ z6h0N9yA+W}ImSCbdl$}i*WTX;UcTJqZ8h93=ct{XFntHkPy*MG{6otNfGi6_wry zjkxqNz&NbfAMLcYL;?D1w_0rj3|e<98d!N(zHcNkLsxGahcp0~M$-FEG>Yoa@G@CC zoKhxSGj#&PnlL=1m$=nRj4m#C4lZ|r$M9#MykD$at5sn{QL77)zPl+UmS%e8VIUF! 
z_ zv>R?H&kTAg(zJme@-`FF?UY10rWYbpmiNsoLu7v4vZeq7_0udc5ryi`wFA zO0FMh^x&vu6F+12ogh&?yG9JMo9`__BSnmYoHeew!X_a|xmjHF@JVu?v> zh!&!^=O1Thn?C2o;h)yYmjiFd?g&n~A0@bX9cUlZNsSGN3`5BIY_x3QlIIA zRf~V%BQ^8S#dKvR%X>m@vn8Ai+sJZ}|D7(iRIvCYm{*Z>N7iep`SQ$5m5b*VhGc4G z2agfV*ya6G9r*$}I~;z{JW1{~bc2Rxqb&*r_2JH&_cS)N$NEpq#v4x6YpfF`UgZU@ zoG0I9_~t-C>RAC)hNN2ZvFT{lo9fIdGK>cRcTBgqw>{T`61CPzXJ4_o!|kjI80Zeu zU}pp;8QC$rKjQZ-FEUz)*wo+C6LU7kXu>IOE|Y@|^3#AH#`sFn?WXKu-C0tKaUmrc@{(v68(m%P%-hcTy~&J0 z0Ws4%%n12|UE6ysUg(c(O~c06jGF}xPA*$QQSEY*eO>812U6a5KJ&uCG;c4LveRL1 zV^}|Lm-{YlgdJ)j(pn>|jKr&=eHZEiSh$d5s= zAh98=k9-RT_mgKzRXmfj6)&7ptuN*!TEg@yo-6|J@24uGtF zeRTFXKlncVJkyJN{wVH%JV45b0B*n10u>d){cmxcOB?kQpOjfo<>{`xT=|*V`?Mq! zS|jKj^K0lpp9oFODMVR9t5V@HpQfYs`IjgkvApS}BBHBp-W`lms3&g(X z9BwB_@h)t0@aQmie-GGQK>Xuz;z=;5a56ZWRXU{W$jf@c>*znD)1n8VBo!?FH7xw6a_Sg$e^YDx@9uB^=2mLrz) zg*%@cta|F!*K&;n&ml57i;&wG*TT-TSoWbfx0=mFWZ-7*xXbLz4w$VvAozWpXZ!{A zvKR@0?ldfcX$%|r=s@n$Z1r$7tYE}Pl zCcyRD_@|4la7Kr2!K4oH*EREy^@2p{!=z62OOKI6QltoDH@v%lO7dV-rNuKaB#AO&4nD7VTy`Qz0>$6%p?3Y z<+|^AUFnCtEv*5^0Hv?7aUuXJ0ldD2g;v`v5UzsE9%NTVe_oJS>OY~(b*!W#Ia(fm z%vz1mVpQktP_JHiRwRj%FF^xz%U?`qc!}B1NiC&tNocS50Bc`AnZ00yh%Faj{07T6 zjb%oYW$4`Pm6;z~)m88Dfh$zSu?LZv-<jFF!>!S)nD6+)B|;e@R!H+i&|S?=imD8gm}lH-jE&5h!YM;FNk z*^!=BU#tgWLdMKccz@_%Sb8Wh3VU1i8hV>%_O-4z)z>QTyjFl%k%}$~|BSFnfZwnZ zTF<9w^3JJHx)GU$RT6}TZIotf zlen{PilMJEqd2<5vC1G0%C*Pr`-{uJL)iU;anc!N`srtV$?t z4!u)iF^W7$=>y11T3p8Nu`bud}%xl>Ug3ir?P6xB8$Y^w>6irb%@ z;YoKnv^L4&gIBOqiH6F=8?`;OSEqQri12j84g6L~Z}U`!fiTdss|`knaM23#AX9j) zZBEfzG!l11NN?o!;I%XQSKrtLUn4q5ZoLO)ioDuJU%h6g)kzfBltAbKl`;9N;~4<% z5V-Gf`kSiG*>~}`{GIt&-i6-YPJK|Ilx@Rri5Q8Z-HBT@jzhT3XbDi?A#oqi+9YiC zxz~+aX7LlH`A_AZsvP|^JeOq@iBZ6)4zPC=A8`{Y5g5{yEk(mLY6uFn$X|4cQ_8<7 z$I2^=C6?t34)m2zcf25rZ`qHSoAnnjT+;vR_$chVyWTyYwp5Yq$MC>_l#khmFTE;{ zPisY%WWdc6bw7G_>(-9z%$zT=&^L~I?gfT?ShAKolB(YVjY5ytz1m`92LPo4vu=_N zwCWYoMQX15YF!Dg4fRWd+uH&AYN6M=@m^zlN042kfmUl7o*#D8wH!!bM$5s!MnTAKT_uHH)wk|Dg-b9|Ja}UEqvQbjO_IDrYrJ4XKviU?NwQ57)zENZbu=>J$)`HZ;WKMGNF=MnkBL9R4 
zDMj7Xv({3LSQm|dKzZ|<(j$w0b{GQLwpBLi7mMAzuKUe!MOg4}Da0!_=RWZ9JPUua z{ZYP<_UB9X$+8LNqVbQ&N1AO7lhj4#o*ORrlF9-;O%Degj!3y&&R}po>g`Kd&P`;C_Xf_B7*Ab(8*b zY3RrR?n5-yc&96ZYWV0ygF~Yd0EpW8>QUT8k1Ib;(T8T+c_w#+fujz0;rTjhj6$u) zjG`a&u6ySPj6h(;r@FUpTGf4#E-K69i#CtclzPQaM$+{Md^U&Y3XKNL;wZU+XRak` zf3xEOpG(gZt1crm!f53XAoMyc&*N?WtMrqQT3Q1?VzuT1o*+v)OmLEkGO-`UNLE}D ztaPMwI(SvbHor-x#>Sf!nrD>)q$@vRykx`8Hli%S{r85LmX1tL%Q!&?_Jv4@D)qA| z)XAUv^&J5Gpam)mm7WDZ-fmWsS&p++6YY9$m!G5|V{xZT*I&>a|AIG$HZQS!*03IP zBiX9j;?jltxdo&c_XPbyK^UA!K1^3bI2HbAKW+N-(Rn)&5-z~eh*YLS6hdjCNjSkS7|OpXjxd7 zKG_Rksd~~iFb#7@IaKpFi(5`1rr0=AS^DJsKe5!Sdsr%!cc{Ra4`z=uvjGmfg@CZo z2mTMXS{kviOY!4T9*x>duWT!yMvK7hMbljdX2&|x!yjFwZF;MkZEjrb#UhiW$i=AU zcNVd|GdIT}1d(Ao^y-;07k_vfVydOMSk(UeX?CJ6z9NLL~@VoUqB5-1noDKKsOb$7;>HSq^+r{~wq;HPM zBamf#C)L&~&eMvCU~eNDPMwBNy$j!0sPS~dG<@=2T-kgH(rND9*R3ETm5#V4p66_^ zJVcDNA2o-TPdo;*>Fn@@)xx`-j?bLHVTe(YD-ky%m6v8j2Xkp@a2>JGya|z$B8YIW zf_sfEqmwRiSOLrpk_PQ!hv=mnRuFX(h%PQg z?zlwk#i@sCedTt2BhwQ zNS=}Re@2SGqO*0R8|+N%;}~qR%7w~A56Y4OSRnQI?w9_A?}K#(rP^)m%PZPT)KA4l zz5cboh`RN4AAPSnMiGC4GOM0~@0r7yDWDSXTac_!b>lH+erb{x$j1pMy)4W$y*NW4j3!Z@JX74 zOx@$clwaf4#D-6pF!-)yw_2pYmAsAKNfl-nB81Wb$buy6-K^9nJ@^x`|7LZ0pda#P ztU`D%NhZsAk38Md02E$ApUu8ws0VC(tlbElhSF)FFUssH;nh!N*(Bzl)BJ_SXraIJ zo|}*evLU8R!sRwJeCZoJ?W|dta4*RL3ZK)V=2ZTUIfhpAmqn9 zR{6ZE`K4{62C#8L2J2QSJakPfY_!n=w@*(*$pZnFN^E`ZDZn9R8EiLWe5fyUug_un zV?{oQc`p!lf>DlE?OgF#MpncX0lU6#aq=Sxat>*a-A3U7^O)(5Pnv@y#(u)<~$*UB2Uj#Clf_MqL2Tje4YTP~23 z9GRdt6OD~NUd-cz zC03vRFgOgkHhzswPXO+GK%cLzWSCuEmw5cK+)tA?BN88ZOBAdjcm4yyy9)(FfA-{e z!HQbaQ?3=K3{IRpF({P-XM4lt>aP1B`0@e}s>!9AA-OOToTu;j(L7tfA?`txTx^b^ zz(GFpsycJOFm9_MotL3nkC%M*mQ_1Y9TgOXpb|^p*KoXgn}_=I^)c7w zQ}h$avgt)bE!{fq0HYEL?N+Iz1JoXUKI@ca%KI04t>p%Y<&K+`^u<=6gPpl_4*7yL z1@e#-6&)2L^s)*tA*Lv;ISxxYbSN)JD@bzt&5SLD4^^AtCkB`0{~MsY`DN?btVd%Z zX>6H(G#Fc~v7#pT1aCX!Z36>?Pf}-Vapn!OZF6pj-*^RGC@;4NYj&2kEKH)WzpfiX zwE_CP0|V?w9lA6ovD2?;ZRZwDGyr7#3@Hj(UDlaD>R{3BO@|a-T(k|e8ws6)KSS{f8vMk=2=6E)XbH<=r8M<&5 
z@6oK9$zkolVnpZlt~d<@^~#2zHRBH>jnxRhHnEWaT0QC3PTIzV`L@2c>V z0N92pgSJ!j{8TvL5xrJa@sm%_)8nP1K@gUf(ZSFnh%oh}dE@mhO+o+mbI9qUdGQ+YCNP>`9w^ z4&>X|eoyD@*BkJ}Xi8&hvHkIrJ&GRXBB^GD@Wx9-+JbJku1erKHn$%Z!%U!zt6AH@ zdt;cL>P!7J`eE-Lf{M9QQ(YlE-il@L4i=RC+khP^=4`g00f}|MJ0(ry;y2B|W6XuDMta)jaGtzn9eRD!0 ze`(NtHn?M!Fdm`1(|HZZQmaRrXn*+qe3UZFM{I;#^Hshr>q@NH)$|Azm7Lt3r^>`) z%b5{&@pD9}P9;CMoKbVOjex&5`E2qeV|zSIqf098?~6K($45N;^!KNyPFMVitA6!j zH0Y`dLBjE&#b~d0A-q{M{xqvLvh18Nzh_4Xipy5K=v}y78#QGOkU|5JTWywX;!BehhNNfuOr4tq;yd^tmlAB6$JK1k?-Dsn zIAJCqw}vFvKWwd=Z6J1D+*@#r!RNXT>_ZJN!K?lcuC2qGVjmm|;ezZW_Mf<4Vb2&Q zS|ZaqQY9mcZd__RchFW)o7BrLZ&s_Y{)z4&DVS*1G(evEmNV|DpU^vh`Q*phU(UZS zI^IVkICV>tS%XXqrP%7&KJ{Rq+U122B|aEyT?#(e#^tyQjLc9&61q1uNRQtKlNn7b z)-af+dvNdQNw9>3TVQU`?+|~5>pPyzOWb0usAY03a&EQGemKX-897RmpJZr`VRl~a zfoFQVEnphbH$W%g>!M}~U(XF9D#4Wq@Q*tpXDI&}PaW%QpgLD*$5fvMzJ<2u&Kz<1 zTMzJ88JD;(7MSn(ugtuAJ?nmmvXjbx|7ZV!oBuoW>!0-dztwE9s++?$)sdr{SId|B zu>AT)pm^wC{(}WykZj(2d6($(+Z#mV&u!Vb%yhwa zG1Z1k%QcI8Mf&^#DF4j`EPeLsymRGQp+sjq*t$gt;W6$T$nJA+pZ@vLYytd535M>Gb4 ze7^*zQ1z~E4o7N~bDs3q6xW1$f?7m|ZdK3I=~Fwm;%H7Wo)z*XbWb+&d7iC;t5u9Clf1 z+OGIY>DQaM28LRzqRPfu)a}C5pIiE3bC*hi;W!~I;&u0p2$!IM%ky=V90i zL`Ls^eJDSR8COj}{OPA|N%nWPdYk7h5QDm8H1nEDt-)`~lZ%J%Fglb13A!rF##IKq zmEf{!*xHgpiYpXSBk*apIGX@$@>VFO#%;rwWXN&WL8EsLOw2X|_x4NIg3=wnlv;N3 zPdruTWgbD5kBdhb?8B{xy<+9X3q327wWSMSpdl6G9T)r@B^_S*i#_mp>v1;hb5sRe zR^fKTEtxj5*R)LJP=RGsbs03C&u+-7)fg7oIZUDxt|sQ6kNm2+1EgdV0u%g^*sVVExc7$1?P`i?%fq>2VpO!1VvIltPB?f+)KSU$`>REB@oyePVM{rzFrOBN5@zl@3Y+mqpM{A5jl# zMllWoDvVGidO<#TJoFUtm?J4UM|omupZ6HQhskL5I>7NyE>J3j>=W3H+hVcqaKU@h z739dDBoW29Bp>L-Z2pgt#}gbH@qJ>~ZI3~a{u&CG>FqrZw`9FhtaJTW+(>4Cf3t@q z@v^LWUk@rYC0dkr1Na9g9@&o!PhTyHyIDv_uu-`A|LhWh_tr@nHpB-qVgMBpgH6t) zAaXjN6kX!nUi{b-$Rz*~R#v~tBPXM&1A@3xr#tvuYFz<|To`+A{gW?BYSQ$b)bkAT z4@A8A@&jNVE>=uIq_AuE&+m#LEdfGv8{U>Sta98_}AD0o2vAA zDo-t~5y49Cdp;g+!uVu$@+i-^c?}SrJgpl#YIUSHP)i;4cw7rfZwc*4*cl3R5t+2B zUa0H5q7*dObHx`vMz~iwXjOd<;Y7*>ES3ut?mZ9iHXw>_)L&qe&yffa9k)sL8evVL 
zNjGEbFP6Ej&JBnDR!i3xwly;#xme94!RIhez;#}L1I5(9i5r%m-Asz1CiIbmdGkmr z-UYM$zJ^Ne6z{i2_h|)s$MSpS{ekQh+?ae7rN%9!t>d>1lehuE)ip4kl0iB30b4$h z;#sZp*DCLcJLM0WNcUC7+e=Q#RQl!zc6M?KImOhUQCA30<7`xwCX%W~=~#^e{EapK z)XB!0BU!b@6=#+^0TCpz+#?9xfW=@bvOV@Nd4|=?aren>XGfEEMEOI8+ifnbwUYyq z8w4@dnl=$QZ{h>b`6YucRPJ$)FUtrJpGtu<+c5|`ocD)|ENIAVX4$9Bm6uqWlOWnB}w)?WO-z@C8pS0VKs76^=6BKC*FIsT^l74qwVk2L z{iT!B#YGs4o8H&i5-7_3B5*^s{S^a~^gPM?$Z6%z0CHOUU}UsX9)2foZnejx`Rbiiaq3(3fwQ$L*Zl{60aD5v`xhNf<+T{o-q_6$ z_lk?h)FiQ$sg}dicPlTYL#1!;Nlh!JWi`#85n2w@q_2bMD5x}j6x8{dH-vp4qmI!0 zP;H^&cxL_2@Dg>qjv%90|AXU1Cj)T-?D?%|F z`@EvC6t&%h$p4-2t@7u(EA}`{XROV98St(F)iKr(H4&gN+x$xEa3IXM+*?q8H>cgd z%>>72tzql+ur$|J$93k|6s*5iUr-913aIlgP)F33df@_F=dits?UnQgd?xnEWjO;X z9cR%zlwNT}M#M%Qp1e4#o>b$lUgt%L@(nj%&P%33d(iuz3%ZdwBu(cUk@TSm=vk z>)%u##;V*QsTdhrw->Jm0&Dk8m@n|XlYQmD>`kaqdwubzGz9*~UtE=9cR;=Le-y%& zpZ$`ElJ#GusQ*?AgMaluzc}*0lEMPjvR?{vX>$ZtzumtSbQHQk0P`KHM)q=`zY#qA zQ+M!K*=Pix*B+SY4&a0otjdopit7s(mg4cuf9escnBS}7$3u1j{IbPzr*NbNump~P zSmg+#?$Pk!t-Yu>Pzq}W=6vsrBRMJR>xR1?h0rgdZyKOx^v$=ht|E{Pa;X}QRub+7 z^{i0-Vz~Z<(DeSdPKX{37Wq($!yorHaulEgmOw3+Li^hs%6oW*%Z(JzWlmj(hc zZ_7v{UkY;PRX!4JI0RBDJ)59*=w=}Y>m<`^GYlyJ&)zK!-04PbMhezb3$U=g6yvu2 zaOZkXEnL~=$_USfra1Ff9I^ohM2?W~dyQ6>x~uB;=P)lr^$p{45aI(8nSeQv2;vA5 zAT@iCW&nBXgr&d)mv>+B_8-YvOBmjHhabo(&Kg#p(jwoh8rski4(NIy3U>5X?6UZn zNnP$d6)^hL4%kmI0`>tP9GEBvVX)DN%71B6wX}n^{+6T;Mr2S9l5dWUcI(w+mjAbE zR3HJ9OKyet*XrDg(%c<06s9fFmX-3L-_-U=Sxwj3mjJM+7r(GpR={w7*>Tl+z-tIJVf1iKp{|cxd7{YTrSQ-dIyH%`#;6Th0pX$$ z#5^Wp06@%LjX;~~=>m`38eGRs?fu;4w1%qUck-WEEq8sQ=H&`)m9f6%X@Z2gsCw}W zgQ#+iq~+ABo)J3&b_VzBjQ8I1w|m%WZlcO34df^#@5tL4zzgqx*K3XNaHfY#cfPeB!n)k((~TW+|>6Lz^Keq*eLY`wD|P zaH1X#jpBTX61*^E9Y_`i#36;UTtg#C1qu1%PhNOj`#XazFL_XUi%u$1;~Be8OKXO% zCpQRyr|zfz4hOA^PX6@b_u69NSPh@By_P`nL-(&vCf>r4-dZ-$?8IobF+)7_oK1}F z=M%fz0^hu}D`hcgRnBnNCFLO!mGa~y{e%)fs`#K- zl{5WUQp|#G2_Y0iyQ=#G#vfS1tH6ro{}yzGJ3X!l7I*c_>4A)uB@mT~&9mNQ-$|n1 zCwY*L>CHbIVcL{KsAzyQV1Uf!fXs??sAOvxL!&(lMLJYQyyFa$gS3jY*Sq?93zD|D 
zsN>J0X11pfaW#05j{bX*jviuqh;{75RtA@5E>vj(nSt8(E^G6@lrV-K=!o4F*D|)3 z5#Jq+por@*A_t%4$~2~2G}B7dX>r3tYLZ?*j16{pAz6$E<~8_3GvJPH*O-q=x3~s0I}Y;g^Z{Zcme_Xdm?W!)jw!U`zy> zon=PC+MW=Mgng)hNRahMEZ`9$wHFh_vWc61XG-~sdur9#lI6AJv9^-ceC7V$y9Ua# zbSP==sUG8|g;pwZp$CK{in}`;$ko7>`7w=4IS%u{`$~n!DaeGs-#m29qT&Wn%Y#}V z$zu|-5`z+^m5GPk(T^-n0mQWOlGE^!A&q2hNdt)s^>wl-A*M>Kl+Oif^%k4XH`UkV z8gW>1(M0x3yWN1)vUNbpvvZ^bC64s*ksaMNy0Br&f-dW2`2K*b4BRaErSZGwNxK-Y ztZVsx*&x-~Qo&8W>=7nuQ0BgS-AnaO;1pQU)nV?;5)`H323~4`n1(^&G*BKVT?iZQ zN)*&C;9|oFBAr4HPChu}vq)IvUKd!9@tmJMm$q-O;}_ycDExlEx z!Qh=TJ|W>QX&!8iqvgVqWS|n3r{cP`dv761!ohjdmidDFrFZ}Gt8lY?j)zuwqovj{ zt_@~0Fc@&&TFERntrQapz56>eoeih)1DWIlr|$DN8Bq+A z7Jnw%M~af})(ul?GbKp(HvJZ#clovSndzK`tHlsDbMUW9*OBhMz6#SG5DrmA`pZ@g zC1x!Ib;^|f`k()v02bX2-GCymwiQNnLjB*2-FX>*60j0_z?tTc9vO$N89@>6dqqih zrOlwQjzx5sYs-qC$p9^NBI^5IfB^s?Nkwbzca9A4ul@x4f~L1(Hw&b$Bejd%87|Ng zr4xrFmMJolH9aVS%NwwHrPZ#JvQiLBVHFr|gM#1c<&h-#0!?GGn^XZiaQfkJnAy0| zoPAeV4%*cTN`f2cH-_5wp3kPM2aYWBWS1g`Z76Hq)wCLjkP+Heih#4VP{|#r6yuEr z6V)`u#;V-{Z9V^q@-^C!`cjZHm$hL+V)KCyH;8A6;*S+_XS74D2nbMF_sz8e>5hQ{ zB{RPKRnNSgy7MH~qENmtxf%#5Q7TsSivnD}QxX!_Z`9r>i&U;|A5^Z4ZAS9ZJh#K+ zrQZ}DU1-=6uxwG;+Kj~jZ$+Do8s_22o*+Y}kGhUD7K@llER#>2Z}Vu7i$?Um68I}Y zxTJ0J-8m90!WX8+y>59O!O8$yS8ja3uTq^ySF$wOy@2laaO1FdP+c?^HaA*z1ar!h zp_E=BgG&UK;Uz=p@(k>LYqzlETF1~;FcxiJB&5mBuHWq{9DaTn_(N`0p0 zEf0LqrQkt=S_AR08ve-3(`4Y;9#-5Hayd05DJxw;$79<0jN$Nro{u5siEBX=v4;At zLh*F&df8^i7vw~^dI9q-*!Xg{0C=TlGMc^38E!)1JNg$XcNzl{cisbEHXoP3zF+RY zn2L@DFO8U(qd&SKeyjd;B_WyWsA=jQJJZve|Dd1Msnp7W(aN#Bt9v^|;dqg-JuIP4 zwc(P~LgmZxA2aS?TFf;2Jc^taPEMGkc#&DR?qaLw26}vKkF>=Or@4-n+R+HcXje=u zGWBzPg+~{x2U7Kya)dytW7w#{O@wE! 
zp>5$Ao@Re62)=7-U*9uNrPlb!>gWUFAmX}Z65AEW4$RJ@ zT|4ZQ{!wQIczWKA2FJ}YZ9KFLv#snmW)luIz*)5IJh(h+RF|}`{+WAmK%DYvCaZKn zT9>(!pHSShc8^E_Nhsk@*+RX3kAO{^nH869cFgWc!iHdyilierX=4BjeIxd;a8Rh+wWzR_`bHEci)g6kpA*4_(c6Lt3#$C)Pm0Mdo;JC7$&1|oi1Ij& zPQ9S&it+LyOa%P@a8ObEXfa4)U}nH6)|;q?xk|+WWCu$0e8#mT`pogdJeE_U;zf_{ z9#V(~j1*|c@_5l^ry4}pTpXmH3;)N*!EwyQGfvP`e~LL9i)Cfy%q25wsCDDxV)`G9 z?NK;Ky9#VA#~rlc5O3j@#*sVhk1#@A$6C`b-YSPL9p=E6Cu1GDrM>$|!hC0J2Kt1O^tRc@MG)+;-e(_u|!( zvUc2*2=%O44AcLJdSZ45Keh#kPf;J`c51O1*im=hAJBnbe?6SQPw3_h!)KAIGBs-Y z5cchyZGHZ5$fY<^u=6kK*u^-T(dn*>duoe!jS~6!pqn6o6~C4DC^ybThe@S*6mwIR zb*Vq3*g&e#{{yxux(Wcj^^dzq$@kB{t3$qkt5aidVROkiuZLA&PsVI0$8;nW7)pFb zXGaa9qo0!V^A$*ex4}cD3p`XXi2D_S0?tX%U;>ir6JfHIa$}~6;gxqiQs>IvNLwmn zUNoceFV4+;jS%!LCXfDD$!(I^9z)QseDCzZHh(8*`B~3?woQv7XaSPlR1o{%Y|=U~ z6ZtBx3pZMNNjWbOz662}iwm~RvmUTn|2Y`*R)9N^R2;b+w+J+0Bp}CAY;zwyN*@T^{uCa2d)-0Vpg@;GV zOzMSXbou}YPSpI?z7B#d8_TFWiT&YU%XX>CwgGQF=!mPFb>cU;Vh(pCXG=xYb`kfj zbjB-`6Mz#RKG$@786@iTMqrR5*t|g4dC>4+hPabJDYJ!@7YZvZ9d`EZtSR^Y2{~bY z9C-<;RLH;$u2%mN739^VL=X=x4;N<>RW3FD&KM(o$JB<=>}Kg2nOU|6R-^=Tqrrd937qDkJTd6 z%9}h*RDmWMyjgSyP|y=Ko6gRQFMHtoMID7^YHAKbHFC?w?2TMAb{FC4Oc@w%J^mx6va6{10hRv^N(<*9W7N!3DYY$c>CsKVR3&bFM zRaikYq_+5H0m6m`=G;|;$p&QZgZOSk1+;$i5)X1Z!mfo2K#iN=b*x7qs+#Xs0lC!I zWT0X@D3prXiwe2CCjVN9c7isiY31PAN4|J#TCsGbD!y+S%IXOPkzB%v!Z;#E0Ku!n z0I!^MhSdN;2@rqc6<8B@5W$4{QZf%Jj4Gt0U{KNHH$WM4_)^QmIvkQ<&A@r1S`WE% zLZ*-%?B~KoKbZraFe*Eicv(S+5`M?h^wdc*q-qd^c7$N|4JrR2)&jdV8A-wMyRFO< zDJdahhGH-Fd9^O*orX=yEQ}DrfveOqGes}v%xv4CMM*t$rn&ufD{6O*moR^~38?4fTUO-E}uK~hiBfWUvp3truHDj>=i4q=pCoIueyC{^YPOB`>l1aPNMd zTt;jvw%6AU5tUQ+4EXc0@Dtu0Nz=>g2CmQLch&XtfamPpu&NJa)fzr$P0D^)xEf^G z@$q}nLBYFz-?^X?@0%aw7uS}4Hxp*s91^5{e&jKp=BCUEkvxzp+sRq+ewYdCK--^k zvQ{FDakVS+#nYgMvq&NbT1^&JUeKa0SjIbt9baFKwAk9c0hzHf8+@9wV;JH$QS3!6q zzgo1e*?6K}jBQ1+U" + if h.Params != nil && len(h.Params) > 0 { + s += ";" + h.Params.String() + } + return s +} + +func (h *AddressHeader) Clone() Value { + hc := &AddressHeader{ + DisplayName: h.DisplayName, + Uri: 
h.Uri.Clone(), + Params: h.Params.Clone(), + } + return hc +} + +func (h *PlainHeader) String() string { + return h.Content +} + +func (h *PlainHeader) Clone() Value { + return &PlainHeader{Content: h.Content} +} + +func NewPlainHeader(v interface{}) *PlainHeader { + return &PlainHeader{Content: fmt.Sprint(v)} +} + +func NewMaxForwardHeader(b int) *MaxForwardsHeader { + return &MaxForwardsHeader{Forward: b} +} + +func (h *Header) Clone() *Header { + vv := &Header{} + h.mu.Lock() + defer h.mu.Unlock() + vv.Keys = make([]string, len(h.Keys)) + for i, k := range h.Keys { + vv.Keys[i] = k + } + vv.Values = make(map[string]Value) + for k, v := range h.Values { + vv.Values[k] = v.Clone() + } + return vv +} + +func (h *Header) Set(name string, value Value) { + h.mu.Lock() + defer h.mu.Unlock() + name = textproto.CanonicalMIMEHeaderKey(name) + if h.Keys == nil { + h.Keys = make([]string, 0) + } + if h.Values == nil { + h.Values = make(map[string]Value) + } + if _, ok := h.Values[name]; !ok { + h.Keys = append(h.Keys, name) + } + h.Values[name] = value +} + +//Get 获取指定的头信息 +func (h *Header) Get(name string) Value { + h.mu.RLock() + defer h.mu.RUnlock() + if v, ok := h.Values[textproto.CanonicalMIMEHeaderKey(name)]; ok { + return v + } + return nil +} + +func (h *Header) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + if _, ok := h.Values[textproto.CanonicalMIMEHeaderKey(name)]; ok { + return true + } + return false +} + +//String 返回字符串数据 +func (h *Header) String() string { + var ( + ok bool + val Value + sb strings.Builder + ) + for _, k := range h.Keys { + if val, ok = h.Values[k]; !ok { + continue + } + sb.WriteString(textproto.CanonicalMIMEHeaderKey(k)) + sb.WriteString(": ") + sb.WriteString(val.String()) + sb.WriteString("\r\n") + } + sb.WriteString("\r\n") + return sb.String() +} + +//parseAddressHeaderFunc 解析地址信息 +func parseAddressHeaderFunc(s string) (header Value, err error) { + var ( + pos int + tagBegin int + tagEnd int + length int + ) + 
tagBegin, tagEnd = -1, -1 + hv := &AddressHeader{Uri: &Uri{}} + length = len(s) + for pos = 0; pos < length; pos++ { + if s[pos] == '<' { + hv.DisplayName = strings.Trim(strings.TrimSpace(s[:pos]), "\"") + tagBegin = pos + } + if s[pos] == '>' { + tagEnd = pos + } + } + if tagBegin == -1 || tagEnd == -1 { + err = fmt.Errorf("missing '<>' %s", s) + return + } + ss := s[tagBegin+1 : tagEnd] + if hv.Uri, err = parseUri(ss); err != nil { + return + } + if tagEnd != length-1 { + hv.Params, err = parseMap(s[tagEnd+1:]) + } + header = hv + return +} + +func parseAuthorizationHeaderFunc(s string) (header Value, err error) { + var ( + pos int + key string + val string + ) + hv := &AuthorizationHeader{} + if pos = strings.Index(s, " "); pos == -1 { + return + } + hv.Method = s[:pos] + ss := strings.Split(s[pos+1:], ",") + + for _, sp := range ss { + if pos = strings.Index(sp, "="); pos != -1 { + key = strings.TrimSpace(sp[:pos]) + val = strings.Trim(strings.TrimSpace(sp[pos+1:]), "\"") + switch strings.ToLower(key) { + case "username": + hv.Username = val + case "realm": + hv.Realm = val + case "nonce": + hv.Nonce = val + case "response": + hv.Response = val + case "cnonce": + hv.CNonce = val + case "nc": + hv.NC = val + case "qop": + hv.QOP = val + case "algorithm": + hv.Algorithm = val + case "uri": + hv.Uri, err = parseUri(val) + } + } + } + header = hv + return +} + +//parseArrayHeaderFunc 解析数组信息 +func parseArrayHeaderFunc(s string) (header Value, err error) { + hv := &ArrayHeader{} + ss := strings.Split(s, ",") + hv.Values = make([]string, len(ss)) + for i, vs := range ss { + hv.Values[i] = strings.TrimSpace(vs) + } + header = hv + return +} + +//parseAddressHeaderFunc 解析纯文本头 +func parseMaxForwardHeaderFunc(s string) (header Value, err error) { + h := &MaxForwardsHeader{} + h.Forward, err = strconv.Atoi(strings.TrimSpace(s)) + header = h + return +} + +//parseAddressHeaderFunc 解析纯文本头 +func parsePlainsHeaderFunc(s string) (header Value, err error) { + header = 
&PlainHeader{Content: s} + return +} + +//parseSequenceHeaderFunc 解析seq头信息 +func parseSequenceHeaderFunc(s string) (header Value, err error) { + hv := &SequenceHeader{} + ss := strings.Split(s, " ") + if len(ss) == 2 { + hv.Method = Method(strings.TrimSpace(ss[1])) + hv.Sequence, err = strconv.Atoi(ss[0]) + } else { + err = fmt.Errorf("unknown string %s", s) + } + header = hv + return +} + +func parseViaHeaderFunc(s string) (header Value, err error) { + var ( + ps string + ) + hv := &ViaHeader{} + ss := strings.Split(s, " ") + if len(ss) < 2 { + err = fmt.Errorf("unknown string '%s'", s) + return + } + ps = ss[1] + ss = strings.Split(ss[0], "/") + if len(ss) == 3 { + hv.Protocol, hv.ProtocolVersion, hv.Transport = ss[0], ss[1], ss[2] + } else if len(ss) == 2 { + hv.Protocol, hv.ProtocolVersion = ss[0], ss[1] + hv.Transport = ProtoUDP + } else { + err = fmt.Errorf("invalid protocol string %s", ss[0]) + return + } + hv.Uri, err = parseUri(ps) + header = hv + return +} + +//parseHeader 解析头部 +func parseHeader(s string) (key string, value Value, err error) { + var ( + pos int + ok bool + str string + fun ParserHeaderFunc + ) + pos = strings.Index(s, ":") + if pos == -1 { + err = fmt.Errorf("unexpected multi-line response: %s", s) + return + } + key = s[:pos] + str = strings.TrimSpace(s[pos+1:]) + if fun, ok = funcMap[key]; ok { + value, err = fun(str) + } else { + value, err = parsePlainsHeaderFunc(str) + } + return +} + +//readHeader read head message +func readHeader(b *bufio.Reader) (header *Header, err error) { + var ( + line string + key string + value Value + ) + header = &Header{} + tr := textproto.NewReader(b) + for { + if line, err = tr.ReadLine(); err != nil { + if err == io.EOF { + err = nil + } + break + } + //读取完毕 + if len(line) == 0 { + break + } + if key, value, err = parseHeader(strings.TrimSpace(line)); err != nil { + continue + } + header.Set(key, value) + } + return +} diff --git a/header_test.go b/header_test.go new file mode 100644 index 
0000000..71c96da --- /dev/null +++ b/header_test.go @@ -0,0 +1,14 @@ +package sip + +import ( + "fmt" + "testing" +) + +func Test_parseViaHeaderFunc(t *testing.T) { + if hv, err := parseViaHeaderFunc("SIP/2.0/TCP 192.168.4.169:40828;branch=z9hG4bK-524287-1---405263a6a9549471"); err != nil { + t.Error(err) + } else { + fmt.Println(hv) + } +} diff --git a/method.go b/method.go new file mode 100644 index 0000000..5037f83 --- /dev/null +++ b/method.go @@ -0,0 +1,38 @@ +package sip + +import "strings" + +type Method string + +// Determine if the given method equals some other given method. +// This is syntactic sugar for case insensitive equality checking. +func (method *Method) Equals(other *Method) bool { + if method != nil && other != nil { + return strings.EqualFold(string(*method), string(*other)) + } else { + return method == other + } +} + +func (method *Method) Is(s string) bool { + return strings.ToLower(string(*method)) == strings.ToLower(s) +} + +//String +func (method Method) String() string { + return string(method) +} + +// It's nicer to avoid using raw strings to represent methods, so the following standard +// method names are defined here as constants for convenience. 
+const ( + MethodInvite Method = "INVITE" + MethodAck Method = "ACK" + MethodCancel Method = "CANCEL" + MethodBye Method = "BYE" + MethodRegister Method = "REGISTER" + MethodOptions Method = "OPTIONS" + MethodSubscribe Method = "SUBSCRIBE" + MethodNotify Method = "NOTIFY" + MethodRefer Method = "REFER" +) diff --git a/pool/reader.go b/pool/reader.go new file mode 100644 index 0000000..2166b77 --- /dev/null +++ b/pool/reader.go @@ -0,0 +1,41 @@ +package pool + +import ( + "bufio" + "bytes" + "io" + "sync" +) + +var ( + bytesReaderPool sync.Pool + bufioReaderPool sync.Pool +) + +func GetBytesReader(buf []byte) *bytes.Reader { + if v := bytesReaderPool.Get(); v == nil { + return bytes.NewReader(buf) + } else { + r := v.(*bytes.Reader) + r.Reset(buf) + return r + } +} + +func PutBytesReader(r *bytes.Reader) { + bytesReaderPool.Put(r) +} + +func GetBufioReader(r io.Reader) *bufio.Reader { + if v := bufioReaderPool.Get(); v == nil { + return bufio.NewReader(r) + } else { + br := v.(*bufio.Reader) + br.Reset(r) + return br + } +} + +func PutBufioReader(r *bufio.Reader) { + bufioReaderPool.Put(r) +} diff --git a/proxy/conn.go b/proxy/conn.go new file mode 100644 index 0000000..16c1e40 --- /dev/null +++ b/proxy/conn.go @@ -0,0 +1,41 @@ +package proxy + +import ( + "github.com/uole/sip" + "net" +) + +type ( + Conn interface { + Addr() net.Addr + Request(req *sip.Request) (err error) + Response(res *sip.Response) (err error) + } + + UdpConn struct { + addr *net.UDPAddr + conn *net.UDPConn + } +) + +func (conn *UdpConn) Addr() net.Addr { + return conn.addr +} + +func (conn *UdpConn) Request(req *sip.Request) (err error) { + _, err = conn.conn.WriteToUDP(req.Bytes(), conn.addr) + return +} + +func (conn *UdpConn) Response(res *sip.Response) (err error) { + _, err = conn.conn.WriteToUDP(res.Bytes(), conn.addr) + return +} + +func newUDPConn(addr string, conn *net.UDPConn) *UdpConn { + udpAddr, _ := net.ResolveUDPAddr("udp", addr) + return &UdpConn{ + addr: udpAddr, + conn: conn, 
+ } +} diff --git a/proxy/context.go b/proxy/context.go new file mode 100644 index 0000000..e1f85ba --- /dev/null +++ b/proxy/context.go @@ -0,0 +1,42 @@ +package proxy + +import ( + "context" + "github.com/uole/sip" +) + +const ( + DirectionRequest = 0x01 + DirectionResponse = 0x02 +) + +type Message struct { + context context.Context + direction int + request *sip.Request + response *sip.Response +} + +func (ctx *Message) CallID() string { + if ctx.direction == DirectionRequest { + return ctx.Request().CallID() + } else { + return ctx.Response().CallID() + } +} + +func (ctx *Message) Context() context.Context { + return ctx.context +} + +func (ctx *Message) Direction() int { + return ctx.direction +} + +func (ctx *Message) Request() *sip.Request { + return ctx.request +} + +func (ctx *Message) Response() *sip.Response { + return ctx.response +} diff --git a/proxy/process.go b/proxy/process.go new file mode 100644 index 0000000..9c955c8 --- /dev/null +++ b/proxy/process.go @@ -0,0 +1,36 @@ +package proxy + +import "time" + +//Process the process flow +type Process struct { + id string //process call id + caller Conn //caller conn + callee Conn //callee conn + route *Route //process route + relationship *Relationship + stacks []*Message //stacks + createdAt time.Time + updatedAt time.Time +} + +func (proc *Process) Ready() bool { + return proc.caller != nil && proc.callee != nil +} + +func (proc *Process) Caller() Conn { + return proc.caller +} + +func (proc *Process) Callee() Conn { + return proc.callee +} + +func (proc *Process) Push(msg *Message) { + proc.updatedAt = time.Now() + proc.stacks = append(proc.stacks, msg) +} + +func NewProcess(id string) *Process { + return &Process{id: id, createdAt: time.Now(), stacks: make([]*Message, 0)} +} diff --git a/proxy/relationship.go b/proxy/relationship.go new file mode 100644 index 0000000..63aad43 --- /dev/null +++ b/proxy/relationship.go @@ -0,0 +1,8 @@ +package proxy + +type Relationship struct { + User string + 
Domain string + OriginalDomain string + Conn Conn +} diff --git a/proxy/reverseproxy.go b/proxy/reverseproxy.go new file mode 100644 index 0000000..421b4a3 --- /dev/null +++ b/proxy/reverseproxy.go @@ -0,0 +1,417 @@ +package proxy + +import ( + "bytes" + "context" + "errors" + "fmt" + "github.com/uole/sip" + "github.com/uole/sip/pool" + "log" + "net" + "strconv" + "sync" + "time" +) + +var ( + responseFeature = []byte("SIP") + + ErrorMissingToHead = errors.New("head to missing") +) + +type ( + ReverseProxy struct { + ctx context.Context + udpConn *net.UDPConn + processLocker sync.RWMutex + processes map[string]*Process //处理铜须 + transChan chan *Transaction //事物处理 + routes []*Route //路由表 + relationshipLocker sync.RWMutex + relationships map[string]*Relationship //关系表 + } +) + +//rewriteRequest 重写sip请求 +func (rp *ReverseProxy) rewriteRequest(trans *Transaction) *sip.Request { + originalRequest := trans.Request() + rewriteRequest := originalRequest.Clone() + //match address + if rewriteRequest.Address == trans.transport.Addr().String() { + if trans.Address() == trans.Caller().Addr().String() { + rewriteRequest.Address = trans.Callee().Addr().String() + } else { + rewriteRequest.Address = trans.Caller().Addr().String() + } + } + if originalRequest.Params != nil { + rewriteRequest.Params = originalRequest.Params.Clone() + } + if originalRequest.Header.Has(sip.HeaderContact) { + originalContactHeader := originalRequest.Header.Get(sip.HeaderContact).(*sip.AddressHeader) + rewriteContactHeader := &sip.AddressHeader{ + Uri: sip.NewUri(originalContactHeader.Uri.User, trans.transport.Addr().String(), sip.Map{}).EnableProtocol(), + Params: originalContactHeader.Params.Clone(), + } + rewriteContactHeader.Uri.Params.Set("transport", trans.transport.Network()) + rewriteRequest.Header.Set(sip.HeaderContact, rewriteContactHeader) + } + if originalRequest.Header.Has(sip.HeaderVia) { + originalViaHeader := originalRequest.Header.Get(sip.HeaderVia).(*sip.ViaHeader) + rewriteViaHeader 
:= &sip.ViaHeader{ + Protocol: "SIP", + ProtocolVersion: "2.0", + Transport: "UDP", + Uri: sip.NewUri("", trans.transport.Addr().String(), originalViaHeader.Uri.Params.Clone()), + } + rewriteRequest.Header.Set(sip.HeaderVia, rewriteViaHeader) + } + + if rewriteRequest.Header.Has(sip.HeaderFrom) { + fromHeader := rewriteRequest.Header.Get(sip.HeaderFrom).(*sip.AddressHeader) + fromHeader.Uri.Params.Set("transport", trans.transport.Network()) + if rewrite, ok := trans.Rewrite(); ok { + if fromHeader.Uri.Host == rewrite.From { + fromHeader.Uri.Host = rewrite.To + fromHeader.Uri.Port = 0 + } else if fromHeader.Uri.Host == rewrite.To { + fromHeader.Uri.Host = rewrite.From + } + } + } + + if rewriteRequest.Header.Has(sip.HeaderTo) { + toHeader := rewriteRequest.Header.Get(sip.HeaderTo).(*sip.AddressHeader) + toHeader.Uri.Params.Set("transport", trans.transport.Network()) + if rewrite, ok := trans.Rewrite(); ok { + //呼出场景 + if toHeader.Uri.Host == rewrite.From { + toHeader.Uri.Host = rewrite.To + toHeader.Uri.Port = 0 + } else if toHeader.Uri.Host == rewrite.To { + toHeader.Uri.Host = rewrite.From + } + //呼入场景 + if toHeader.Uri.Address() == trans.transport.Addr().String() { + if trans.Address() == trans.Caller().Addr().String() { + toHeader.Uri.SetAddress(trans.Callee().Addr().String()) + } else { + toHeader.Uri.SetAddress(trans.Caller().Addr().String()) + } + } + } + } + return rewriteRequest +} + +//rewriteResponse 重写sip响应消息 +func (rp *ReverseProxy) rewriteResponse(trans *Transaction) *sip.Response { + originalResponse := trans.Response() + rewriteResponse := originalResponse.Clone() + if originalResponse.Header.Has(sip.HeaderVia) { + originalViaHeader := originalResponse.Header.Get(sip.HeaderVia).(*sip.ViaHeader) + rewriteViaHeader := &sip.ViaHeader{ + Protocol: "SIP", + ProtocolVersion: "2.0", + Transport: "UDP", + Uri: sip.NewUri("", trans.Caller().Addr().String(), originalViaHeader.Uri.Params.Clone()), + } + if trans.Address() == trans.Caller().Addr().String() { + 
rewriteViaHeader.Uri = sip.NewUri("", trans.Callee().Addr().String(), sip.Map{}) + } else { + rewriteViaHeader.Uri = sip.NewUri("", trans.Caller().Addr().String(), sip.Map{}) + } + if originalViaHeader.Uri.Params.Get("branch") != "" { + rewriteViaHeader.Uri.Params.Set("branch", originalViaHeader.Uri.Params.Get("branch")) + } + rewriteViaHeader.Uri.Params.Set("rport", strconv.Itoa(rewriteViaHeader.Uri.Port)) + rewriteResponse.Header.Set(sip.HeaderVia, rewriteViaHeader) + } + if originalResponse.Header.Has(sip.HeaderContact) { + originalContactHeader := originalResponse.Header.Get(sip.HeaderContact).(*sip.AddressHeader) + rewriteContactHeader := &sip.AddressHeader{ + Uri: sip.NewUri(originalContactHeader.Uri.User, trans.transport.Addr().String(), sip.Map{}).EnableProtocol(), + Params: originalContactHeader.Params.Clone(), + } + rewriteContactHeader.Uri.Params.Set("transport", trans.transport.Network()) + rewriteResponse.Header.Set(sip.HeaderContact, rewriteContactHeader) + } + if rewriteResponse.Header.Has(sip.HeaderFrom) { + fromHeader := rewriteResponse.Header.Get(sip.HeaderFrom).(*sip.AddressHeader) + fromHeader.Uri.Params.Set("transport", trans.transport.Network()) + if rewrite, ok := trans.Rewrite(); ok { + //呼出场景 + if fromHeader.Uri.Host == rewrite.To { + fromHeader.Uri.Host = rewrite.From + } else if fromHeader.Uri.Host == rewrite.From { + fromHeader.Uri.Host = rewrite.To + } + } + } + if rewriteResponse.Header.Has(sip.HeaderTo) { + toHeader := rewriteResponse.Header.Get(sip.HeaderTo).(*sip.AddressHeader) + toHeader.Uri.Params.Set("transport", trans.transport.Network()) + if rewrite, ok := trans.Rewrite(); ok { + //呼出场景 + if toHeader.Uri.Host == rewrite.To { + toHeader.Uri.Host = rewrite.From + } else if toHeader.Uri.Host == rewrite.From { + toHeader.Uri.Host = rewrite.To + } + //呼入场景 + if toHeader.Uri.Address() == trans.transport.Addr().String() { + if trans.Address() == trans.Caller().Addr().String() { + 
toHeader.Uri.SetAddress(trans.Callee().Addr().String()) + } else { + toHeader.Uri.SetAddress(trans.Caller().Addr().String()) + } + } + //呼入场景 + if toHeader.Uri.Address() == trans.Callee().Addr().String() { + toHeader.Uri.SetAddress(trans.transport.Addr().String()) + } + } + } + return rewriteResponse +} + +//roundTripper 数据转发 +func (rp *ReverseProxy) roundTripper(trans *Transaction) (err error) { + if trans.message.Direction() == DirectionRequest { + request := rp.rewriteRequest(trans) + if request.Header.Has(sip.HeaderMaxForwards) { + forwardHeader := request.Header.Get(sip.HeaderMaxForwards).(*sip.MaxForwardsHeader) + forwardHeader.Forward = forwardHeader.Forward - 1 + if forwardHeader.Forward <= 0 { + err = trans.Caller().Response(sip.NewResponse(sip.StatusLoopDetected, request)) + return + } + } + if trans.Address() == trans.Caller().Addr().String() { + err = trans.Callee().Request(request) + } else { + err = trans.Caller().Request(request) + } + } else { + response := rp.rewriteResponse(trans) + if trans.Address() == trans.Callee().Addr().String() { + err = trans.Caller().Response(response) + } else { + err = trans.Callee().Response(response) + } + } + return +} + +//updateRelationship 更新绑定关系 +func (rp *ReverseProxy) updateRelationship(conn Conn, msg *Message) *Relationship { + var ( + domainName string + ) + req := msg.Request() + fromHead := req.Header.Get(sip.HeaderFrom).(*sip.AddressHeader) + domainName = fromHead.Uri.Host + //if domain rewrite rules exists + for _, route := range rp.routes { + if route.Domain == domainName { + if route.RewriteTo != "" { + domainName = route.RewriteTo + } + break + } + } + username := fmt.Sprintf("%s@%s", fromHead.Uri.User, domainName) + rp.relationshipLocker.Lock() + defer rp.relationshipLocker.Unlock() + relationship, ok := rp.relationships[username] + if !ok { + relationship = &Relationship{ + User: fromHead.Uri.User, + Domain: domainName, + OriginalDomain: fromHead.Uri.Host, + } + rp.relationships[username] = 
relationship + log.Printf("bind user %s relationship %s", username, conn.Addr().String()) + } + relationship.Conn = conn + return relationship +} + +//findRoute 查找请求的路由 +func (rp *ReverseProxy) findRoute(req *sip.Request) (route *Route, err error) { + fromHead := req.Header.Get(sip.HeaderFrom).(*sip.AddressHeader) + domainName := fromHead.Uri.Host + for _, r := range rp.routes { + if r.Domain == domainName { + route = r + break + } + } + if route == nil { + err = fmt.Errorf("domain %s route not found", domainName) + } + return +} + +//findRelationship 查找绑定关系 +func (rp *ReverseProxy) findRelationship(req *sip.Request) (relationship *Relationship, err error) { + var ( + ok bool + ) + if !req.Header.Has(sip.HeaderTo) { + err = ErrorMissingToHead + return + } + if !req.Header.Has(sip.HeaderContact) { + err = ErrorMissingToHead + return + } + toHead := req.Header.Get(sip.HeaderTo).(*sip.AddressHeader) + contactHead := req.Header.Get(sip.HeaderContact).(*sip.AddressHeader) + rp.relationshipLocker.RLock() + defer rp.relationshipLocker.RUnlock() + username := fmt.Sprintf("%s@%s", toHead.Uri.User, toHead.Uri.Host) + //如果直接找到对应的用户信息 + if relationship, ok = rp.relationships[username]; ok { + return + } + //使用IP的方式进行查找数据 + username = fmt.Sprintf("%s@%s", toHead.Uri.User, contactHead.Uri.Host) + if relationship, ok = rp.relationships[username]; ok { + return + } + err = fmt.Errorf("%s relationship not found", username) + return +} + +//getProcess 获取一个处理器 +func (rp *ReverseProxy) getProcess(conn Conn, msg *Message) (process *Process, err error) { + var ( + ok bool + route *Route + relationship *Relationship + ) + rp.processLocker.Lock() + defer rp.processLocker.Unlock() + if process, ok = rp.processes[msg.CallID()]; ok { + return + } + if msg.Direction() == DirectionResponse { + err = fmt.Errorf("not found") + return + } + process = NewProcess(msg.CallID()) + process.caller = conn + //bypass route + if route, err = rp.findRoute(msg.Request()); err == nil { + process.callee = 
newUDPConn(route.Address(), rp.udpConn) + process.route = route + rp.processes[msg.CallID()] = process + if msg.Direction() == DirectionRequest && msg.Request().Method == sip.MethodRegister { + rp.updateRelationship(conn, msg) + } + return + } + //find relationship + if relationship, err = rp.findRelationship(msg.Request()); err == nil { + process.callee = relationship.Conn + process.relationship = relationship + rp.processes[msg.CallID()] = process + } + return +} + +func (rp *ReverseProxy) udpServe(addr string) (err error) { + var ( + n int + proc *Process + remoteAddr *net.UDPAddr + localAddr *net.UDPAddr + ) + if localAddr, err = net.ResolveUDPAddr("udp", addr); err != nil { + return + } + if rp.udpConn, err = net.ListenUDP("udp", localAddr); err != nil { + return + } + buf := make([]byte, 1024*32) + for { + if n, remoteAddr, err = rp.udpConn.ReadFromUDP(buf); err != nil { + break + } + if n < 3 { + continue + } + msg := &Message{} + bytesReader := pool.GetBytesReader(buf[:n]) + bufioReader := pool.GetBufioReader(bytesReader) + if bytes.Compare(buf[:3], responseFeature) == 0 { + msg.direction = DirectionResponse + msg.response, err = sip.ReadResponse(bufioReader) + } else { + msg.direction = DirectionRequest + msg.request, err = sip.ReadRequest(bufioReader) + } + pool.PutBytesReader(bytesReader) + pool.PutBufioReader(bufioReader) + if err != nil { + log.Printf("parse sip message error: %s", err.Error()) + continue + } + //获取处理程序 + if proc, err = rp.getProcess(&UdpConn{conn: rp.udpConn, addr: remoteAddr}, msg); err != nil { + if msg.Direction() == DirectionRequest { + res := sip.NewResponse(sip.StatusTemporarilyUnavailable, msg.Request()) + _, _ = rp.udpConn.WriteToUDP(res.Bytes(), remoteAddr) + } + log.Printf("get sip message %s process error: %s", msg.CallID(), err.Error()) + continue + } + trans := newTransaction(msg, proc, remoteAddr, newUDPTransport(rp.udpConn)) + trans.process.Push(msg) + select { + case rp.transChan <- trans: + case <-rp.ctx.Done(): + 
case <-time.After(time.Millisecond * 100): + } + } + return +} + +func (rp *ReverseProxy) eventLoop() { + for { + select { + case trans := <-rp.transChan: + if err := rp.roundTripper(trans); err != nil { + log.Printf(err.Error()) + } + case <-rp.ctx.Done(): + return + } + } +} + +//Serve 开启服务 +func (rp *ReverseProxy) Serve(addr string) (err error) { + go func() { + err = rp.udpServe(addr) + }() + rp.eventLoop() + return +} + +//NewReverse 穿件一个代理服务 +func NewReverse(routes []*Route) *ReverseProxy { + proxy := &ReverseProxy{ + transChan: make(chan *Transaction, 1024), + ctx: context.Background(), + processes: make(map[string]*Process), + relationships: make(map[string]*Relationship), + routes: routes, + } + if proxy.routes == nil { + proxy.routes = make([]*Route, 0) + } + return proxy +} diff --git a/proxy/reverseproxy_test.go b/proxy/reverseproxy_test.go new file mode 100644 index 0000000..89010ee --- /dev/null +++ b/proxy/reverseproxy_test.go @@ -0,0 +1,10 @@ +package proxy + +import ( + "testing" +) + +func TestNewReverseProxy(t *testing.T) { + serve := NewReverse(nil) + _ = serve.Serve("192.168.4.169:5060") +} diff --git a/proxy/route.go b/proxy/route.go new file mode 100644 index 0000000..2989a82 --- /dev/null +++ b/proxy/route.go @@ -0,0 +1,14 @@ +package proxy + +//Route 代理走的路由规则 +type Route struct { + index int32 + Domain string `json:"domain" yaml:"domain"` //域名 + RewriteTo string `json:"rewrite_to" yaml:"rewriteTo"` //对域名进行重写处理 + Backend []string `json:"backend" yaml:"backend"` //代理的后端地址,多个地址使用轮询获取地址 +} + +func (r *Route) Address() string { + idx := int(r.index) % len(r.Backend) + return r.Backend[idx] +} diff --git a/proxy/transaction.go b/proxy/transaction.go new file mode 100644 index 0000000..157120d --- /dev/null +++ b/proxy/transaction.go @@ -0,0 +1,73 @@ +package proxy + +import ( + "github.com/uole/sip" + "net" +) + +type ( + rewriter struct { + From string + To string + } + + Transaction struct { + address string + process *Process + message 
*Message + transport Transport + } +) + +func (t *Transaction) ID() string { + return t.message.CallID() +} + +func (t *Transaction) Address() string { + return t.address +} + +func (t *Transaction) Caller() Conn { + return t.process.Caller() +} + +func (t *Transaction) Callee() Conn { + return t.process.Callee() +} + +func (t *Transaction) Request() *sip.Request { + return t.message.Request() +} + +func (t *Transaction) Response() *sip.Response { + return t.message.Response() +} + +func (t *Transaction) Rewrite() (*rewriter, bool) { + if t.process.route != nil { + if t.process.route.RewriteTo != "" { + return &rewriter{ + From: t.process.route.Domain, + To: t.process.route.RewriteTo, + }, true + } + } + if t.process.relationship != nil { + if t.process.relationship.OriginalDomain != t.process.relationship.Domain { + return &rewriter{ + From: t.process.relationship.OriginalDomain, + To: t.process.relationship.Domain, + }, true + } + } + return nil, false +} + +func newTransaction(msg *Message, proc *Process, source net.Addr, transport Transport) *Transaction { + return &Transaction{ + process: proc, + message: msg, + address: source.String(), + transport: transport, + } +} diff --git a/proxy/transport.go b/proxy/transport.go new file mode 100644 index 0000000..59ca9bc --- /dev/null +++ b/proxy/transport.go @@ -0,0 +1,24 @@ +package proxy + +import "net" + +type Transport interface { + Network() string + Addr() net.Addr +} + +type udpTransport struct { + conn *net.UDPConn +} + +func (t *udpTransport) Network() string { + return "UDP" +} + +func (t *udpTransport) Addr() net.Addr { + return t.conn.LocalAddr() +} + +func newUDPTransport(conn *net.UDPConn) *udpTransport { + return &udpTransport{conn: conn} +} diff --git a/request.go b/request.go new file mode 100644 index 0000000..782da8d --- /dev/null +++ b/request.go @@ -0,0 +1,168 @@ +package sip + +import ( + "bufio" + "context" + "github.com/google/uuid" + "io" + "strconv" + "strings" + "unsafe" +) + +type Request 
struct { + Method Method + Username string + Address string + Proto string + Header *Header + Body []byte + Params Map + Context context.Context +} + +func (r *Request) WithContext(ctx context.Context) *Request { + r.Context = ctx + return r +} + +func (r *Request) Clone() *Request { + req := &Request{ + Method: r.Method, + Username: r.Username, + Address: r.Address, + Proto: r.Proto, + Header: r.Header.Clone(), + Context: r.Context, + } + if r.Params != nil { + req.Params = r.Params.Clone() + } + if r.Body != nil { + req.Body = make([]byte, len(r.Body)) + copy(req.Body[:], r.Body[:]) + } + return req +} + +func (r *Request) CallID() string { + var callId string + if head := r.Header.Get(HeaderCallID); head == nil { + callId = uuid.New().String() + r.Header.Set(HeaderCallID, &PlainHeader{Content: callId}) + } else { + callId = head.(*PlainHeader).Content + } + return callId +} + +func (r *Request) Bytes() []byte { + str := r.String() + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{str, len(str)}, + )) +} + +func (r *Request) String() string { + var sb strings.Builder + sb.WriteString(string(r.Method) + " ") + sb.WriteString("sip:") + if r.Username != "" { + sb.WriteString(r.Username + "@") + } + sb.WriteString(r.Address) + if r.Params != nil { + sb.WriteString(";" + r.Params.String()) + } + sb.WriteString(" ") + sb.WriteString(r.Proto) + sb.WriteString("\r\n") + if len(r.Body) == 0 { + r.Header.Set(HeaderContentLength, &PlainHeader{Content: "0"}) + } else { + r.Header.Set(HeaderContentLength, &PlainHeader{Content: strconv.Itoa(len(r.Body))}) + } + sb.WriteString(r.Header.String()) + if r.Body != nil { + sb.Write(r.Body) + } + return sb.String() +} + +func parseRequestLine(line string) (method, requestURI, proto string, ok bool) { + s1 := strings.Index(line, " ") + s2 := strings.Index(line[s1+1:], " ") + if s1 < 0 || s2 < 0 { + return + } + s2 += s1 + 1 + return strings.TrimSpace(line[:s1]), strings.TrimSpace(line[s1+1 : s2]), 
strings.TrimSpace(line[s2+1:]), true +} + +func ReadRequest(b *bufio.Reader) (req *Request, err error) { + var ( + ok bool + method string + str string + buf []byte + pos int + contentLength int + ) + req = &Request{} + if buf, _, err = b.ReadLine(); err != nil { + return + } + if method, str, req.Proto, ok = parseRequestLine(string(buf)); !ok { + return + } + if pos = strings.Index(str, ":"); pos > -1 { + str = str[pos+1:] + } + if pos = strings.Index(str, "@"); pos > -1 { + req.Username = str[:pos] + str = str[pos+1:] + } + if pos = strings.Index(str, ";"); pos > -1 { + req.Address = str[:pos] + if req.Params, err = parseMap(str[pos+1:]); err != nil { + return + } + } else { + req.Address = str + } + req.Method = Method(method) + if req.Header, err = readHeader(b); err != nil { + return + } + if req.Header.Has(HeaderContentLength) { + contentLength, _ = strconv.Atoi(req.Header.Get(HeaderContentLength).String()) + if contentLength > 0 { + req.Body = make([]byte, contentLength) + contentLength, err = io.ReadFull(b, req.Body) + } + } + return +} + +func NewRequest(method Method, domain string) *Request { + req := &Request{ + Method: method, + Address: domain, + Proto: "SIP/2.0", + Header: &Header{}, + Body: nil, + } + return req +} + +func NewDefaultRequest(method Method, domain string) *Request { + req := NewRequest(method, domain) + req.Header.Set(HeaderAllow, NewArrayHeader("INVITE", "ACK", "CANCEL", "BYE", "NOTIFY", "REFER", "MESSAGE", "OPTIONS", "INFO", "SUBSCRIBE")) + req.Header.Set(HeaderSupported, NewArrayHeader("replaces", "norefersub", "extended-refer", "timer", "outbound", "path", "X-cisco-serviceuri")) + req.Header.Set(HeaderAllowEvents, NewArrayHeader("presence", "kpml")) + req.Header.Set(HeaderMaxForwards, NewMaxForwardHeader(70)) + return req +} diff --git a/request_test.go b/request_test.go new file mode 100644 index 0000000..e390bbc --- /dev/null +++ b/request_test.go @@ -0,0 +1,49 @@ +package sip + +import ( + "bufio" + "bytes" + "fmt" + "testing" 
+) + +func TestReadRequest(t *testing.T) { + s := []byte(`INVITE sip:6363@192.168.4.169:48273;rinstance=a73836e86ca6411f SIP/2.0 +Via: SIP/2.0/UDP 192.168.9.186:5060;rport;branch=z9hG4bKPjc04b36f9-2b54-4620-9693-cb7674e6954c +From: "15625229038" ;tag=73ddb69f-1471-454c-876f-a732b88f96fb +To: +Contact: +Call-ID: 91182449-1b4a-4488-a9ae-d150a2271cb8 +CSeq: 7286 INVITE +Allow: OPTIONS, SUBSCRIBE, NOTIFY, PUBLISH, INVITE, ACK, BYE, CANCEL, UPDATE, PRACK, REGISTER, REFER, MESSAGE +Supported: 100rel, timer, replaces, norefersub +Session-Expires: 1800 +Min-SE: 90 +Max-Forwards: 70 +User-Agent: FPBX-13.0.192.8(13.27.0) +Content-Type: application/sdp +Content-Length: 362 + +v=0 +o=- 961825727 961825727 IN IP4 192.168.9.186 +s=Asterisk +c=IN IP4 192.168.9.186 +t=0 0 +m=audio 10558 RTP/AVP 0 8 3 111 18 101 +a=rtpmap:0 PCMU/8000 +a=rtpmap:8 PCMA/8000 +a=rtpmap:3 GSM/8000 +a=rtpmap:111 G726-32/8000 +a=rtpmap:18 G729/8000 +a=fmtp:18 annexb=no +a=rtpmap:101 telephone-event/8000 +a=fmtp:101 0-16 +a=ptime:20 +a=maxptime:150 +a=sendrecv`) + if r, err := ReadRequest(bufio.NewReader(bytes.NewReader(s))); err != nil { + t.Error(err) + } else { + fmt.Println(r) + } +} diff --git a/response.go b/response.go new file mode 100644 index 0000000..cca421a --- /dev/null +++ b/response.go @@ -0,0 +1,140 @@ +package sip + +import ( + "bufio" + "io" + "strconv" + "strings" + "unsafe" +) + +type Response struct { + Proto string + Status string + StatusCode int + Header *Header + Body []byte + ContentLength int + Request *Request +} + +func parseResponseLine(line string) (proto string, statusCode int, status string, ok bool) { + s1 := strings.Index(line, " ") + s2 := strings.Index(line[s1+1:], " ") + if s1 < 0 || s2 < 0 { + return + } + var err error + s2 += s1 + 1 + proto = strings.TrimSpace(line[:s1]) + if statusCode, err = strconv.Atoi(strings.TrimSpace(line[s1:s2])); err == nil { + ok = true + } + status = strings.TrimSpace(line[s2:]) + return +} + +func ReadResponse(b *bufio.Reader) (res 
*Response, err error) { + var ( + ok bool + buf []byte + ) + res = &Response{} + if buf, _, err = b.ReadLine(); err != nil { + return + } + if res.Proto, res.StatusCode, res.Status, ok = parseResponseLine(string(buf)); !ok { + return + } + if res.Header, err = readHeader(b); err != nil { + return + } + res.ContentLength, _ = strconv.Atoi(res.Header.Get(HeaderContentLength).String()) + if res.ContentLength > 0 { + res.Body = make([]byte, res.ContentLength) + res.ContentLength, err = io.ReadFull(b, res.Body) + } + return +} +func (r *Response) Clone() *Response { + res := &Response{ + Proto: r.Proto, + Status: r.Status, + StatusCode: r.StatusCode, + Header: r.Header.Clone(), + Body: nil, + ContentLength: r.ContentLength, + Request: r.Request, + } + if r.Body != nil { + res.Body = make([]byte, len(r.Body)) + copy(res.Body, r.Body) + } + return res +} + +func (r *Response) CallID() string { + if head := r.Header.Get(HeaderCallID); head != nil { + return head.(*PlainHeader).Content + } + return "" +} + +func (r *Response) Bytes() []byte { + str := r.String() + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{str, len(str)}, + )) +} + +func (r *Response) String() string { + var sb strings.Builder + if r.StatusCode == 0 { + r.StatusCode = StatusOK + } + if r.Status == "" { + r.Status = StatusText(r.StatusCode) + } + if r.Proto == "" { + r.Proto = "SIP/2.0" + } + sb.WriteString(r.Proto + " " + strconv.Itoa(r.StatusCode) + " " + r.Status + "\r\n") + if len(r.Body) == 0 { + r.Header.Set(HeaderContentLength, &PlainHeader{Content: "0"}) + } else { + r.Header.Set(HeaderContentLength, &PlainHeader{Content: strconv.Itoa(len(r.Body))}) + } + sb.WriteString(r.Header.String()) + if r.Body != nil { + sb.Write(r.Body) + } + return sb.String() +} + +func NewResponse(code int, req *Request) *Response { + res := &Response{ + Proto: "SIP/2.0", + Status: StatusText(code), + Header: &Header{}, + StatusCode: code, + } + if req == nil { + return res + } + if 
req.Header.Has(HeaderCSeq) { + res.Header.Set(HeaderCSeq, req.Header.Get(HeaderCSeq).Clone()) + } + if req.Header.Has(HeaderCallID) { + res.Header.Set(HeaderCallID, req.Header.Get(HeaderCallID).Clone()) + } + if req.Header.Has(HeaderFrom) { + res.Header.Set(HeaderFrom, req.Header.Get(HeaderFrom).Clone()) + } + if req.Header.Has(HeaderTo) { + res.Header.Set(HeaderTo, req.Header.Get(HeaderTo).Clone()) + } + return res +} diff --git a/response_test.go b/response_test.go new file mode 100644 index 0000000..7323a1d --- /dev/null +++ b/response_test.go @@ -0,0 +1,25 @@ +package sip + +import ( + "bufio" + "bytes" + "fmt" + "testing" +) + +func TestReadResponse(t *testing.T) { + s := []byte(`SIP/2.0 100 Trying +Via: SIP/2.0/UDP 192.168.4.169:5060;branch=z9hG4bK-524287-1---c34e1fe4153b4900 +From: ;tag=dd669b44 +To: +Call-ID: 42VFkMGXZKZJ9Bz5Jfs3GQ.. +CSeq: 2 INVITE +User-Agent: FreeSWITCH-mod_sofia/1.8.6~64bit +Content-Length: 0 +`) + if r, err := ReadResponse(bufio.NewReader(bytes.NewReader(s))); err != nil { + t.Error(err) + } else { + fmt.Println(r) + } +} diff --git a/status.go b/status.go new file mode 100644 index 0000000..2338a65 --- /dev/null +++ b/status.go @@ -0,0 +1,157 @@ +package sip + +const ( + MaxCseq = 2147483647 +) + +const ( + StatusTrying = 100 //An attempt is made to transfer the call. + StatusRinging = 180 //An attempt is made to ring from the called party. + StatusCallIsBeingForwarded = 181 //The call is forwarded. + StatusQueued = 182 //The call is on hold. + StatusSessionProgress = 183 //The connection is established. + StatusEarlyDialogTerminated = 199 //The dialog was closed during connection setup. + StatusOK = 200 //The request has been processed successfully and the result of the request is transferred in the response. + StatusAccepted = 202 //The request has been accepted, but will be executed at a later time. + StatusNoNotification = 204 //The request was executed successfully, but the corresponding response is deliberately not sent. 
+ StatusMultipleChoices = 300 //There is no unique destination address for the remote terminal. + StatusMovedPermanently = 301 //The called party is permanently reachable somewhere else. + StatusMovedTemporarily = 302 //The called party is temporarily reachable somewhere else. + StatusUseProxy = 305 //The specified proxy must be used. + StatusAlternativeService = 380 //The call was not successful, but alternative services are available. + StatusBadRequest = 400 //The SIP request is incorrect. + StatusUnauthorized = 401 //The authorization is incorrect. + StatusPaymentRequired = 402 //Not yet defined; intended for "not enough credit available". + StatusForbidden = 403 //The request was invalid. + StatusNotFound = 404 //The remote terminal was not found or does not exist. + StatusMethodNotAllowed = 405 //The method of the request (for example, SUBSCRIBE or NOTIFY) is not allowed. + StatusNotAcceptable = 406 //The call options are not allowed. + StatusProxyAuthenticationRequired = 407 //The proxy needs authorization. + StatusRequestTimeout = 408 //Timeout - The remote peer does not respond within a reasonable time. + StatusGone = 410 //The desired subscriber can no longer be reached at the specified address. + StatusConditionalRequestFailed = 412 //The prerequisites for processing the request could not be met because a request required for this failed. + StatusRequestEntityTooLarge = 413 //The message content is too large. + StatusRequestURITooLong = 414 //The SIP address (URI) of the request is too long. + StatusUnsupportedMediaType = 415 //The codec is not supported. + StatusUnsupportedURIScheme = 416 //The SIP address is incorrect. + StatusUnknownResourcePriority = 417 //The request should be treated with a certain priority, but the server does not understand the details. + StatusBadExtension = 420 //The server does not understand a protocol extension. + StatusExtensionRequired = 421 //The server needs a protocol extension. 
+ StatusSessionIntervalTooSmall = 422 //The Session Expires value is too low for the server. + StatusIntervalTooBrief = 423 //The value of the desired machining time is too short. + StatusUseIdentityHeader = 428 //The identity header is missing. + StatusProvideReferrerIdentity = 429 //No valid referred by token is specified. + StatusFlowFailed = 430 //The particular routing failed (proxy internal, endpoints should treat the response like code 400). + StatusAnonymityDisallowed = 433 //The server refuses to process anonymous requests. + StatusBadIdentityInfo = 436 //The SIP address contained in the identity header is invalid, unavailable, or not supported. + StatusUnsupportedCertificate = 437 //The verifier cannot verify the certificate in the identity header. + StatusInvalidIdentityHeader = 438 //The certificate in the identity header is invalid. + StatusFirstHopLacksOutboundSupport = 439 //The registrar supports outbound feature, but the proxy used does not. + StatusMaxBreadthExceeded = 440 //It is no longer possible to derive concurrent forks from the query. + StatusBadInfoPackage = 469 //Unsuitable Info-Package - Transmission error, resend. + StatusConsentNeeded = 470 //The server has no access rights to at least one of the specified SIP addresses. + StatusTemporarilyUnavailable = 480 //The called party is currently not reachable. + StatusCallTransactionDoesNotExist = 481 //This connection does not exist (anymore). + StatusLoopDetected = 482 //A forwarding loop has been detected. + StatusTooManyHops = 483 //Too many forwarding steps were identified. + StatusAddressIncomplete = 484 //The SIP address is incomplete. + StatusAmbiguous = 485 //The SIP address cannot be uniquely resolved. + StatusBusyHere = 486 //The called party is busy. + StatusRequestTerminated = 487 //The call attempt was aborted. + StatusNotAcceptableHere = 488 //Illegal call attempt. + StatusBadEvent = 489 //The server does not know the specified event. 
+ StatusRequestPending = 491 //A request from the same dialog is still being processed. + StatusUndecipherable = 493 //The request contains an encrypted MIME body that the recipient cannot decrypt. + StatusSecurityAgreementRequired = 494 //The request requires a security agreement, but does not include a security mechanism supported by the server. + StatusServerInternalError = 500 //Internal server error. + StatusNotImplemented = 501 //The server does not support the SIP request. + StatusBadGateway = 502 //The gateway in the SIP request is corrupted. + StatusServiceUnavailable = 503 //The server's SIP service is temporarily unavailable. + StatusServerTimeout = 504 //The server cannot reach another server in a reasonable time. + StatusVersionNotSupported = 505 //The SIP protocol version is not supported by the server. + StatusMessageTooLarge = 513 //The SIP message is too large for UDP; TCP must be used. + StatusPreconditionFailure = 580 //The server cannot or does not want to meet the requirements for processing the request. + StatusBusyEverywhere = 600 //All terminal devices of the called subscriber are occupied. + StatusDeclined = 603 //The called party has rejected the call attempt. + StatusDoesNotExistAnywhere = 604 //The called party no longer exists. + StatusPartyHangsUp = 701 //The called party has hung up. 
+) + +var statusText = map[int]string{ + StatusTrying: "Trying", + StatusRinging: "Ringing", + StatusCallIsBeingForwarded: "Call Is Being Forwarded", + StatusQueued: "Queued", + StatusSessionProgress: "Session Progress", + StatusEarlyDialogTerminated: "Early Dialog Terminated", + StatusOK: "OK", + StatusAccepted: "Accepted", + StatusNoNotification: "No Notification", + StatusMultipleChoices: "Multiple Choices", + StatusMovedPermanently: "Moved Permanently", + StatusMovedTemporarily: "Moved Temporarily", + StatusUseProxy: "Use Proxy", + StatusAlternativeService: "Alternative Service", + StatusBadRequest: "Bad Request", + StatusUnauthorized: "Unauthorized", + StatusPaymentRequired: "Payment Required", + StatusForbidden: "Forbidden", + StatusNotFound: "Not Found", + StatusMethodNotAllowed: "Method Not Allowed", + StatusNotAcceptable: "Not Acceptable", + StatusProxyAuthenticationRequired: "Proxy Authentication Required", + StatusRequestTimeout: "Request Timeout", + StatusGone: "Gone", + StatusConditionalRequestFailed: "Conditional Request Failed", + StatusRequestEntityTooLarge: "Request Entity Too Large", + StatusRequestURITooLong: "Request URI Too Long", + StatusUnsupportedMediaType: "Unsupported Media Type", + StatusUnsupportedURIScheme: "Unsupported URI Scheme", + StatusUnknownResourcePriority: "Unknown Resource-Priority", + StatusBadExtension: "Bad Extension", + StatusExtensionRequired: "Extension Required", + StatusSessionIntervalTooSmall: "Session Interval Too Small", + StatusIntervalTooBrief: "Interval Too Brief", + StatusUseIdentityHeader: "Use Identity Header", + StatusProvideReferrerIdentity: "Provide Referrer Identity", + StatusFlowFailed: "Flow Failed", + StatusAnonymityDisallowed: "Anonymity Disallowed", + StatusBadIdentityInfo: "Bad Identity-Info", + StatusUnsupportedCertificate: "Unsupported Certificate", + StatusInvalidIdentityHeader: "Invalid Identity Header", + StatusFirstHopLacksOutboundSupport: "First Hop Lacks Outbound Support", + 
StatusMaxBreadthExceeded:           "Max-Breadth Exceeded",
+	StatusBadInfoPackage:               "Bad Info Package",
+	StatusConsentNeeded:                "Consent Needed",
+	StatusTemporarilyUnavailable:       "Temporarily Unavailable",
+	StatusCallTransactionDoesNotExist:  "Call/Transaction Does Not Exist",
+	StatusLoopDetected:                 "Loop Detected",
+	StatusTooManyHops:                  "Too Many Hops",
+	StatusAddressIncomplete:            "Address Incomplete",
+	StatusAmbiguous:                    "Ambiguous",
+	StatusBusyHere:                     "Busy Here",
+	StatusRequestTerminated:            "Request Terminated",
+	StatusNotAcceptableHere:            "Not Acceptable Here",
+	StatusBadEvent:                     "Bad Event",
+	StatusRequestPending:               "Request Pending",
+	StatusUndecipherable:               "Undecipherable",
+	StatusSecurityAgreementRequired:    "Security Agreement Required",
+	StatusServerInternalError:          "Server Internal Error",
+	StatusNotImplemented:               "Not Implemented",
+	StatusBadGateway:                   "Bad Gateway",
+	StatusServiceUnavailable:           "Service Unavailable",
+	StatusServerTimeout:                "Server Time-out",
+	StatusVersionNotSupported:          "Version Not Supported",
+	StatusMessageTooLarge:              "Message Too Large",
+	StatusPreconditionFailure:          "Precondition Failure",
+	StatusBusyEverywhere:               "Busy Everywhere",
+	StatusDeclined:                     "Declined",
+	StatusDoesNotExistAnywhere:         "Does Not Exist Anywhere",
+	StatusPartyHangsUp:                 "Party Hangs Up",
+}
+
+// StatusText returns a text for the SIP status code. It returns the empty
+// string if the code is unknown.
+func StatusText(code int) string { + return statusText[code] +} diff --git a/transaction.go b/transaction.go new file mode 100644 index 0000000..b57d6ef --- /dev/null +++ b/transaction.go @@ -0,0 +1,39 @@ +package sip + +import ( + "time" +) + +type Transaction struct { + ID string + CreatedAt time.Time + c chan *Response +} + +func (t *Transaction) notify(res *Response) { + if t.c != nil { + select { + case t.c <- res: + default: + } + } +} + +//Chan 获取事件 +func (t *Transaction) Chan() chan *Response { + return t.c +} + +//Close 关闭事物 +func (t *Transaction) Close() (err error) { + close(t.c) + return +} + +func newTransaction(id string) *Transaction { + return &Transaction{ + ID: id, + CreatedAt: time.Now(), + c: make(chan *Response, 1), + } +} diff --git a/transport.go b/transport.go new file mode 100644 index 0000000..c24b39d --- /dev/null +++ b/transport.go @@ -0,0 +1,25 @@ +package sip + +import ( + "context" + "net" +) + +const ( + ProtoUDP = "UDP" + ProtoTCP = "TCP" +) + +type ( + ProcessFunc func(res *Response) (handled bool, err error) + + Transport interface { + Dial(addr string) (err error) + Protocol() string + Conn() net.Conn + Request() chan *Request + Do(ctx context.Context, req *Request, fun ProcessFunc) (err error) + Write(p []byte) (n int, err error) + Close() (err error) + } +) diff --git a/udp_transport.go b/udp_transport.go new file mode 100644 index 0000000..cf6c4f7 --- /dev/null +++ b/udp_transport.go @@ -0,0 +1,180 @@ +package sip + +import ( + "bytes" + "context" + "github.com/uole/sip/pool" + "io" + "log" + "net" + "sync" + "time" +) + +var ( + responseFeature = []byte("SIP") +) + +type UDPTransport struct { + conn *net.UDPConn + transMutex sync.RWMutex + reqChan chan *Request + transactions []*Transaction +} + +func (tp *UDPTransport) Protocol() string { + return ProtoUDP +} + +func (tp *UDPTransport) Conn() net.Conn { + return tp.conn +} + +func (tp *UDPTransport) Request() chan *Request { + return tp.reqChan +} + +//traceTransaction 
appends t to the list of transactions awaiting a response
+func (tp *UDPTransport) traceTransaction(t *Transaction) {
+	tp.transMutex.Lock()
+	defer tp.transMutex.Unlock()
+	if tp.transactions == nil {
+		tp.transactions = make([]*Transaction, 0)
+	}
+	tp.transactions = append(tp.transactions, t)
+}
+
+//notifyTransaction delivers res to the pending transaction whose ID matches its Call-ID
+func (tp *UDPTransport) notifyTransaction(res *Response) (err error) {
+	tp.transMutex.Lock()
+	defer tp.transMutex.Unlock()
+	callID := res.CallID()
+	for _, trans := range tp.transactions {
+		if trans.ID == callID {
+			trans.notify(res)
+			break
+		}
+	}
+	return
+}
+
+//releaseTransaction removes the given transaction from the trace list
+func (tp *UDPTransport) releaseTransaction(trans *Transaction) {
+	tp.transMutex.Lock()
+	defer tp.transMutex.Unlock()
+	for i, v := range tp.transactions {
+		if trans.ID == v.ID {
+			tp.transactions = append(tp.transactions[:i], tp.transactions[i+1:]...)
+			return
+		}
+	}
+}
+
+//Dial connects to addr over UDP and starts the background read loop
+func (tp *UDPTransport) Dial(addr string) (err error) {
+	var udpAddr *net.UDPAddr
+	if udpAddr, err = net.ResolveUDPAddr("udp", addr); err != nil {
+		return
+	}
+	if tp.conn, err = net.DialUDP("udp", nil, udpAddr); err != nil {
+		return
+	}
+	go tp.exchange()
+	return
+}
+
+//exchange reads datagrams and dispatches them as requests or responses
+func (tp *UDPTransport) exchange() {
+	var (
+		n          int
+		err        error
+		buf        []byte
+		p          []byte
+		res        *Response
+		req        *Request
+		addr       net.Addr = tp.conn.RemoteAddr()
+		isResponse bool
+	)
+	buf = make([]byte, 1024*10)
+	for {
+		if n, err = tp.conn.Read(buf); err != nil {
+			continue
+		}
+		if n < 3 {
+			continue
+		}
+		p = buf[:n]
+		//parse the body
+		bytesReader := pool.GetBytesReader(buf[:n])
+		bufioReader := pool.GetBufioReader(bytesReader)
+		if bytes.Equal(p[:3], responseFeature) {
+			res, err = ReadResponse(bufioReader)
+			isResponse = true
+		} else {
+			req, err = ReadRequest(bufioReader)
+			isResponse = false
+		}
+		pool.PutBytesReader(bytesReader)
+		pool.PutBufioReader(bufioReader)
+		//parse failed
+		if err != nil {
+			log.Printf("parse buffer from %s: %s error: %s", addr.String(), string(p), err.Error())
+			continue
+		}
+		if isResponse {
+			
err = tp.notifyTransaction(res) + } else { + select { + case tp.reqChan <- req: + case <-time.After(time.Millisecond * 200): + log.Printf("put %s request timeout", req.Method) + } + } + } +} + +func (tp *UDPTransport) Write(p []byte) (n int, err error) { + if tp.conn != nil { + return tp.conn.Write(p) + } else { + err = io.ErrClosedPipe + } + return +} + +func (tp *UDPTransport) Do(ctx context.Context, req *Request, callback ProcessFunc) (err error) { + var ( + ok bool + res *Response + trans *Transaction + ) + if _, err = tp.conn.Write(req.Bytes()); err != nil { + return + } + trans = newTransaction(req.CallID()) + tp.traceTransaction(trans) + defer tp.releaseTransaction(trans) + for { + select { + case res = <-trans.Chan(): + ok, err = callback(res) + if ok || err != nil { + return + } + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} + +func (tp *UDPTransport) Close() (err error) { + if tp.conn != nil { + err = tp.conn.Close() + } + return +} + +func NewUDPTransport() Transport { + return &UDPTransport{reqChan: make(chan *Request, 100)} +} diff --git a/uri.go b/uri.go new file mode 100644 index 0000000..b205473 --- /dev/null +++ b/uri.go @@ -0,0 +1,270 @@ +package sip + +import ( + "net" + "net/url" + "strconv" + "strings" +) + +type ( + Uri struct { + IsEncrypted bool + HasProtocol bool + User string + Password string + Host string + Port int + Params Map //params + Queries Map //queries + } + + Map map[string]string +) + +func NewUri(user, addr string, ps Map) *Uri { + uri := &Uri{ + IsEncrypted: false, + User: user, + Params: ps, + Queries: Map{}, + } + uri.SetAddress(addr) + return uri +} + +func (uri *Uri) EnableProtocol() *Uri { + uri.HasProtocol = true + return uri +} + +func (uri *Uri) SetParams(m Map) *Uri { + uri.Params = m + return uri +} + +func (uri *Uri) SetAddress(addr string) *Uri { + if strings.Index(addr, ":") > -1 { + if host, port, err := net.SplitHostPort(addr); err == nil { + uri.Host = host + uri.Port, _ = strconv.Atoi(port) + } + 
} else { + uri.Host = addr + } + return uri +} + +func (uri *Uri) Address() string { + return net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) +} + +func (m *Map) Set(k, v string) { + if m == nil || len(*m) == 0 { + *m = make(map[string]string) + } + mp := *m + mp[k] = v +} + +func (m Map) Get(k string) string { + if m == nil { + return "" + } + return m[k] +} + +func (m Map) ToString(sep string) string { + var ( + i int + length int + sb strings.Builder + ) + length = len(m) + for k, v := range m { + sb.WriteString(k) + if len(v) > 0 { + sb.WriteString("=") + if strings.ContainsAny(v, " \t") { + sb.WriteString("\"") + sb.WriteString(v) + sb.WriteString("\"") + } else { + sb.WriteString(v) + } + } + i++ + if i < length { + sb.WriteString(sep) + } + } + return sb.String() +} + +func (m Map) String() string { + return m.ToString(";") +} + +func (m Map) Clone() Map { + mm := make(map[string]string) + for k, v := range m { + mm[k] = v + } + return mm +} + +func (uri *Uri) ParamValue(name string) string { + return uri.Params.Get(name) +} + +func (uri *Uri) FormValue(name string) string { + return uri.Queries.Get(name) +} + +func (uri *Uri) Clone() *Uri { + u := &Uri{ + HasProtocol: uri.HasProtocol, + IsEncrypted: uri.IsEncrypted, + User: uri.User, + Password: uri.Password, + Host: uri.Host, + Port: uri.Port, + } + if uri.Params != nil { + u.Params = uri.Params.Clone() + } + if uri.Queries != nil { + u.Queries = uri.Queries.Clone() + } + return u +} + +func (uri *Uri) String() string { + var sb strings.Builder + // Compulsory protocol identifier. + if uri.HasProtocol { + if uri.IsEncrypted { + sb.WriteString("sips") + sb.WriteString(":") + } else { + sb.WriteString("sip") + sb.WriteString(":") + } + } + if uri.User != "" { + sb.WriteString(uri.User) + if uri.Password != "" { + sb.WriteString(":" + uri.Password) + } + sb.WriteString("@") + } + // Compulsory hostname. + sb.WriteString(uri.Host) + // Optional port number. 
+ if uri.Port != 0 { + sb.WriteString(":") + sb.WriteString(strconv.Itoa(int(uri.Port))) + } + if (uri.Params != nil) && len(uri.Params) > 0 { + sb.WriteString(";") + sb.WriteString(uri.Params.String()) + } + if (uri.Queries != nil) && len(uri.Queries) > 0 { + sb.WriteString("?") + sb.WriteString(uri.Queries.ToString("&")) + } + return sb.String() +} + +func parseMap(s string) (m Map, err error) { + for s != "" { + key := s + if i := strings.IndexAny(key, "&;"); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + key, err1 := url.QueryUnescape(key) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + value, err1 = url.QueryUnescape(value) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + if len(value) > 0 { + value = strings.Trim(value, "\"") + value = strings.TrimSpace(value) + } + m.Set(key, value) + } + return +} + +//parseUri parse uri from string +func parseUri(s string) (uri *Uri, err error) { + var ( + pos int + endOfUserPos int + netAddrStr string + netSplitPos int + p string + ) + uri = &Uri{} + p = s + //sip or sips + if pos = strings.Index(p, ":"); pos > -1 && pos < 4 { + uri.HasProtocol = true + if strings.ToLower(p[:pos]) == "sips" { + uri.IsEncrypted = true + } + p = p[pos+1:] + } + if pos = strings.Index(p, "@"); pos != -1 { + if endOfUserPos = strings.Index(p[:pos], ":"); endOfUserPos == -1 { + uri.User = p[:pos] + } else { + uri.User = p[:endOfUserPos] + uri.Password = p[endOfUserPos+1 : pos] + } + p = p[pos+1:] + } + if pos = strings.IndexAny(p, ";?"); pos == -1 { + netAddrStr = p + } else { + netAddrStr = p[:pos] + } + if netSplitPos = strings.Index(netAddrStr, ":"); netSplitPos == -1 { + uri.Host = netAddrStr + } else { + uri.Host = netAddrStr[:netSplitPos] + uri.Port, _ = strconv.Atoi(netAddrStr[netSplitPos+1:]) + } + if pos > -1 { + p = p[pos:] + if p[0] == '?' 
{ //queries + uri.Queries, err = parseMap(p[1:]) + } else { //params + if pos = strings.Index(p, "?"); pos == -1 { + uri.Params, err = parseMap(p[1:]) + } else { + uri.Params, err = parseMap(p[1:pos]) + uri.Queries, err = parseMap(p[pos+1:]) + } + } + } + return +} diff --git a/uri_test.go b/uri_test.go new file mode 100644 index 0000000..a3de755 --- /dev/null +++ b/uri_test.go @@ -0,0 +1,22 @@ +package sip + +import ( + "fmt" + "testing" +) + +func TestMap_Set(t *testing.T) { + var m Map + m.Set("a", "b") + m.Set("aaa", "sdas\tdsds") + t.Log(m.Get("a")) + t.Log(m.String()) +} + +func Test_parseUri(t *testing.T) { + if uri, err := parseUri("sip:1000:15625229038@192.168.4.169:40828;rinstance=e7be6d7faa64ed3f;transport=tcp?a=b&c=\"dsa\t !#$$ d\"&d=f"); err != nil { + t.Error(err) + } else { + fmt.Println(uri) + } +} diff --git a/utils.go b/utils.go new file mode 100644 index 0000000..6afce33 --- /dev/null +++ b/utils.go @@ -0,0 +1,9 @@ +package sip + +import "crypto/md5" + +func MD5(b []byte) []byte { + hash := md5.New() + hash.Write(b) + return hash.Sum(nil) +} diff --git a/vendor/git.nspix.com/golang/micro/helper/random/int.go b/vendor/git.nspix.com/golang/micro/helper/random/int.go new file mode 100644 index 0000000..d46da2f --- /dev/null +++ b/vendor/git.nspix.com/golang/micro/helper/random/int.go @@ -0,0 +1,11 @@ +package random + +import ( + "math/rand" + "time" +) + +func Int(min, max int64) int64 { + rand.Seed(time.Now().UnixNano()) + return min + rand.Int63n(max-min) +} \ No newline at end of file diff --git a/vendor/git.nspix.com/golang/micro/helper/random/ip.go b/vendor/git.nspix.com/golang/micro/helper/random/ip.go new file mode 100644 index 0000000..2aad1ea --- /dev/null +++ b/vendor/git.nspix.com/golang/micro/helper/random/ip.go @@ -0,0 +1,21 @@ +package random + +import ( + "strconv" + "strings" +) + +var ( + ipSet = 
strings.Split("58.14.0.0,58.16.0.0,58.24.0.0,58.30.0.0,58.32.0.0,58.66.0.0,58.68.128.0,58.82.0.0,58.87.64.0,58.99.128.0,58.100.0.0,58.116.0.0,58.128.0.0,58.144.0.0,58.154.0.0,58.192.0.0,58.240.0.0,59.32.0.0,59.64.0.0,59.80.0.0,59.107.0.0,59.108.0.0,59.151.0.0,59.155.0.0,59.172.0.0,59.191.0.0,59.191.240.0,59.192.0.0,60.0.0.0,60.55.0.0,60.63.0.0,60.160.0.0,60.194.0.0,60.200.0.0,60.208.0.0,60.232.0.0,60.235.0.0,60.245.128.0,60.247.0.0,60.252.0.0,60.253.128.0,60.255.0.0,61.4.80.0,61.4.176.0,61.8.160.0,61.28.0.0,61.29.128.0,61.45.128.0,61.47.128.0,61.48.0.0,61.87.192.0,61.128.0.0,61.232.0.0,61.236.0.0,61.240.0.0,114.28.0.0,114.54.0.0,114.60.0.0,114.64.0.0,114.68.0.0,114.80.0.0,116.1.0.0,116.2.0.0,116.4.0.0,116.8.0.0,116.13.0.0,116.16.0.0,116.52.0.0,116.56.0.0,116.58.128.0,116.58.208.0,116.60.0.0,116.66.0.0,116.69.0.0,116.70.0.0,116.76.0.0,116.89.144.0,116.90.184.0,116.95.0.0,116.112.0.0,116.116.0.0,116.128.0.0,116.192.0.0,116.193.16.0,116.193.32.0,116.194.0.0,116.196.0.0,116.198.0.0,116.199.0.0,116.199.128.0,116.204.0.0,116.207.0.0,116.208.0.0,116.212.160.0,116.213.64.0,116.213.128.0,116.214.32.0,116.214.64.0,116.214.128.0,116.215.0.0,116.216.0.0,116.224.0.0,116.242.0.0,116.244.0.0,116.248.0.0,116.252.0.0,116.254.128.0,116.255.128.0,117.8.0.0,117.21.0.0,117.22.0.0,117.24.0.0,117.32.0.0,117.40.0.0,117.44.0.0,117.48.0.0,117.53.48.0,117.53.176.0,117.57.0.0,117.58.0.0,117.59.0.0,117.60.0.0,117.64.0.0,117.72.0.0,117.74.64.0,117.74.128.0,117.75.0.0,117.76.0.0,117.80.0.0,117.100.0.0,117.103.16.0,117.103.128.0,117.106.0.0,117.112.0.0,117.120.64.0,117.120.128.0,117.121.0.0,117.121.128.0,117.121.192.0,117.122.128.0,117.124.0.0,117.128.0.0,118.24.0.0,118.64.0.0,118.66.0.0,118.67.112.0,118.72.0.0,118.80.0.0,118.84.0.0,118.88.32.0,118.88.64.0,118.88.128.0,118.89.0.0,118.91.240.0,118.102.16.0,118.112.0.0,118.120.0.0,118.124.0.0,118.126.0.0,118.132.0.0,118.144.0.0,118.178.0.0,118.180.0.0,118.184.0.0,118.192.0.0,118.212.0.0,118.224.0.0,118.228.0.0,118.230.0.0,118.239.0.0,118.242.0.0,118
.244.0.0,118.248.0.0,119.0.0.0,119.2.0.0,119.2.128.0,119.3.0.0,119.4.0.0,119.8.0.0,119.10.0.0,119.15.136.0,119.16.0.0,119.18.192.0,119.18.208.0,119.18.224.0,119.19.0.0,119.20.0.0,119.27.64.0,119.27.160.0,119.27.192.0,119.28.0.0,119.30.48.0,119.31.192.0,119.32.0.0,119.40.0.0,119.40.64.0,119.40.128.0,119.41.0.0,119.42.0.0,119.42.136.0,119.42.224.0,119.44.0.0,119.48.0.0,119.57.0.0,119.58.0.0,119.59.128.0,119.60.0.0,119.62.0.0,119.63.32.0,119.75.208.0,119.78.0.0,119.80.0.0,119.84.0.0,119.88.0.0,119.96.0.0,119.108.0.0,119.112.0.0,119.128.0.0,119.144.0.0,119.148.160.0,119.161.128.0,119.162.0.0,119.164.0.0,119.176.0.0,119.232.0.0,119.235.128.0,119.248.0.0,119.253.0.0,119.254.0.0,120.0.0.0,120.24.0.0,120.30.0.0,120.32.0.0,120.48.0.0,120.52.0.0,120.64.0.0,120.72.32.0,120.72.128.0,120.76.0.0,120.80.0.0,120.90.0.0,120.92.0.0,120.94.0.0,120.128.0.0,120.136.128.0,120.137.0.0,120.192.0.0,121.0.16.0,121.4.0.0,121.8.0.0,121.16.0.0,121.32.0.0,121.40.0.0,121.46.0.0,121.48.0.0,121.51.0.0,121.52.160.0,121.52.208.0,121.52.224.0,121.55.0.0,121.56.0.0,121.58.0.0,121.58.144.0,121.59.0.0,121.60.0.0,121.68.0.0,121.76.0.0,121.79.128.0,121.89.0.0,121.100.128.0,121.101.208.0,121.192.0.0,121.201.0.0,121.204.0.0,121.224.0.0,121.248.0.0,121.255.0.0,122.0.64.0,122.0.128.0,122.4.0.0,122.8.0.0,122.48.0.0,122.49.0.0,122.51.0.0,122.64.0.0,122.96.0.0,122.102.0.0,122.102.64.0,122.112.0.0,122.119.0.0,122.136.0.0,122.144.128.0,122.152.192.0,122.156.0.0,122.192.0.0,122.198.0.0,122.200.64.0,122.204.0.0,122.224.0.0,122.240.0.0,122.248.48.0,123.0.128.0,123.4.0.0,123.8.0.0,123.49.128.0,123.52.0.0,123.56.0.0,123.64.0.0,123.96.0.0,123.98.0.0,123.99.128.0,123.100.0.0,123.101.0.0,123.103.0.0,123.108.128.0,123.108.208.0,123.112.0.0,123.128.0.0,123.136.80.0,123.137.0.0,123.138.0.0,123.144.0.0,123.160.0.0,123.176.80.0,123.177.0.0,123.178.0.0,123.180.0.0,123.184.0.0,123.196.0.0,123.199.128.0,123.206.0.0,123.232.0.0,123.242.0.0,123.244.0.0,123.249.0.0,123.253.0.0,124.6.64.0,124.14.0.0,124.16.0.0,124.20.0.0,124.28.192.0,
124.29.0.0,124.31.0.0,124.40.112.0,124.40.128.0,124.42.0.0,124.47.0.0,124.64.0.0,124.66.0.0,124.67.0.0,124.68.0.0,124.72.0.0,124.88.0.0,124.108.8.0,124.108.40.0,124.112.0.0,124.126.0.0,124.128.0.0,124.147.128.0,124.156.0.0,124.160.0.0,124.172.0.0,124.192.0.0,124.196.0.0,124.200.0.0,124.220.0.0,124.224.0.0,124.240.0.0,124.240.128.0,124.242.0.0,124.243.192.0,124.248.0.0,124.249.0.0,124.250.0.0,124.254.0.0,125.31.192.0,125.32.0.0,125.58.128.0,125.61.128.0,125.62.0.0,125.64.0.0,125.96.0.0,125.98.0.0,125.104.0.0,125.112.0.0,125.169.0.0,125.171.0.0,125.208.0.0,125.210.0.0,125.213.0.0,125.214.96.0,125.215.0.0,125.216.0.0,125.254.128.0,134.196.0.0,159.226.0.0,161.207.0.0,162.105.0.0,166.111.0.0,167.139.0.0,168.160.0.0,169.211.1.0,192.83.122.0,192.83.169.0,192.124.154.0,192.188.170.0,198.17.7.0,202.0.110.0,202.0.176.0,202.4.128.0,202.4.252.0,202.8.128.0,202.10.64.0,202.14.88.0,202.14.235.0,202.14.236.0,202.14.238.0,202.20.120.0,202.22.248.0,202.38.0.0,202.38.64.0,202.38.128.0,202.38.136.0,202.38.138.0,202.38.140.0,202.38.146.0,202.38.149.0,202.38.150.0,202.38.152.0,202.38.156.0,202.38.158.0,202.38.160.0,202.38.164.0,202.38.168.0,202.38.176.0,202.38.184.0,202.38.192.0,202.41.152.0,202.41.240.0,202.43.144.0,202.46.32.0,202.46.224.0,202.60.112.0,202.63.248.0,202.69.4.0,202.69.16.0,202.70.0.0,202.74.8.0,202.75.208.0,202.85.208.0,202.90.0.0,202.90.224.0,202.90.252.0,202.91.0.0,202.91.128.0,202.91.176.0,202.91.224.0,202.92.0.0,202.92.252.0,202.93.0.0,202.93.252.0,202.95.0.0,202.95.252.0,202.96.0.0,202.112.0.0,202.120.0.0,202.122.0.0,202.122.32.0,202.122.64.0,202.122.112.0,202.122.128.0,202.123.96.0,202.124.24.0,202.125.176.0,202.127.0.0,202.127.12.0,202.127.16.0,202.127.40.0,202.127.48.0,202.127.112.0,202.127.128.0,202.127.160.0,202.127.192.0,202.127.208.0,202.127.212.0,202.127.216.0,202.127.224.0,202.130.0.0,202.130.224.0,202.131.16.0,202.131.48.0,202.131.208.0,202.136.48.0,202.136.208.0,202.136.224.0,202.141.160.0,202.142.16.0,202.143.16.0,202.148.96.0,202.149.160.0,202.149.224.
0,202.150.16.0,202.152.176.0,202.153.48.0,202.158.160.0,202.160.176.0,202.164.0.0,202.164.25.0,202.165.96.0,202.165.176.0,202.165.208.0,202.168.160.0,202.170.128.0,202.170.216.0,202.173.8.0,202.173.224.0,202.179.240.0,202.180.128.0,202.181.112.0,202.189.80.0,202.192.0.0,203.18.50.0,203.79.0.0,203.80.144.0,203.81.16.0,203.83.56.0,203.86.0.0,203.86.64.0,203.88.32.0,203.88.192.0,203.89.0.0,203.90.0.0,203.90.128.0,203.90.192.0,203.91.32.0,203.91.96.0,203.91.120.0,203.92.0.0,203.92.160.0,203.93.0.0,203.94.0.0,203.95.0.0,203.95.96.0,203.99.16.0,203.99.80.0,203.100.32.0,203.100.80.0,203.100.96.0,203.100.192.0,203.110.160.0,203.118.192.0,203.119.24.0,203.119.32.0,203.128.32.0,203.128.96.0,203.130.32.0,203.132.32.0,203.134.240.0,203.135.96.0,203.135.160.0,203.142.219.0,203.148.0.0,203.152.64.0,203.156.192.0,203.158.16.0,203.161.192.0,203.166.160.0,203.171.224.0,203.174.7.0,203.174.96.0,203.175.128.0,203.175.192.0,203.176.168.0,203.184.80.0,203.187.160.0,203.190.96.0,203.191.16.0,203.191.64.0,203.191.144.0,203.192.0.0,203.196.0.0,203.207.64.0,203.207.128.0,203.208.0.0,203.208.16.0,203.208.32.0,203.209.224.0,203.212.0.0,203.212.80.0,203.222.192.0,203.223.0.0,210.2.0.0,210.5.0.0,210.5.144.0,210.12.0.0,210.14.64.0,210.14.112.0,210.14.128.0,210.15.0.0,210.15.128.0,210.16.128.0,210.21.0.0,210.22.0.0,210.23.32.0,210.25.0.0,210.26.0.0,210.28.0.0,210.32.0.0,210.51.0.0,210.52.0.0,210.56.192.0,210.72.0.0,210.76.0.0,210.78.0.0,210.79.64.0,210.79.224.0,210.82.0.0,210.87.128.0,210.185.192.0,210.192.96.0,211.64.0.0,211.80.0.0,211.96.0.0,211.136.0.0,211.144.0.0,211.160.0.0,218.0.0.0,218.56.0.0,218.64.0.0,218.96.0.0,218.104.0.0,218.108.0.0,218.185.192.0,218.192.0.0,218.240.0.0,218.249.0.0,219.72.0.0,219.82.0.0,219.128.0.0,219.216.0.0,219.224.0.0,219.242.0.0,219.244.0.0,220.101.192.0,220.112.0.0,220.152.128.0,220.154.0.0,220.160.0.0,220.192.0.0,220.231.0.0,220.231.128.0,220.232.64.0,220.234.0.0,220.242.0.0,220.248.0.0,220.252.0.0,221.0.0.0,221.8.0.0,221.12.0.0,221.12.128.0,221.13.0.0,221.14.0
.0,221.122.0.0,221.129.0.0,221.130.0.0,221.133.224.0,221.136.0.0,221.172.0.0,221.176.0.0,221.192.0.0,221.196.0.0,221.198.0.0,221.199.0.0,221.199.128.0,221.199.192.0,221.199.224.0,221.200.0.0,221.208.0.0,221.224.0.0,222.16.0.0,222.32.0.0,222.64.0.0,222.125.0.0,222.126.128.0,222.128.0.0,222.160.0.0,222.168.0.0,222.176.0.0,222.192.0.0,222.240.0.0,222.248.0.0", ",") +) + +func IP() string { + ip := ipSet[Int(0, int64(len(ipSet))-1)] + result := strings.Split(ip, ".") + for k, v := range result { + if v == "0" { + result[k] = strconv.Itoa(int(Int(0, 255))) + } + } + return strings.Join(result, ".") +} diff --git a/vendor/git.nspix.com/golang/micro/helper/random/string.go b/vendor/git.nspix.com/golang/micro/helper/random/string.go new file mode 100644 index 0000000..24ba24a --- /dev/null +++ b/vendor/git.nspix.com/golang/micro/helper/random/string.go @@ -0,0 +1,28 @@ +package random + +import ( + "math/rand" + "strings" +) + +const ( + Uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + Lowercase = "abcdefghijklmnopqrstuvwxyz" + Alphabetic = Uppercase + Lowercase + Numeric = "0123456789" + Alphanumeric = Alphabetic + Numeric + Symbols = "`" + `~!@#$%^&*()-_+={}[]|\;:"<>,./?` + Hex = Numeric + "abcdef" +) + +func String(length uint8, charsets ...string) string { + charset := strings.Join(charsets, "") + if charset == "" { + charset = Alphanumeric + } + b := make([]byte, length) + for i := range b { + b[i] = charset[rand.Int63()%int64(len(charset))] + } + return string(b) +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 0000000..d8156a6 --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000..04fdf09 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000..b4bb97f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000..5dc6826 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000..f765a46 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). 
+ +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000..fa820b9 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. 
+// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000..5b8a4b9 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000..b404f4b --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000..14bd340 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000..d651a2b --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. 
+// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000..24b78ed --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. 
+// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000..0cbbcdd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 0000000..d7fcbf2 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. 
+// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000..2e02ec0 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. 
+func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000..e6ef06c --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. 
+// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000..5ea6c73 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000..a57207a --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. 
Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
+func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. 
The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000..4631096 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. 
+func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000..7697802 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml new file mode 100644 index 0000000..c73bb33 --- /dev/null +++ b/vendor/github.com/rs/xid/.appveyor.yml @@ -0,0 +1,27 @@ +version: 1.0.0.{build} + +platform: x64 + +branches: + only: + - master + +clone_folder: c:\gopath\src\github.com\rs\xid + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -t . 
+ +build_script: + - go build + +test_script: + - go test + diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml new file mode 100644 index 0000000..b37da15 --- /dev/null +++ b/vendor/github.com/rs/xid/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: +- "1.9" +- "1.10" +- "master" +matrix: + allow_failures: + - go: "master" diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE new file mode 100644 index 0000000..47c5e9d --- /dev/null +++ b/vendor/github.com/rs/xid/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md new file mode 100644 index 0000000..7981887 --- /dev/null +++ b/vendor/github.com/rs/xid/README.md @@ -0,0 +1,115 @@ +# Globally Unique ID Generator + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) + +Package xid is a globally unique id generator library, ready to safely be used directly in your server code. + +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string: +https://docs.mongodb.org/manual/reference/object-id/ + +- 4-byte value representing the seconds since the Unix epoch, +- 3-byte machine identifier, +- 2-byte process id, and +- 3-byte counter, starting with a random value. + +The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +The string representation is using base32 hex (w/o padding) for better space efficiency +when stored in that form (20 bytes). The hex variant of base32 is used to retain the +sortable property of the id. + +Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +issue when transported as a string between various systems. Base36 wasn't retained either +because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). + +UUIDs are 16 bytes (128 bits) and 36 chars as string representation. 
Twitter Snowflake +ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central +generator servers. xid stands in between with 12 bytes (96 bits) and a more compact +URL-safe string representation (20 chars). No configuration or central generator server +is required so it can be used directly in server's code. + +| Name | Binary Size | String Size | Features +|-------------|-------------|----------------|---------------- +| [UUID] | 16 bytes | 36 chars | configuration free, not sortable +| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable +| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable +| [MongoID] | 12 bytes | 24 chars | configuration free, sortable +| xid | 12 bytes | 20 chars | configuration free, sortable + +[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier +[shortuuid]: https://github.com/stochastic-technologies/shortuuid +[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake +[MongoID]: https://docs.mongodb.org/manual/reference/object-id/ + +Features: + +- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +- Base32 hex encoded by default (20 chars when transported as printable string, still sortable) +- Non configured, you don't need set a unique machine and/or data center id +- K-ordered +- Embedded time with 1 second precision +- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +- Lock-free (i.e.: unlike UUIDv1 and v2) + +Best used with [zerolog](https://github.com/rs/zerolog)'s +[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler). + +Notes: + +- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. 
You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. + +References: + +- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +- https://en.wikipedia.org/wiki/Universally_unique_identifier +- https://blog.twitter.com/2010/announcing-snowflake +- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid +- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride +- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid +- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid +- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid + +## Install + + go get github.com/rs/xid + +## Usage + +```go +guid := xid.New() + +println(guid.String()) +// Output: 9m4e2mr0ui3e8a215n4g +``` + +Get `xid` embedded info: + +```go +guid.Machine() +guid.Pid() +guid.Time() +guid.Counter() +``` + +## Benchmark + +Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). + +``` +BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op +BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op +BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op +BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op +``` + +Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs. + +## Licenses + +All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). 
diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go new file mode 100644 index 0000000..08351ff --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -0,0 +1,9 @@ +// +build darwin + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.uuid") +} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go new file mode 100644 index 0000000..7fbd3c0 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_fallback.go @@ -0,0 +1,9 @@ +// +build !darwin,!linux,!freebsd,!windows + +package xid + +import "errors" + +func readPlatformMachineID() (string, error) { + return "", errors.New("not implemented") +} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go new file mode 100644 index 0000000..be25a03 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_freebsd.go @@ -0,0 +1,9 @@ +// +build freebsd + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.hostuuid") +} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go new file mode 100644 index 0000000..837b204 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package xid + +import "io/ioutil" + +func readPlatformMachineID() (string, error) { + b, err := ioutil.ReadFile("/etc/machine-id") + if err != nil || len(b) == 0 { + b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid") + } + return string(b), err +} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go new file mode 100644 index 0000000..ec2593e --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -0,0 +1,38 @@ +// +build windows + +package xid + +import ( + "fmt" + "syscall" + "unsafe" +) + +func readPlatformMachineID() (string, error) { + // source: 
https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go + var h syscall.Handle + err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer syscall.RegCloseKey(h) + + const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [syscallRegBufLen]uint16 + bufLen := uint32(syscallRegBufLen) + var valType uint32 + err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := syscall.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return hostID, nil +} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go new file mode 100644 index 0000000..f1db1a1 --- /dev/null +++ b/vendor/github.com/rs/xid/id.go @@ -0,0 +1,380 @@ +// Package xid is a globally unique id generator suited for web scale +// +// Xid is using Mongo Object ID algorithm to generate globally unique ids: +// https://docs.mongodb.org/manual/reference/object-id/ +// +// - 4-byte value representing the seconds since the Unix epoch, +// - 3-byte machine identifier, +// - 2-byte process id, and +// - 3-byte counter, starting with a random value. +// +// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +// The string representation is using base32 hex (w/o padding) for better space efficiency +// when stored in that form (20 bytes). The hex variant of base32 is used to retain the +// sortable property of the id. +// +// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +// issue when transported as a string between various systems. 
Base36 wasn't retained either +// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). +// +// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between +// with 12 bytes with a more compact string representation ready for the web and no +// required configuration or central generation server. +// +// Features: +// +// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +// - Base32 hex encoded by default (16 bytes storage when transported as printable string) +// - Non configured, you don't need set a unique machine and/or data center id +// - K-ordered +// - Embedded time with 1 second precision +// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +// +// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). +// +// References: +// +// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +// - https://en.wikipedia.org/wiki/Universally_unique_identifier +// - https://blog.twitter.com/2010/announcing-snowflake +package xid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "sort" + "sync/atomic" + "time" + "unsafe" +) + +// Code inspired from mgo/bson ObjectId + +// ID represents a unique request id +type ID [rawLen]byte + +const ( + encodedLen = 20 // string encoded len + rawLen = 12 // binary raw len + + // encoding stores a custom version of the base32 encoding with lower case + // letters. 
+ encoding = "0123456789abcdefghijklmnopqrstuv" +) + +var ( + // ErrInvalidID is returned when trying to unmarshal an invalid ID + ErrInvalidID = errors.New("xid: invalid ID") + + // objectIDCounter is atomically incremented when generating a new ObjectId + // using NewObjectId() function. It's used as a counter part of an id. + // This id is initialized with a random value. + objectIDCounter = randInt() + + // machineId stores machine id generated once and used in subsequent calls + // to NewObjectId function. + machineID = readMachineID() + + // pid stores the current process id + pid = os.Getpid() + + nilID ID + + // dec is the decoding map for base32 encoding + dec [256]byte +) + +func init() { + for i := 0; i < len(dec); i++ { + dec[i] = 0xFF + } + for i := 0; i < len(encoding); i++ { + dec[encoding[i]] = byte(i) + } + + // If /proc/self/cpuset exists and is not /, we can assume that we are in a + // form of container and use the content of cpuset xor-ed with the PID in + // order get a reasonable machine global unique PID. + b, err := ioutil.ReadFile("/proc/self/cpuset") + if err == nil && len(b) > 1 { + pid ^= int(crc32.ChecksumIEEE(b)) + } +} + +// readMachineId generates machine id and puts it into the machineId global +// variable. If this function fails to get the hostname, it will cause +// a runtime error. 
+func readMachineID() []byte { + id := make([]byte, 3) + hid, err := readPlatformMachineID() + if err != nil || len(hid) == 0 { + hid, err = os.Hostname() + } + if err == nil && len(hid) != 0 { + hw := md5.New() + hw.Write([]byte(hid)) + copy(id, hw.Sum(nil)) + } else { + // Fallback to rand number if machine id can't be gathered + if _, randErr := rand.Reader.Read(id); randErr != nil { + panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) + } + } + return id +} + +// randInt generates a random uint32 +func randInt() uint32 { + b := make([]byte, 3) + if _, err := rand.Reader.Read(b); err != nil { + panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) + } + return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) +} + +// New generates a globally unique ID +func New() ID { + return NewWithTime(time.Now()) +} + +// NewWithTime generates a globally unique ID with the passed in time +func NewWithTime(t time.Time) ID { + var id ID + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) + // Machine, first 3 bytes of md5(hostname) + id[4] = machineID[0] + id[5] = machineID[1] + id[6] = machineID[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + id[7] = byte(pid >> 8) + id[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIDCounter, 1) + id[9] = byte(i >> 16) + id[10] = byte(i >> 8) + id[11] = byte(i) + return id +} + +// FromString reads an ID from its string representation +func FromString(id string) (ID, error) { + i := &ID{} + err := i.UnmarshalText([]byte(id)) + return *i, err +} + +// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). +func (id ID) String() string { + text := make([]byte, encodedLen) + encode(text, id[:]) + return *(*string)(unsafe.Pointer(&text)) +} + +// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. 
+func (id ID) Encode(dst []byte) []byte { + encode(dst, id[:]) + return dst +} + +// MarshalText implements encoding/text TextMarshaler interface +func (id ID) MarshalText() ([]byte, error) { + text := make([]byte, encodedLen) + encode(text, id[:]) + return text, nil +} + +// MarshalJSON implements encoding/json Marshaler interface +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsNil() { + return []byte("null"), nil + } + text := make([]byte, encodedLen+2) + encode(text[1:encodedLen+1], id[:]) + text[0], text[encodedLen+1] = '"', '"' + return text, nil +} + +// encode by unrolling the stdlib base32 algorithm + removing all safe checks +func encode(dst, id []byte) { + _ = dst[19] + _ = id[11] + + dst[19] = encoding[(id[11]<<4)&0x1F] + dst[18] = encoding[(id[11]>>1)&0x1F] + dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + dst[16] = encoding[id[10]>>3] + dst[15] = encoding[id[9]&0x1F] + dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] + dst[13] = encoding[(id[8]>>2)&0x1F] + dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] + dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] + dst[10] = encoding[(id[6]>>1)&0x1F] + dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] + dst[8] = encoding[id[5]>>3] + dst[7] = encoding[id[4]&0x1F] + dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] + dst[5] = encoding[(id[3]>>2)&0x1F] + dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] + dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] + dst[2] = encoding[(id[1]>>1)&0x1F] + dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] + dst[0] = encoding[id[0]>>3] +} + +// UnmarshalText implements encoding/text TextUnmarshaler interface +func (id *ID) UnmarshalText(text []byte) error { + if len(text) != encodedLen { + return ErrInvalidID + } + for _, c := range text { + if dec[c] == 0xFF { + return ErrInvalidID + } + } + decode(id, text) + return nil +} + +// UnmarshalJSON implements encoding/json Unmarshaler interface +func (id *ID) UnmarshalJSON(b []byte) error { + s := string(b) + if s == "null" { + 
*id = nilID + return nil + } + return id.UnmarshalText(b[1 : len(b)-1]) +} + +// decode by unrolling the stdlib base32 algorithm + removing all safe checks +func decode(id *ID, src []byte) { + _ = src[19] + _ = id[11] + + id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4 + id[10] = dec[src[16]]<<3 | dec[src[17]]>>2 + id[9] = dec[src[14]]<<5 | dec[src[15]] + id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3 + id[7] = dec[src[11]]<<4 | dec[src[12]]>>1 + id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4 + id[5] = dec[src[8]]<<3 | dec[src[9]]>>2 + id[4] = dec[src[6]]<<5 | dec[src[7]] + id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3 + id[2] = dec[src[3]]<<4 | dec[src[4]]>>1 + id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4 + id[0] = dec[src[0]]<<3 | dec[src[1]]>>2 +} + +// Time returns the timestamp part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Time() time.Time { + // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. + secs := int64(binary.BigEndian.Uint32(id[0:4])) + return time.Unix(secs, 0) +} + +// Machine returns the 3-byte machine id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Machine() []byte { + return id[4:7] +} + +// Pid returns the process id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Pid() uint16 { + return binary.BigEndian.Uint16(id[7:9]) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Counter() int32 { + b := id[9:12] + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// Value implements the driver.Valuer interface. 
+func (id ID) Value() (driver.Value, error) { + if id.IsNil() { + return nil, nil + } + b, err := id.MarshalText() + return string(b), err +} + +// Scan implements the sql.Scanner interface. +func (id *ID) Scan(value interface{}) (err error) { + switch val := value.(type) { + case string: + return id.UnmarshalText([]byte(val)) + case []byte: + return id.UnmarshalText(val) + case nil: + *id = nilID + return nil + default: + return fmt.Errorf("xid: scanning unsupported type: %T", value) + } +} + +// IsNil Returns true if this is a "nil" ID +func (id ID) IsNil() bool { + return id == nilID +} + +// NilID returns a zero value for `xid.ID`. +func NilID() ID { + return nilID +} + +// Bytes returns the byte array representation of `ID` +func (id ID) Bytes() []byte { + return id[:] +} + +// FromBytes convert the byte array representation of `ID` back to `ID` +func FromBytes(b []byte) (ID, error) { + var id ID + if len(b) != rawLen { + return id, ErrInvalidID + } + copy(id[:], b) + return id, nil +} + +// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. +// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, +// and 1 if current id is greater than the other. +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) +} + +type sorter []ID + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Less(i, j int) bool { + return s[i].Compare(s[j]) < 0 +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Sort sorts an array of IDs inplace. +// It works by wrapping `[]ID` and use `sort.Sort`. 
+func Sort(ids []ID) { + sort.Sort(sorter(ids)) +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 0000000..055480b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000..8da58fb --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000..b50c6e8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. 
+ +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! 
+b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000..d2c2308 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,740 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. 
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. 
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. 
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. 
+// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. 
+// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000..129bc2a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. 
+ alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. + out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + 
return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) 
+ return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := 
reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000..a1c2cc5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. 
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
// yaml_emitter_state_machine dispatches one event to the handler for the
// emitter's current state. Note the empty default case: any state value not
// listed falls out of the switch and hits the panic below.
func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	switch emitter.state {
	default:
	case yaml_EMIT_STREAM_START_STATE:
		return yaml_emitter_emit_stream_start(emitter, event)

	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, true)

	case yaml_EMIT_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, false)

	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
		return yaml_emitter_emit_document_content(emitter, event)

	case yaml_EMIT_DOCUMENT_END_STATE:
		return yaml_emitter_emit_document_end(emitter, event)

	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)

	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)

	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, true)

	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, false)

	case yaml_EMIT_END_STATE:
		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
	}
	panic("invalid emitter state")
}

// Expect STREAM-START.
// Validates the event type, then normalizes the emitter's configuration:
// encoding defaults to UTF-8, best_indent is clamped into [2,9] (default 2),
// best_width defaults to 80 (or effectively unlimited when negative), and the
// line break style defaults to LF. Writes a BOM for non-UTF-8 encodings.
func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_STREAM_START_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
	}
	if emitter.encoding == yaml_ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == yaml_ANY_ENCODING {
			emitter.encoding = yaml_UTF8_ENCODING
		}
	}
	if emitter.best_indent < 2 || emitter.best_indent > 9 {
		emitter.best_indent = 2
	}
	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		// Negative width means "no wrapping": use the largest int32 value.
		emitter.best_width = 1<<31 - 1
	}
	if emitter.line_break == yaml_ANY_BREAK {
		emitter.line_break = yaml_LN_BREAK
	}

	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true

	if emitter.encoding != yaml_UTF8_ENCODING {
		if !yaml_emitter_write_bom(emitter) {
			return false
		}
	}
	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
	return true
}

// Expect DOCUMENT-START or STREAM-END.
// yaml_emitter_emit_document_start handles either a DOCUMENT-START event
// (emitting %YAML / %TAG directives and the "---" marker as needed) or a
// STREAM-END event (closing an open-ended document and flushing the buffer).
// Any other event type is an emitter error.
func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {

	if event.typ == yaml_DOCUMENT_START_EVENT {

		if event.version_directive != nil {
			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
				return false
			}
		}

		// Register the event's %TAG directives; duplicates are errors here.
		for i := 0; i < len(event.tag_directives); i++ {
			tag_directive := &event.tag_directives[i]
			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
				return false
			}
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
				return false
			}
		}

		// The built-in defaults may silently collide with event directives.
		for i := 0; i < len(default_tag_directives); i++ {
			tag_directive := &default_tag_directives[i]
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
				return false
			}
		}

		// Only the first document of a non-canonical stream may omit "---".
		implicit := event.implicit
		if !first || emitter.canonical {
			implicit = false
		}

		// A previous open-ended document must be terminated with "..."
		// before directives can be emitted.
		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if event.version_directive != nil {
			implicit = false
			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if len(event.tag_directives) > 0 {
			implicit = false
			for i := 0; i < len(event.tag_directives); i++ {
				tag_directive := &event.tag_directives[i]
				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
					return false
				}
				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
					return false
				}
				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
					return false
				}
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		if yaml_emitter_check_empty_document(emitter) {
			implicit = false
		}
		if !implicit {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
				return false
			}
			if emitter.canonical {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
		return true
	}

	if event.typ == yaml_STREAM_END_EVENT {
		if emitter.open_ended {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_flush(emitter) {
			return false
		}
		emitter.state = yaml_EMIT_END_STATE
		return true
	}

	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
}

// Expect the root node.
// Pushes the post-document state and emits the root node in root context.
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
}

// Expect DOCUMENT-END.
// Writes "..." for explicit document ends, flushes, and resets the tag
// directive table for the next document.
func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_DOCUMENT_END_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !event.implicit {
		// [Go] Allocate the slice elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
			return false
		}
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	if !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
	emitter.tag_directives = emitter.tag_directives[:0]
	return true
}

// Expect a flow item node.
// yaml_emitter_emit_flow_sequence_item emits one item of a flow sequence
// ("[a, b, c]"). On the first item it opens the '[' and bumps indent and flow
// level; on SEQUENCE-END it pops indent/state stacks and closes with ']'.
func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}

	if event.typ == yaml_SEQUENCE_END_EVENT {
		emitter.flow_level--
		// Pop the indent pushed when the sequence was opened.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			// Canonical style puts a trailing comma and newline before ']'.
			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
			return false
		}
		// Resume the state saved by the parent node.
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]

		return true
	}

	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
			return false
		}
	}

	// Wrap when canonical or past the preferred line width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}

// Expect a flow key node.
// yaml_emitter_emit_flow_mapping_key emits one key of a flow mapping
// ("{k: v}"). The first key opens '{'; MAPPING-END pops indent/state stacks
// and closes with '}'. Keys that qualify as simple keys are emitted bare,
// otherwise an explicit '?' indicator is written.
func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}

	if event.typ == yaml_MAPPING_END_EVENT {
		emitter.flow_level--
		// Pop the indent pushed when the mapping was opened.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			// Canonical style puts a trailing comma and newline before '}'.
			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
			return false
		}
		// Resume the state saved by the parent node.
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}

	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
			return false
		}
	}
	// Wrap when canonical or past the preferred line width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}

	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a flow value node.
// yaml_emitter_emit_flow_mapping_value emits the ':' separator and the value
// node of a flow mapping entry. For a simple key the ':' follows immediately;
// otherwise it may be wrapped first and requires preceding whitespace.
func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
	if simple {
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
			return false
		}
	} else {
		if emitter.canonical || emitter.column > emitter.best_width {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a block item node.
// The first item increases the indent; the increase is "indentless" when the
// sequence is nested directly inside a mapping value that is still on the
// same line. SEQUENCE-END pops the indent and state stacks.
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
			return false
		}
	}
	if event.typ == yaml_SEQUENCE_END_EVENT {
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}

// Expect a block key node.
// yaml_emitter_emit_block_mapping_key emits one key of a block mapping.
// Simple keys are written bare; complex keys get the explicit '?' indicator.
// MAPPING-END pops the indent and state stacks.
func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false, false) {
			return false
		}
	}
	if event.typ == yaml_MAPPING_END_EVENT {
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a block value node.
// A simple key's ':' follows the key directly; a complex key's ':' starts on
// a fresh indented line.
func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
	if simple {
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
			return false
		}
	} else {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a node.
// yaml_emitter_emit_node records the syntactic context flags (root, sequence,
// mapping, simple key) and dispatches the event to the matching node emitter.
// Any event type other than alias/scalar/sequence-start/mapping-start is an
// emitter error.
func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
	root bool, sequence bool, mapping bool, simple_key bool) bool {

	emitter.root_context = root
	emitter.sequence_context = sequence
	emitter.mapping_context = mapping
	emitter.simple_key_context = simple_key

	switch event.typ {
	case yaml_ALIAS_EVENT:
		return yaml_emitter_emit_alias(emitter, event)
	case yaml_SCALAR_EVENT:
		return yaml_emitter_emit_scalar(emitter, event)
	case yaml_SEQUENCE_START_EVENT:
		return yaml_emitter_emit_sequence_start(emitter, event)
	case yaml_MAPPING_START_EVENT:
		return yaml_emitter_emit_mapping_start(emitter, event)
	default:
		return yaml_emitter_set_emitter_error(emitter,
			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
	}
}

// Expect ALIAS.
// The anchor data was prepared by yaml_emitter_analyze_event; the event
// parameter itself is not consulted here.
func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}

// Expect SCALAR.
// Writes anchor, tag, then the scalar value. The indent pushed for the
// scalar is popped immediately afterwards, and the parent state is restored.
func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_select_scalar_style(emitter, event) {
		return false
	}
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if !yaml_emitter_increase_indent(emitter, true, false) {
		return false
	}
	if !yaml_emitter_process_scalar(emitter) {
		return false
	}
	emitter.indent = emitter.indents[len(emitter.indents)-1]
	emitter.indents = emitter.indents[:len(emitter.indents)-1]
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}

// Expect SEQUENCE-START.
// yaml_emitter_emit_sequence_start writes the anchor/tag and decides between
// flow and block style for the sequence: flow is forced inside an existing
// flow collection, in canonical mode, by the event's style, or when the
// sequence is empty (block style cannot represent an empty sequence).
func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
		yaml_emitter_check_empty_sequence(emitter) {
		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
	} else {
		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
	}
	return true
}

// Expect MAPPING-START.
// Same flow/block decision as for sequences, using the mapping style and the
// empty-mapping check.
func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
		yaml_emitter_check_empty_mapping(emitter) {
		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
	} else {
		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
	}
	return true
}

// Check if the document content is an empty scalar.
// Always false in this port; the C original inspects the event queue.
func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
	return false // [Go] Huh?
}

// Check if the next events represent an empty sequence.
// Peeks at the next two queued events without consuming them.
func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
	if len(emitter.events)-emitter.events_head < 2 {
		return false
	}
	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
}

// Check if the next events represent an empty mapping.
// Peeks at the next two queued events without consuming them.
func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
	if len(emitter.events)-emitter.events_head < 2 {
		return false
	}
	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
}

// Check if the next node can be expressed as a simple key.
// yaml_emitter_check_simple_key reports whether the next queued node may be
// written as a simple (unindicated) mapping key: it must be an alias, a
// single-line scalar, or an empty collection, and the rendered anchor+tag+
// value length must not exceed 128 characters.
func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
	length := 0
	switch emitter.events[emitter.events_head].typ {
	case yaml_ALIAS_EVENT:
		length += len(emitter.anchor_data.anchor)
	case yaml_SCALAR_EVENT:
		if emitter.scalar_data.multiline {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix) +
			len(emitter.scalar_data.value)
	case yaml_SEQUENCE_START_EVENT:
		if !yaml_emitter_check_empty_sequence(emitter) {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	case yaml_MAPPING_START_EVENT:
		if !yaml_emitter_check_empty_mapping(emitter) {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	default:
		return false
	}
	return length <= 128
}

// Determine an acceptable scalar style.
// Starts from the requested style and escalates plain -> single-quoted ->
// double-quoted whenever the analysis flags computed by
// yaml_emitter_analyze_scalar forbid the current choice. Canonical mode and
// multiline simple keys always force double quotes. If the scalar ends up
// styled (non-plain) without a tag, the non-specific "!" handle is emitted so
// the implicit-resolution semantics are preserved.
func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {

	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
	if no_tag && !event.implicit && !event.quoted_implicit {
		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
	}

	style := event.scalar_style()
	if style == yaml_ANY_SCALAR_STYLE {
		style = yaml_PLAIN_SCALAR_STYLE
	}
	if emitter.canonical {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	if emitter.simple_key_context && emitter.scalar_data.multiline {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}

	if style == yaml_PLAIN_SCALAR_STYLE {
		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		// An empty scalar cannot be written plain in flow context or as a key.
		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		if no_tag && !event.implicit {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
		if !emitter.scalar_data.single_quoted_allowed {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
		// Block scalar styles are illegal in flow context and for keys.
		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}

	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
		emitter.tag_data.handle = []byte{'!'}
	}
	emitter.scalar_data.style = style
	return true
}

// Write an anchor.
// Emits "&name" for an anchor or "*name" for an alias; a nil anchor is a
// no-op.
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
	if emitter.anchor_data.anchor == nil {
		return true
	}
	c := []byte{'&'}
	if emitter.anchor_data.alias {
		c[0] = '*'
	}
	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
		return false
	}
	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
}

// Write a tag.
// Shorthand form "handle suffix" when a handle was resolved, otherwise the
// verbatim form "!<suffix>". No-op when neither handle nor suffix is set.
func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
		return true
	}
	if len(emitter.tag_data.handle) > 0 {
		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
			return false
		}
		if len(emitter.tag_data.suffix) > 0 {
			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
				return false
			}
		}
	} else {
		// [Go] Allocate these slices elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
			return false
		}
		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
			return false
		}
	}
	return true
}

// Write a scalar.
// yaml_emitter_process_scalar writes the analyzed scalar value using the
// style chosen by yaml_emitter_select_scalar_style. An unknown style is a
// programmer error and panics.
func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
	switch emitter.scalar_data.style {
	case yaml_PLAIN_SCALAR_STYLE:
		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)

	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)

	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)

	case yaml_LITERAL_SCALAR_STYLE:
		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)

	case yaml_FOLDED_SCALAR_STYLE:
		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
	}
	panic("unknown scalar style")
}

// Check if a %YAML directive is valid.
// Only version 1.1 is accepted by this emitter.
func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
	if version_directive.major != 1 || version_directive.minor != 1 {
		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
	}
	return true
}

// Check if a %TAG directive is valid.
// The handle must be non-empty, start and end with '!', and contain only
// alphanumeric characters in between; the prefix must be non-empty.
func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
	handle := tag_directive.handle
	prefix := tag_directive.prefix
	if len(handle) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
	}
	if handle[0] != '!' {
		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
	}
	if handle[len(handle)-1] != '!' {
		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
	}
	// Step by UTF-8 rune width between the surrounding '!' characters.
	for i := 1; i < len(handle)-1; i += width(handle[i]) {
		if !is_alpha(handle, i) {
			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
		}
	}
	if len(prefix) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
	}
	return true
}

// Check if an anchor is valid.
// On success the anchor and its alias flag are stored in emitter.anchor_data
// for later emission by yaml_emitter_process_anchor.
func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
	if len(anchor) == 0 {
		problem := "anchor value must not be empty"
		if alias {
			problem = "alias value must not be empty"
		}
		return yaml_emitter_set_emitter_error(emitter, problem)
	}
	for i := 0; i < len(anchor); i += width(anchor[i]) {
		if !is_alpha(anchor, i) {
			problem := "anchor value must contain alphanumerical characters only"
			if alias {
				problem = "alias value must contain alphanumerical characters only"
			}
			return yaml_emitter_set_emitter_error(emitter, problem)
		}
	}
	emitter.anchor_data.anchor = anchor
	emitter.anchor_data.alias = alias
	return true
}

// Check if a tag is valid.
// Tries to shorten the tag using a registered %TAG directive prefix; when a
// directive matches, handle+suffix are stored, otherwise the full tag becomes
// the suffix (emitted verbatim later).
func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
	if len(tag) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
	}
	for i := 0; i < len(emitter.tag_directives); i++ {
		tag_directive := &emitter.tag_directives[i]
		if bytes.HasPrefix(tag, tag_directive.prefix) {
			emitter.tag_data.handle = tag_directive.handle
			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
			return true
		}
	}
	emitter.tag_data.suffix = tag
	return true
}

// Check if a scalar is valid.
// yaml_emitter_analyze_scalar scans the scalar value once, byte by UTF-8
// rune, and records in emitter.scalar_data which styles (plain in flow/block,
// single-quoted, block scalar) may legally represent it. The statement order
// of this scan is the algorithm; it mirrors yaml_emitter_analyze_scalar in
// the C libyaml source.
func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
	var (
		block_indicators   = false
		flow_indicators    = false
		line_breaks        = false
		special_characters = false

		leading_space  = false
		leading_break  = false
		trailing_space = false
		trailing_break = false
		break_space    = false
		space_break    = false

		preceded_by_whitespace = false
		followed_by_whitespace = false
		previous_space         = false
		previous_break         = false
	)

	emitter.scalar_data.value = value

	// The empty scalar is a special case: it may be plain in block context
	// or single-quoted, but never a block scalar.
	if len(value) == 0 {
		emitter.scalar_data.multiline = false
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = true
		emitter.scalar_data.single_quoted_allowed = true
		emitter.scalar_data.block_allowed = false
		return true
	}

	// Values starting with "---" or "..." collide with document markers.
	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
		block_indicators = true
		flow_indicators = true
	}

	preceded_by_whitespace = true
	for i, w := 0, 0; i < len(value); i += w {
		w = width(value[i])
		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)

		if i == 0 {
			// Characters that are indicators only at the start of a scalar.
			switch value[i] {
			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
				flow_indicators = true
				block_indicators = true
			case '?', ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '-':
				if followed_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		} else {
			// Characters that are indicators in the middle of a scalar.
			switch value[i] {
			case ',', '?', '[', ']', '{', '}':
				flow_indicators = true
			case ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '#':
				if preceded_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		}

		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
			special_characters = true
		}
		if is_space(value, i) {
			if i == 0 {
				leading_space = true
			}
			if i+width(value[i]) == len(value) {
				trailing_space = true
			}
			if previous_break {
				break_space = true
			}
			previous_space = true
			previous_break = false
		} else if is_break(value, i) {
			line_breaks = true
			if i == 0 {
				leading_break = true
			}
			if i+width(value[i]) == len(value) {
				trailing_break = true
			}
			if previous_space {
				space_break = true
			}
			previous_space = false
			previous_break = true
		} else {
			previous_space = false
			previous_break = false
		}

		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
		preceded_by_whitespace = is_blankz(value, i)
	}

	// Translate the collected flags into the set of permitted styles.
	emitter.scalar_data.multiline = line_breaks
	emitter.scalar_data.flow_plain_allowed = true
	emitter.scalar_data.block_plain_allowed = true
	emitter.scalar_data.single_quoted_allowed = true
	emitter.scalar_data.block_allowed = true

	if leading_space || leading_break || trailing_space || trailing_break {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if trailing_space {
		emitter.scalar_data.block_allowed = false
	}
	if break_space {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || special_characters {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
		emitter.scalar_data.block_allowed = false
	}
	if line_breaks {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if flow_indicators {
		emitter.scalar_data.flow_plain_allowed = false
	}
	if block_indicators {
		emitter.scalar_data.block_plain_allowed = false
	}
	return true
}

// Check if the event data is valid.
// yaml_emitter_analyze_event resets the cached anchor/tag/scalar data and
// re-populates it from the event about to be emitted. Tags are analyzed only
// when they would actually be written (canonical mode, or not implicit).
func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {

	emitter.anchor_data.anchor = nil
	emitter.tag_data.handle = nil
	emitter.tag_data.suffix = nil
	emitter.scalar_data.value = nil

	switch event.typ {
	case yaml_ALIAS_EVENT:
		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
			return false
		}

	case yaml_SCALAR_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
				return false
			}
		}
		// A scalar tag matters unless both implicit flags allow omitting it.
		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}
		if !yaml_emitter_analyze_scalar(emitter, event.value) {
			return false
		}

	case yaml_SEQUENCE_START_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
				return false
			}
		}
		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}

	case yaml_MAPPING_START_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
				return false
			}
		}
		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}
	}
	return true
}

// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < 
len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = 
put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if 
!is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000..0ee738e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. 
	doneInit bool
}

// newEncoder returns an encoder whose emitter accumulates the generated
// YAML into the encoder's own out byte slice.
func newEncoder() *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_string(&e.emitter, &e.out)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}

// newEncoderWithWriter returns an encoder whose emitter streams the
// generated YAML directly to w instead of buffering it in e.out.
func newEncoderWithWriter(w io.Writer) *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_writer(&e.emitter, w)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}

// init emits the stream_start event exactly once; doneInit guards
// against repeated initialization.
func (e *encoder) init() {
	if e.doneInit {
		return
	}
	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
	e.emit()
	e.doneInit = true
}

// finish emits the stream_end event, closing the YAML stream.
func (e *encoder) finish() {
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()
}

// destroy releases the emitter's internal resources.
func (e *encoder) destroy() {
	yaml_emitter_delete(&e.emitter)
}

// emit feeds the pending e.event to the emitter, aborting via must on
// failure.
func (e *encoder) emit() {
	// This will internally delete the e.event value.
	e.must(yaml_emitter_emit(&e.emitter, &e.event))
}

// must aborts encoding (failf panics) when ok is false, using the
// emitter's recorded problem string as the message.
func (e *encoder) must(ok bool) {
	if !ok {
		msg := e.emitter.problem
		if msg == "" {
			msg = "unknown problem generating YAML content"
		}
		failf("%s", msg)
	}
}

// marshalDoc wraps the marshalling of in between document_start and
// document_end events, initializing the stream first if needed.
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
	e.init()
	yaml_document_start_event_initialize(&e.event, nil, nil, true)
	e.emit()
	e.marshal(tag, in)
	yaml_document_end_event_initialize(&e.event, true)
	e.emit()
}

// marshal emits the YAML representation of in, normalizing well-known
// interface types (jsonNumber, Marshaler, TextMarshaler) first.
// (Definition continues past this chunk boundary.)
func (e *encoder) marshal(tag string, in reflect.Value) {
	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
		e.nilv()
		return
	}
	iface := in.Interface()
	switch m := iface.(type) {
	case jsonNumber:
		integer, err := m.Int64()
		if err == nil {
			// In this case the json.Number is a valid int64
			in = reflect.ValueOf(integer)
			break
		}
		float, err := m.Float64()
		if err == nil {
			// In this case the json.Number is a valid float64
			in = reflect.ValueOf(float)
			break
		}
		// fallback case - no number could be obtained
		in = reflect.ValueOf(m.String())
	case time.Time, *time.Time:
		// Although time.Time implements
TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. + case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e 
*encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. 
func isBase60Float(s string) (result bool) {
	// Fast path: base 60 floats must start with a sign or digit and
	// contain at least one ':'.
	if s == "" {
		return false
	}
	c := s[0]
	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
		return false
	}
	// Do the full match.
	return base60float.MatchString(s)
}

// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)

// stringv emits a string value, choosing a scalar style: literal for
// multi-line strings, plain when the unquoted text would still resolve
// to a plain string tag, double-quoted otherwise. Invalid UTF-8 is
// base64-encoded under the !!binary tag.
func (e *encoder) stringv(tag string, in reflect.Value) {
	var style yaml_scalar_style_t
	s := in.String()
	canUsePlain := true
	switch {
	case !utf8.ValidString(s):
		if tag == yaml_BINARY_TAG {
			failf("explicitly tagged !!binary data must be base64-encoded")
		}
		if tag != "" {
			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
		}
		// It can't be encoded directly as YAML so use a binary tag
		// and encode it as base64.
		tag = yaml_BINARY_TAG
		s = encodeBase64(s)
	case tag == "":
		// Check to see if it would resolve to a specific
		// tag when encoded unquoted. If it doesn't,
		// there's no need to quote it.
		rtag, _ := resolve("", s)
		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
	}
	// Note: it's possible for user code to emit invalid YAML
	// if they explicitly specify a tag and a string containing
	// text that's incompatible with that tag.
	switch {
	case strings.Contains(s, "\n"):
		style = yaml_LITERAL_SCALAR_STYLE
	case canUsePlain:
		style = yaml_PLAIN_SCALAR_STYLE
	default:
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	e.emitScalar(s, "", tag, style)
}

// boolv emits a bool as the plain scalar "true" or "false".
func (e *encoder) boolv(tag string, in reflect.Value) {
	var s string
	if in.Bool() {
		s = "true"
	} else {
		s = "false"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

// intv emits a signed integer in base 10.
func (e *encoder) intv(tag string, in reflect.Value) {
	s := strconv.FormatInt(in.Int(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

// uintv emits an unsigned integer in base 10.
func (e *encoder) uintv(tag string, in reflect.Value) {
	s := strconv.FormatUint(in.Uint(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

// timev emits a time.Time in RFC3339 (nanosecond) format.
func (e *encoder) timev(tag string, in reflect.Value) {
	t := in.Interface().(time.Time)
	s := t.Format(time.RFC3339Nano)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

// floatv emits a float, mapping Go's Inf/NaN spellings to YAML's
// .inf/-.inf/.nan forms.
func (e *encoder) floatv(tag string, in reflect.Value) {
	// Issue #352: When formatting, use the precision of the underlying value
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}

	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
	switch s {
	case "+Inf":
		s = ".inf"
	case "-Inf":
		s = "-.inf"
	case "NaN":
		s = ".nan"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

// nilv emits a YAML null.
func (e *encoder) nilv() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}

// emitScalar builds and emits a scalar event; the tag is implicit
// (omitted from output) when empty.
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
	implicit := tag == ""
	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
	e.emit()
}
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
package yaml

import (
	"bytes"
)

// The parser implements the following grammar:
//
// stream ::= STREAM-START
implicit_document? explicit_document* STREAM-END
// implicit_document ::= block_node DOCUMENT-END*
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// block_node_or_indentless_sequence ::=
// ALIAS
// | properties (block_content | indentless_block_sequence)?
// | block_content
// | indentless_block_sequence
// block_node ::= ALIAS
// | properties block_content?
// | block_content
// flow_node ::= ALIAS
// | properties flow_content?
// | flow_content
// properties ::= TAG ANCHOR? | ANCHOR TAG?
// block_content ::= block_collection | flow_collection | SCALAR
// flow_content ::= flow_collection | SCALAR
// block_collection ::= block_sequence | block_mapping
// flow_collection ::= flow_sequence | flow_mapping
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
// block_mapping ::= BLOCK-MAPPING_START
// ((KEY block_node_or_indentless_sequence?)?
// (VALUE block_node_or_indentless_sequence?)?)*
// BLOCK-END
// flow_sequence ::= FLOW-SEQUENCE-START
// (flow_sequence_entry FLOW-ENTRY)*
// flow_sequence_entry?
// FLOW-SEQUENCE-END
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// flow_mapping ::= FLOW-MAPPING-START
// (flow_mapping_entry FLOW-ENTRY)*
// flow_mapping_entry?
// FLOW-MAPPING-END
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?

// Peek the next token in the token queue. Returns nil when no token is
// available and fetching more tokens fails (a scanner error occurred).
func peek_token(parser *yaml_parser_t) *yaml_token_t {
	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
		return &parser.tokens[parser.tokens_head]
	}
	return nil
}

// Remove the next token from the queue (must be called after peek_token).
func skip_token(parser *yaml_parser_t) {
	parser.token_available = false
	parser.tokens_parsed++
	// Record stream exhaustion before advancing the queue head.
	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
	parser.tokens_head++
}

// Get the next event. Returns true with a zeroed event once the stream
// has ended or an error is pending; otherwise dispatches to the state
// machine to produce the next event.
func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
	// Erase the event object.
	*event = yaml_event_t{}

	// No events after the end of the stream or error.
	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
		return true
	}

	// Generate the next event.
	return yaml_parser_state_machine(parser, event)
}

// Set parser error. Always returns false so callers can
// `return yaml_parser_set_parser_error(...)` directly.
func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
	parser.error = yaml_PARSER_ERROR
	parser.problem = problem
	parser.problem_mark = problem_mark
	return false
}

// yaml_parser_set_parser_error_context is like yaml_parser_set_parser_error
// but also records the surrounding context ("while parsing a ...") and its
// mark. Always returns false.
func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
	parser.error = yaml_PARSER_ERROR
	parser.context = context
	parser.context_mark = context_mark
	parser.problem = problem
	parser.problem_mark = problem_mark
	return false
}

// State dispatcher.
// yaml_parser_state_machine dispatches to the parsing function for the
// parser's current state. Panics on an unknown state (programmer error).
func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
	//trace("yaml_parser_state_machine", "state:", parser.state.String())

	switch parser.state {
	case yaml_PARSE_STREAM_START_STATE:
		return yaml_parser_parse_stream_start(parser, event)

	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
		return yaml_parser_parse_document_start(parser, event, true)

	case yaml_PARSE_DOCUMENT_START_STATE:
		return yaml_parser_parse_document_start(parser, event, false)

	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
		return yaml_parser_parse_document_content(parser, event)

	case yaml_PARSE_DOCUMENT_END_STATE:
		return yaml_parser_parse_document_end(parser, event)

	case yaml_PARSE_BLOCK_NODE_STATE:
		return yaml_parser_parse_node(parser, event, true, false)

	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
		return yaml_parser_parse_node(parser, event, true, true)

	case yaml_PARSE_FLOW_NODE_STATE:
		return yaml_parser_parse_node(parser, event, false, false)

	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
		return yaml_parser_parse_block_sequence_entry(parser, event, true)

	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
		return yaml_parser_parse_block_sequence_entry(parser, event, false)

	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
		return yaml_parser_parse_indentless_sequence_entry(parser, event)

	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_parser_parse_block_mapping_key(parser, event, true)

	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
		return yaml_parser_parse_block_mapping_key(parser, event, false)

	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
		return yaml_parser_parse_block_mapping_value(parser, event)

	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
		return yaml_parser_parse_flow_sequence_entry(parser, event, true)

	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
		return yaml_parser_parse_flow_sequence_entry(parser, event, false)

	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)

	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)

	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)

	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_parser_parse_flow_mapping_key(parser, event, true)

	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
		return yaml_parser_parse_flow_mapping_key(parser, event, false)

	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
		return yaml_parser_parse_flow_mapping_value(parser, event, false)

	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
		return yaml_parser_parse_flow_mapping_value(parser, event, true)

	default:
		panic("invalid parser state")
	}
}

// Parse the production:
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
// ************
func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ != yaml_STREAM_START_TOKEN {
		// NOTE(review): message reads "did not find expected " — the
		// expected-token name (likely "<stream-start>" upstream) appears
		// stripped; confirm against upstream yaml.v2 before relying on it.
		return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
	}
	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
	*event = yaml_event_t{
		typ:        yaml_STREAM_START_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
		encoding:   token.encoding,
	}
	skip_token(parser)
	return true
}

// Parse the productions:
// implicit_document ::= block_node DOCUMENT-END*
// *
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// *************************
func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {

	token := peek_token(parser)
	if token == nil {
		return false
	}

	// Parse extra document end indicators.
	if !implicit {
		for token.typ == yaml_DOCUMENT_END_TOKEN {
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
		}
	}

	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
		token.typ != yaml_DOCUMENT_START_TOKEN &&
		token.typ != yaml_STREAM_END_TOKEN {
		// Parse an implicit document.
		if !yaml_parser_process_directives(parser, nil, nil) {
			return false
		}
		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
		parser.state = yaml_PARSE_BLOCK_NODE_STATE

		*event = yaml_event_t{
			typ:        yaml_DOCUMENT_START_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}

	} else if token.typ != yaml_STREAM_END_TOKEN {
		// Parse an explicit document.
		var version_directive *yaml_version_directive_t
		var tag_directives []yaml_tag_directive_t
		start_mark := token.start_mark
		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
			return false
		}
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_DOCUMENT_START_TOKEN {
			// NOTE(review): expected-token name appears stripped from the
			// message (likely "<document start>" upstream) — confirm.
			yaml_parser_set_parser_error(parser,
				"did not find expected ", token.start_mark)
			return false
		}
		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
		end_mark := token.end_mark

		*event = yaml_event_t{
			typ:               yaml_DOCUMENT_START_EVENT,
			start_mark:        start_mark,
			end_mark:          end_mark,
			version_directive: version_directive,
			tag_directives:    tag_directives,
			implicit:          false,
		}
		skip_token(parser)

	} else {
		// Parse the stream end.
		parser.state = yaml_PARSE_END_STATE
		*event = yaml_event_t{
			typ:        yaml_STREAM_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
		skip_token(parser)
	}

	return true
}

// Parse the productions:
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// ***********
//
func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	// An empty document body (next token already ends/starts a document)
	// produces an empty scalar at the token's position.
	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
		token.typ == yaml_DOCUMENT_START_TOKEN ||
		token.typ == yaml_DOCUMENT_END_TOKEN ||
		token.typ == yaml_STREAM_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		return yaml_parser_process_empty_scalar(parser, event,
			token.start_mark)
	}
	return yaml_parser_parse_node(parser, event, true, false)
}

// Parse the productions:
// implicit_document ::= block_node DOCUMENT-END*
// *************
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
//
func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}

	start_mark := token.start_mark
	end_mark := token.start_mark

	// The end event is implicit unless an explicit "..." terminator is seen.
	implicit := true
	if token.typ == yaml_DOCUMENT_END_TOKEN {
		end_mark = token.end_mark
		skip_token(parser)
		implicit = false
	}

	// Tag directives do not carry across documents.
	parser.tag_directives = parser.tag_directives[:0]

	parser.state = yaml_PARSE_DOCUMENT_START_STATE
	*event = yaml_event_t{
		typ:        yaml_DOCUMENT_END_EVENT,
		start_mark: start_mark,
		end_mark:   end_mark,
		implicit:   implicit,
	}
	return true
}

// Parse the productions:
// block_node_or_indentless_sequence ::=
// ALIAS
// *****
// | properties (block_content | indentless_block_sequence)?
// ********** *
// | block_content | indentless_block_sequence
// *
// block_node ::= ALIAS
// *****
// | properties block_content?
// ********** *
// | block_content
// *
// flow_node ::= ALIAS
// *****
// | properties flow_content?
// ********** *
// | flow_content
// *
// properties ::= TAG ANCHOR? | ANCHOR TAG?
// *************************
// block_content ::= block_collection | flow_collection | SCALAR
// ******
// flow_content ::= flow_collection | SCALAR
// ******
// yaml_parser_parse_node parses a single node: alias, optional
// anchor/tag properties in either order, then the node content
// (scalar, flow/block sequence or mapping). block selects block-context
// productions; indentless_sequence additionally allows a BLOCK-ENTRY to
// start an indentless sequence.
func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()

	token := peek_token(parser)
	if token == nil {
		return false
	}

	// An alias node is a single token; pop the state stack and emit it.
	if token.typ == yaml_ALIAS_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		*event = yaml_event_t{
			typ:        yaml_ALIAS_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
			anchor:     token.value,
		}
		skip_token(parser)
		return true
	}

	start_mark := token.start_mark
	end_mark := token.start_mark

	// Properties may appear as ANCHOR TAG? or TAG ANCHOR?.
	var tag_token bool
	var tag_handle, tag_suffix, anchor []byte
	var tag_mark yaml_mark_t
	if token.typ == yaml_ANCHOR_TOKEN {
		anchor = token.value
		start_mark = token.start_mark
		end_mark = token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ == yaml_TAG_TOKEN {
			tag_token = true
			tag_handle = token.value
			tag_suffix = token.suffix
			tag_mark = token.start_mark
			end_mark = token.end_mark
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
		}
	} else if token.typ == yaml_TAG_TOKEN {
		tag_token = true
		tag_handle = token.value
		tag_suffix = token.suffix
		start_mark = token.start_mark
		tag_mark = token.start_mark
		end_mark = token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ == yaml_ANCHOR_TOKEN {
			anchor = token.value
			end_mark = token.end_mark
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
		}
	}

	// Resolve the tag handle against the document's tag directives; an
	// empty handle means the suffix is already the full tag.
	var tag []byte
	if tag_token {
		if len(tag_handle) == 0 {
			tag = tag_suffix
			tag_suffix = nil
		} else {
			for i := range parser.tag_directives {
				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
					tag = append(tag, tag_suffix...)
					break
				}
			}
			if len(tag) == 0 {
				yaml_parser_set_parser_error_context(parser,
					"while parsing a node", start_mark,
					"found undefined tag handle", tag_mark)
				return false
			}
		}
	}

	implicit := len(tag) == 0
	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
		}
		return true
	}
	if token.typ == yaml_SCALAR_TOKEN {
		// plain_implicit/quoted_implicit tell the resolver whether the
		// tag may be inferred from the (plain or quoted) content.
		var plain_implicit, quoted_implicit bool
		end_mark = token.end_mark
		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
			plain_implicit = true
		} else if len(tag) == 0 {
			quoted_implicit = true
		}
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]

		*event = yaml_event_t{
			typ:             yaml_SCALAR_EVENT,
			start_mark:      start_mark,
			end_mark:        end_mark,
			anchor:          anchor,
			tag:             tag,
			value:           token.value,
			implicit:        plain_implicit,
			quoted_implicit: quoted_implicit,
			style:           yaml_style_t(token.style),
		}
		skip_token(parser)
		return true
	}
	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
		// [Go] Some of the events below can be merged as they differ only on style.
		end_mark = token.end_mark
		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
		}
		return true
	}
	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
		*event = yaml_event_t{
			typ:        yaml_MAPPING_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
		}
		return true
	}
	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
		}
		return true
	}
	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
		*event = yaml_event_t{
			typ:        yaml_MAPPING_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
		}
		return true
	}
	// Properties with no content: emit an empty scalar carrying them.
	if len(anchor) > 0 || len(tag) > 0 {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]

		*event = yaml_event_t{
			typ:             yaml_SCALAR_EVENT,
			start_mark:      start_mark,
			end_mark:        end_mark,
			anchor:          anchor,
			tag:             tag,
			implicit:        implicit,
			quoted_implicit: false,
			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
		}
		return true
	}

	context := "while parsing a flow node"
	if block {
		context = "while parsing a block node"
	}
	yaml_parser_set_parser_error_context(parser, context, start_mark,
		"did not find expected node content", token.start_mark)
	return false
}

// Parse the productions:
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
// ******************** *********** * *********
//
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// Consume the BLOCK-SEQUENCE-START token, remembering its mark
		// for error context.
		// NOTE(review): peek_token is not nil-checked here (unlike below) —
		// matches the visible code; confirm against upstream before changing.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}

	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, true, false)
		} else {
			// "- " with no node: an empty scalar entry.
			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
			return yaml_parser_process_empty_scalar(parser, event, mark)
		}
	}
	if token.typ == yaml_BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]

		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}

		skip_token(parser)
		return true
	}

	context_mark := parser.marks[len(parser.marks)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	return yaml_parser_set_parser_error_context(parser,
		"while parsing a block collection", context_mark,
		"did not find expected '-' indicator", token.start_mark)
}

// Parse the productions:
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
// *********** *
func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
			token.typ != yaml_KEY_TOKEN &&
			token.typ != yaml_VALUE_TOKEN &&
			token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, true, false)
		}
		// "- " with no node: an empty scalar entry.
		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		return yaml_parser_process_empty_scalar(parser, event, mark)
	}
	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]

	*event = yaml_event_t{
		typ:        yaml_SEQUENCE_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
	}
	return true
}

// Parse the productions:
// block_mapping ::= BLOCK-MAPPING_START
// *******************
// ((KEY block_node_or_indentless_sequence?)?
// *** *
// (VALUE block_node_or_indentless_sequence?)?)*
//
// BLOCK-END
// *********
//
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// Consume the BLOCK-MAPPING-START token, remembering its mark
		// for error context.
		// NOTE(review): peek_token is not nil-checked here — matches the
		// visible code; confirm against upstream before changing.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}

	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ == yaml_KEY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_KEY_TOKEN &&
			token.typ != yaml_VALUE_TOKEN &&
			token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
			return yaml_parser_parse_node(parser, event, true, true)
		} else {
			// "? " with no node: an empty scalar key.
			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
			return yaml_parser_process_empty_scalar(parser, event, mark)
		}
	} else if token.typ == yaml_BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		*event = yaml_event_t{
			typ:        yaml_MAPPING_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
		skip_token(parser)
		return true
	}

	context_mark := parser.marks[len(parser.marks)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	return yaml_parser_set_parser_error_context(parser,
		"while parsing a block mapping", context_mark,
		"did not find expected key", token.start_mark)
}

// Parse the productions:
// block_mapping ::= BLOCK-MAPPING_START
//
// ((KEY block_node_or_indentless_sequence?)?
//
// (VALUE block_node_or_indentless_sequence?)?)*
// ***** *
// BLOCK-END
//
//
func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ == yaml_VALUE_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_KEY_TOKEN &&
			token.typ != yaml_VALUE_TOKEN &&
			token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
			return yaml_parser_parse_node(parser, event, true, true)
		}
		// ": " with no node: an empty scalar value.
		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, mark)
	}
	// A key with no VALUE token at all: the value is an empty scalar.
	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}

// Parse the productions:
// flow_sequence ::= FLOW-SEQUENCE-START
// *******************
// (flow_sequence_entry FLOW-ENTRY)*
// * **********
// flow_sequence_entry?
// *
// FLOW-SEQUENCE-END
// *****************
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 0000000..7c1f5fa --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. 
+ buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. 
+ inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. 
+ for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. 
+ if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 0000000..4120e0c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. 
+ if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 0000000..0b9bb60 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. 
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. 
We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. 
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. 
+func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. 
+ if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? 
+ if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? 
+ // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. 
+ return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. 
+ // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. 
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. 
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. 
+ yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. 
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. 
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. 
+ if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. 
+ if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. 
+ length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. 
+ */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. 
+ for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. 
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+ if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. 
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000..4c45e66 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000..a2dde60 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. 
+ if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 0000000..89650e2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,466 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. 
+// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. 
+// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. 
Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. 
+// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. 
+ InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in 
struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 0000000..f6a9c8e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. 
+const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. 
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
+ value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). 
+ version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. 
+ version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. 
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. 
+ tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. 
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. 
+ unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? 
+ + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 0000000..8110ce3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. 
+func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. 
+func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 0000000..e102ba2 --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,12 @@ +# git.nspix.com/golang/micro v1.3.1 +## explicit; go 1.15 +git.nspix.com/golang/micro/helper/random +# github.com/google/uuid v1.3.0 +## explicit +github.com/google/uuid +# github.com/rs/xid v1.3.0 +## explicit; go 1.12 +github.com/rs/xid +# gopkg.in/yaml.v2 v2.3.0 +## explicit +gopkg.in/yaml.v2