Commit 7d82020f authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  librtmp: return AVERROR_UNKNOWN instead of -1.
  librtmp: don't abuse a variable for two unrelated things.
  librtmp: add rtmp_app and rtmp_playpath private options.
  bmv: add stricter checks for invalid decoded length
  avpacket: fix duplicating side data.
  flv: support stream text data as onTextData

Conflicts:
	libavcodec/bmv.c
	libavformat/flvdec.c
	libavformat/flvenc.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents e10eac91 19dfbf19
libavcodec/avpacket.c
@@ -152,7 +152,7 @@ int av_dup_packet(AVPacket *pkt)
pkt->side_data_elems * sizeof(*pkt->side_data));
for (i = 0; i < pkt->side_data_elems; i++)
DUP_DATA(pkt->side_data[i].data, tmp_pkt.side_data[i].data,
pkt->side_data[i].size, 1);
tmp_pkt.side_data[i].size, 1);
}
}
return 0;
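For context, a minimal sketch of the call pattern the hunk above affects: duplicating a packet that carries side data. This is not part of the commit; it assumes the 2012-era AVPacket API, and the palette side-data entry is used purely as an example.

    #include <string.h>
    #include "libavcodec/avcodec.h"

    /* hypothetical helper, for illustration only */
    static int dup_packet_with_side_data(void)
    {
        static uint8_t payload[16];      /* caller-owned data, not malloc'ed */
        AVPacket pkt;
        uint8_t *pal;

        av_init_packet(&pkt);            /* no owning destruct callback ...     */
        pkt.data = payload;              /* ... so av_dup_packet() will deep-copy */
        pkt.size = sizeof(payload);

        /* attach some side data; av_dup_packet() must duplicate this as well */
        pal = av_packet_new_side_data(&pkt, AV_PKT_DATA_PALETTE, 1024);
        if (!pal)
            return AVERROR(ENOMEM);
        memset(pal, 0, 1024);

        /* with the fix above, each side-data buffer is copied with its size
         * taken from the saved source packet (tmp_pkt), not from the packet
         * that is being rewritten during duplication */
        if (av_dup_packet(&pkt) < 0) {
            av_free_packet(&pkt);
            return AVERROR(ENOMEM);
        }

        av_free_packet(&pkt);
        return 0;
    }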
libavcodec/bmv.c
@@ -143,16 +143,20 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
switch (mode) {
case 1:
if (forward) {
if (dst - frame + SCREEN_WIDE < -frame_off ||
frame_end - dst < frame_off + len)
if (dst - frame + SCREEN_WIDE < frame_off ||
dst - frame + SCREEN_WIDE + frame_off < 0 ||
frame_end - dst < frame_off + len ||
frame_end - dst < len)
return -1;
for (i = 0; i < len; i++)
dst[i] = dst[frame_off + i];
dst += len;
} else {
dst -= len;
if (dst - frame + SCREEN_WIDE < -frame_off ||
frame_end - dst < frame_off + len)
if (dst - frame + SCREEN_WIDE < frame_off ||
dst - frame + SCREEN_WIDE + frame_off < 0 ||
frame_end - dst < frame_off + len ||
frame_end - dst < len)
return -1;
for (i = len - 1; i >= 0; i--)
dst[i] = dst[frame_off + i];
libavformat/flvdec.c
@@ -62,6 +62,21 @@ static int flv_probe(AVProbeData *p)
return 0;
}
static AVStream *create_stream(AVFormatContext *s, int tag, int codec_type){
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return NULL;
st->id = tag;
st->codec->codec_type = codec_type;
if(s->nb_streams>=3 ||( s->nb_streams==2
&& s->streams[0]->codec->codec_type != AVMEDIA_TYPE_DATA
&& s->streams[1]->codec->codec_type != AVMEDIA_TYPE_DATA))
s->ctx_flags &= ~AVFMTCTX_NOHEADER;
avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
return st;
}
static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream, AVCodecContext *acodec, int flv_codecid) {
switch(flv_codecid) {
//no distinction between S16 and S8 PCM codec flags
@@ -298,6 +313,12 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream, AVStream *vst
vcodec->bit_rate = num_val * 1024.0;
else if (!strcmp(key, "audiodatarate") && acodec && 0 <= (int)(num_val * 1024.0))
acodec->bit_rate = num_val * 1024.0;
else if (!strcmp(key, "datastream")) {
AVStream *st = create_stream(s, 2, AVMEDIA_TYPE_DATA);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_id = CODEC_ID_TEXT;
}
}
if (amf_type == AMF_DATA_TYPE_OBJECT && s->nb_streams == 1 &&
@@ -344,7 +365,14 @@ static int flv_read_metabody(AVFormatContext *s, int64_t next_pos) {
//first object needs to be "onMetaData" string
type = avio_r8(ioc);
if(type != AMF_DATA_TYPE_STRING || amf_get_string(ioc, buffer, sizeof(buffer)) < 0 || strcmp(buffer, "onMetaData"))
if (type != AMF_DATA_TYPE_STRING ||
amf_get_string(ioc, buffer, sizeof(buffer)) < 0)
return -1;
if (!strcmp(buffer, "onTextData"))
return 1;
if (strcmp(buffer, "onMetaData"))
return -1;
//find the streams now so that amf_parse_object doesn't need to do the lookup every time it is called.
@@ -362,28 +390,6 @@ static int flv_read_metabody(AVFormatContext *s, int64_t next_pos) {
return 0;
}
static AVStream *create_stream(AVFormatContext *s, int stream_type){
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return NULL;
st->id = stream_type;
switch(stream_type) {
case FLV_STREAM_TYPE_VIDEO: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; break;
case FLV_STREAM_TYPE_AUDIO: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; break;
case FLV_STREAM_TYPE_DATA:
st->codec->codec_type = AVMEDIA_TYPE_DATA;
st->codec->codec_id = CODEC_ID_NONE; // Going to rely on copy for now
av_log(s, AV_LOG_DEBUG, "Data stream created\n");
}
if(s->nb_streams>=3 ||( s->nb_streams==2
&& s->streams[0]->codec->codec_type != AVMEDIA_TYPE_DATA
&& s->streams[1]->codec->codec_type != AVMEDIA_TYPE_DATA))
s->ctx_flags &= ~AVFMTCTX_NOHEADER;
avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
return st;
}
static int flv_read_header(AVFormatContext *s)
{
int offset, flags;
@@ -399,11 +405,11 @@ static int flv_read_header(AVFormatContext *s)
s->ctx_flags |= AVFMTCTX_NOHEADER;
if(flags & FLV_HEADER_FLAG_HASVIDEO){
if(!create_stream(s, FLV_STREAM_TYPE_VIDEO))
if(!create_stream(s, 0, AVMEDIA_TYPE_VIDEO))
return AVERROR(ENOMEM);
}
if(flags & FLV_HEADER_FLAG_HASAUDIO){
if(!create_stream(s, FLV_STREAM_TYPE_AUDIO))
if(!create_stream(s, 1, AVMEDIA_TYPE_AUDIO))
return AVERROR(ENOMEM);
}
// Flag doesn't indicate whether or not there is script-data present. Must
@@ -466,6 +472,65 @@ static void clear_index_entries(AVFormatContext *s, int64_t pos)
}
}
static int flv_data_packet(AVFormatContext *s, AVPacket *pkt,
int64_t dts, int64_t next)
{
int ret = AVERROR_INVALIDDATA, i;
AVIOContext *pb = s->pb;
AVStream *st = NULL;
AMFDataType type;
char buf[20];
int length;
type = avio_r8(pb);
if (type == AMF_DATA_TYPE_MIXEDARRAY)
avio_seek(pb, 4, SEEK_CUR);
else if (type != AMF_DATA_TYPE_OBJECT)
goto out;
amf_get_string(pb, buf, sizeof(buf));
if (strcmp(buf, "type") || avio_r8(pb) != AMF_DATA_TYPE_STRING)
goto out;
amf_get_string(pb, buf, sizeof(buf));
//FIXME parse it as codec_id
amf_get_string(pb, buf, sizeof(buf));
if (strcmp(buf, "text") || avio_r8(pb) != AMF_DATA_TYPE_STRING)
goto out;
length = avio_rb16(pb);
ret = av_get_packet(s->pb, pkt, length);
if (ret < 0) {
ret = AVERROR(EIO);
goto out;
}
for (i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->id == 2)
break;
}
if (i == s->nb_streams) {
st = create_stream(s, 2, AVMEDIA_TYPE_DATA);
if (!st)
goto out;
st->codec->codec_id = CODEC_ID_TEXT;
}
pkt->dts = dts;
pkt->pts = dts;
pkt->size = ret;
pkt->stream_index = st->index;
pkt->flags |= AV_PKT_FLAG_KEY;
avio_seek(s->pb, next + 4, SEEK_SET);
out:
return ret;
}
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
FLVContext *flv = s->priv_data;
@@ -548,7 +613,8 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
}
if(i == s->nb_streams){
av_log(s, AV_LOG_WARNING, "Stream discovered after head already parsed\n");
st= create_stream(s, stream_type);
st = create_stream(s, stream_type,
(int[]){AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_DATA}[stream_type]);
}
av_dlog(s, "%d %X %d \n", stream_type, flags, st->discard);
if( (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO)))
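For orientation between the two FLV halves of this merge, here is a sketch (reconstructed from the demuxer hunk above and the muxer hunk below, not an authoritative spec) of the onTextData script-data payload they exchange:

    AMF string        "onTextData"
    AMF mixed array   (2 entries announced)
        "type"  ->  AMF string  "Text"
        "text"  ->  AMF string  <packet payload>
        ""      ->  AMF end-of-object marker

The demuxer side (flv_data_packet) also accepts a plain AMF object instead of a mixed array, and copies the "text" value into an AVPacket on a CODEC_ID_TEXT data stream created with id 2.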
libavformat/flvenc.c
@@ -182,7 +182,7 @@ static int flv_write_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
FLVContext *flv = s->priv_data;
AVCodecContext *audio_enc = NULL, *video_enc = NULL;
AVCodecContext *audio_enc = NULL, *video_enc = NULL, *data_enc = NULL;
int i, metadata_count = 0;
double framerate = 0.0;
int64_t metadata_size_pos, data_size, metadata_count_pos;
@@ -191,7 +191,8 @@ static int flv_write_header(AVFormatContext *s)
for(i=0; i<s->nb_streams; i++){
AVCodecContext *enc = s->streams[i]->codec;
FLVStreamContext *sc;
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
switch (enc->codec_type) {
case AVMEDIA_TYPE_VIDEO:
if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
framerate = av_q2d(s->streams[i]->r_frame_rate);
} else {
@@ -202,10 +203,22 @@ static int flv_write_header(AVFormatContext *s)
av_log(s, AV_LOG_ERROR, "video codec not compatible with flv\n");
return -1;
}
} else if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
break;
case AVMEDIA_TYPE_AUDIO:
audio_enc = enc;
if (get_audio_flags(s, enc) < 0)
return -1;
return AVERROR_INVALIDDATA;
break;
case AVMEDIA_TYPE_DATA:
if (enc->codec_id != CODEC_ID_TEXT) {
av_log(s, AV_LOG_ERROR, "codec not compatible with flv\n");
return AVERROR_INVALIDDATA;
}
data_enc = enc;
break;
default:
av_log(s, AV_LOG_ERROR, "codec not compatible with flv\n");
return -1;
}
avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
@@ -251,7 +264,11 @@ static int flv_write_header(AVFormatContext *s)
/* mixed array (hash) with size and string/type/data tuples */
avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
metadata_count_pos = avio_tell(pb);
metadata_count = 5*!!video_enc + 5*!!audio_enc + 2; // +2 for duration and file size
metadata_count = 5 * !!video_enc +
5 * !!audio_enc +
1 * !!data_enc +
2; // +2 for duration and file size
avio_wb32(pb, metadata_count);
put_amf_string(pb, "duration");
@@ -292,6 +309,11 @@ static int flv_write_header(AVFormatContext *s)
put_amf_double(pb, audio_enc->codec_tag);
}
if (data_enc) {
put_amf_string(pb, "datastream");
put_amf_double(pb, 0.0);
}
while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
if( !strcmp(tag->key, "width")
||!strcmp(tag->key, "height")
@@ -416,7 +438,8 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
else
flags_size= 1;
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
switch (enc->codec_type) {
case AVMEDIA_TYPE_VIDEO:
avio_w8(pb, FLV_TAG_TYPE_VIDEO);
flags = enc->codec_tag;
@@ -426,20 +449,20 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
}
flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
} else if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
break;
case AVMEDIA_TYPE_AUDIO:
flags = get_audio_flags(s, enc);
assert(size);
avio_w8(pb, FLV_TAG_TYPE_AUDIO);
} else {
// In-band flv metadata ("scriptdata")
assert(enc->codec_type == AVMEDIA_TYPE_DATA);
break;
case AVMEDIA_TYPE_DATA:
avio_w8(pb, FLV_TAG_TYPE_META);
flags_size = 0;
flags = 0;
break;
default:
return AVERROR(EINVAL);
}
if (enc->codec_id == CODEC_ID_H264 || enc->codec_id == CODEC_ID_MPEG4) {
/* check if extradata looks like mp4 formated */
if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) {
@@ -476,9 +499,29 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
avio_w8(pb,(ts >> 24) & 0x7F); // timestamps are 32bits _signed_
avio_wb24(pb,flv->reserved);
if(flags_size)
avio_w8(pb,flags);
if (enc->codec_type == AVMEDIA_TYPE_DATA) {
int data_size;
int metadata_size_pos = avio_tell(pb);
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "onTextData");
avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
avio_wb32(pb, 2);
put_amf_string(pb, "type");
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "Text");
put_amf_string(pb, "text");
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, pkt->data);
put_amf_string(pb, "");
avio_w8(pb, AMF_END_OF_OBJECT);
/* write total size of tag */
data_size = avio_tell(pb) - metadata_size_pos;
avio_seek(pb, metadata_size_pos - 10, SEEK_SET);
avio_wb24(pb, data_size);
avio_seek(pb, data_size + 10 - 3, SEEK_CUR);
avio_wb32(pb, data_size + 11);
} else {
avio_w8(pb,flags);
if (enc->codec_id == CODEC_ID_VP6)
avio_w8(pb,0);
if (enc->codec_id == CODEC_ID_VP6F || enc->codec_id == CODEC_ID_VP6A)
@@ -494,7 +537,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
avio_wb32(pb,size+flags_size+11); // previous tag size
flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);
}
avio_flush(pb);
av_free(data);
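On the muxing side, a minimal sketch of feeding timed text through the new data-stream path. This is not part of the commit; error handling is mostly omitted, the file name, text and timestamps are placeholders, and the 2012-era libavformat API is assumed.

    #include "libavformat/avformat.h"

    /* hypothetical helper; av_register_all() is assumed to have been called */
    static int write_text_track(void)
    {
        AVFormatContext *oc = NULL;
        AVStream *st;
        AVPacket pkt;

        avformat_alloc_output_context2(&oc, NULL, "flv", "out.flv");
        if (!oc)
            return AVERROR(ENOMEM);
        st = avformat_new_stream(oc, NULL);
        st->codec->codec_type = AVMEDIA_TYPE_DATA;
        st->codec->codec_id   = CODEC_ID_TEXT;   /* anything else is rejected above */

        avio_open(&oc->pb, "out.flv", AVIO_FLAG_WRITE);
        avformat_write_header(oc, NULL);         /* also emits the "datastream" metadata entry */

        av_init_packet(&pkt);
        pkt.data         = (uint8_t *)"hello";   /* written as the "text" value of onTextData */
        pkt.size         = 5;
        pkt.pts          = pkt.dts = 0;
        pkt.stream_index = st->index;
        av_interleaved_write_frame(oc, &pkt);

        av_write_trailer(oc);
        avio_close(oc->pb);
        avformat_free_context(oc);
        return 0;
    }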
libavformat/librtmp.c
@@ -24,13 +24,22 @@
* RTMP protocol based on http://rtmpdump.mplayerhq.hu/ librtmp
*/
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "url.h"
#include <librtmp/rtmp.h>
#include <librtmp/log.h>
typedef struct LibRTMPContext {
const AVClass *class;
RTMP rtmp;
char *app;
char *playpath;
} LibRTMPContext;
static void rtmp_log(int level, const char *fmt, va_list args)
{
switch (level) {
@@ -49,7 +58,8 @@ static void rtmp_log(int level, const char *fmt, va_list args)
static int rtmp_close(URLContext *s)
{
RTMP *r = s->priv_data;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
RTMP_Close(r);
return 0;
@@ -69,24 +79,45 @@ static int rtmp_close(URLContext *s)
*/
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
RTMP *r = s->priv_data;
int rc;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
int rc = 0, level;
char *filename = s->filename;
switch (av_log_get_level()) {
default:
case AV_LOG_FATAL: rc = RTMP_LOGCRIT; break;
case AV_LOG_ERROR: rc = RTMP_LOGERROR; break;
case AV_LOG_WARNING: rc = RTMP_LOGWARNING; break;
case AV_LOG_INFO: rc = RTMP_LOGINFO; break;
case AV_LOG_VERBOSE: rc = RTMP_LOGDEBUG; break;
case AV_LOG_DEBUG: rc = RTMP_LOGDEBUG2; break;
case AV_LOG_FATAL: level = RTMP_LOGCRIT; break;
case AV_LOG_ERROR: level = RTMP_LOGERROR; break;
case AV_LOG_WARNING: level = RTMP_LOGWARNING; break;
case AV_LOG_INFO: level = RTMP_LOGINFO; break;
case AV_LOG_VERBOSE: level = RTMP_LOGDEBUG; break;
case AV_LOG_DEBUG: level = RTMP_LOGDEBUG2; break;
}
RTMP_LogSetLevel(rc);
RTMP_LogSetLevel(level);
RTMP_LogSetCallback(rtmp_log);
if (ctx->app || ctx->playpath) {
int len = strlen(s->filename) + 1;
if (ctx->app) len += strlen(ctx->app) + sizeof(" app=");
if (ctx->playpath) len += strlen(ctx->playpath) + sizeof(" playpath=");
if (!(filename = av_malloc(len)))
return AVERROR(ENOMEM);
av_strlcpy(filename, s->filename, len);
if (ctx->app) {
av_strlcat(filename, " app=", len);
av_strlcat(filename, ctx->app, len);
}
if (ctx->playpath) {
av_strlcat(filename, " playpath=", len);
av_strlcat(filename, ctx->playpath, len);
}
}
RTMP_Init(r);
if (!RTMP_SetupURL(r, s->filename)) {
rc = -1;
if (!RTMP_SetupURL(r, filename)) {
rc = AVERROR_UNKNOWN;
goto fail;
}
@@ -94,43 +125,49 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
RTMP_EnableWrite(r);
if (!RTMP_Connect(r, NULL) || !RTMP_ConnectStream(r, 0)) {
rc = -1;
rc = AVERROR_UNKNOWN;
goto fail;
}
s->is_streamed = 1;
return 0;
rc = 0;
fail:
if (filename != s->filename)
av_freep(&filename);
return rc;
}
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
RTMP *r = s->priv_data;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
return RTMP_Write(r, buf, size);
}
static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
RTMP *r = s->priv_data;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
return RTMP_Read(r, buf, size);
}
static int rtmp_read_pause(URLContext *s, int pause)
{
RTMP *r = s->priv_data;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
if (!RTMP_Pause(r, pause))
return -1;
return AVERROR_UNKNOWN;
return 0;
}
static int64_t rtmp_read_seek(URLContext *s, int stream_index,
int64_t timestamp, int flags)
{
RTMP *r = s->priv_data;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
if (flags & AVSEEK_FLAG_BYTE)
return AVERROR(ENOSYS);
@@ -141,17 +178,36 @@ static int64_t rtmp_read_seek(URLContext *s, int stream_index,
flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP);
if (!RTMP_SendSeek(r, timestamp))
return -1;
return AVERROR_UNKNOWN;
return timestamp;
}
static int rtmp_get_file_handle(URLContext *s)
{
RTMP *r = s->priv_data;
LibRTMPContext *ctx = s->priv_data;
RTMP *r = &ctx->rtmp;
return RTMP_Socket(r);
}
#define OFFSET(x) offsetof(LibRTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{ NULL },
};
#define RTMP_CLASS(flavor)\
static const AVClass lib ## flavor ## _class = {\
.class_name = "lib" #flavor " protocol",\
.item_name = av_default_item_name,\
.option = options,\
.version = LIBAVUTIL_VERSION_INT,\
};
RTMP_CLASS(rtmp)
URLProtocol ff_librtmp_protocol = {
.name = "rtmp",
.url_open = rtmp_open,
@@ -161,10 +217,12 @@ URLProtocol ff_librtmp_protocol = {
.url_read_pause = rtmp_read_pause,
.url_read_seek = rtmp_read_seek,
.url_get_file_handle = rtmp_get_file_handle,
.priv_data_size = sizeof(RTMP),
.priv_data_size = sizeof(LibRTMPContext),
.priv_data_class = &librtmp_class,
.flags = URL_PROTOCOL_FLAG_NETWORK,
};
RTMP_CLASS(rtmpt)
URLProtocol ff_librtmpt_protocol = {
.name = "rtmpt",
.url_open = rtmp_open,
@@ -174,10 +232,12 @@ URLProtocol ff_librtmpt_protocol = {
.url_read_pause = rtmp_read_pause,
.url_read_seek = rtmp_read_seek,
.url_get_file_handle = rtmp_get_file_handle,
.priv_data_size = sizeof(RTMP),
.priv_data_size = sizeof(LibRTMPContext),
.priv_data_class = &librtmpt_class,
.flags = URL_PROTOCOL_FLAG_NETWORK,
};
RTMP_CLASS(rtmpe)
URLProtocol ff_librtmpe_protocol = {
.name = "rtmpe",
.url_open = rtmp_open,
@@ -187,10 +247,12 @@ URLProtocol ff_librtmpe_protocol = {
.url_read_pause = rtmp_read_pause,
.url_read_seek = rtmp_read_seek,
.url_get_file_handle = rtmp_get_file_handle,
.priv_data_size = sizeof(RTMP),
.priv_data_size = sizeof(LibRTMPContext),
.priv_data_class = &librtmpe_class,
.flags = URL_PROTOCOL_FLAG_NETWORK,
};
RTMP_CLASS(rtmpte)
URLProtocol ff_librtmpte_protocol = {
.name = "rtmpte",
.url_open = rtmp_open,
@@ -200,10 +262,12 @@ URLProtocol ff_librtmpte_protocol = {
.url_read_pause = rtmp_read_pause,
.url_read_seek = rtmp_read_seek,
.url_get_file_handle = rtmp_get_file_handle,
.priv_data_size = sizeof(RTMP),
.priv_data_size = sizeof(LibRTMPContext),
.priv_data_class = &librtmpte_class,
.flags = URL_PROTOCOL_FLAG_NETWORK,
};
RTMP_CLASS(rtmps)
URLProtocol ff_librtmps_protocol = {
.name = "rtmps",
.url_open = rtmp_open,
@@ -213,6 +277,7 @@ URLProtocol ff_librtmps_protocol = {
.url_read_pause = rtmp_read_pause,
.url_read_seek = rtmp_read_seek,
.url_get_file_handle = rtmp_get_file_handle,
.priv_data_size = sizeof(RTMP),
.priv_data_size = sizeof(LibRTMPContext),
.priv_data_class = &librtmps_class,
.flags = URL_PROTOCOL_FLAG_NETWORK,
};
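With the options above, librtmp receives a single space-separated URL such as "rtmp://example.com/ app=live playpath=mystream". From application code the same options can be supplied through the usual AVOptions/AVDictionary route; a minimal sketch (not part of the commit; URL and option values are placeholders):

    #include "libavformat/avformat.h"
    #include "libavutil/dict.h"

    /* hypothetical helper, for illustration only */
    static int open_rtmp_input(AVIOContext **pb)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "rtmp_app", "live", 0);           /* appended as " app=live" */
        av_dict_set(&opts, "rtmp_playpath", "mystream", 0);  /* appended as " playpath=mystream" */

        ret = avio_open2(pb, "rtmp://example.com/", AVIO_FLAG_READ, NULL, &opts);
        av_dict_free(&opts);
        return ret;   /* AVERROR_UNKNOWN if librtmp fails to set up or connect */
    }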