
Commit

remove unused streamer
meiji163 committed Nov 18, 2024
1 parent 6aaa374 commit f6ec835
Showing 6 changed files with 50 additions and 729 deletions.
266 changes: 4 additions & 262 deletions go/binlog/gomysql_reader.go
@@ -6,13 +6,10 @@
package binlog

import (
"bytes"
"fmt"
"sync"

"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"

"time"

@@ -77,116 +74,13 @@ func (this *GoMySQLReader) GetCurrentBinlogCoordinates() *mysql.BinlogCoordinate
return &returnCoordinates
}

// handleRowsEvent processes a RowsEvent from the binlog and sends the resulting DML events to the entriesChannel.
func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
if this.currentCoordinates.IsLogPosOverflowBeyond4Bytes(&this.LastAppliedRowsEventHint) {
return fmt.Errorf("Unexpected rows event at %+v, the binlog end_log_pos is overflow 4 bytes", this.currentCoordinates)
}

if this.currentCoordinates.SmallerThanOrEquals(&this.LastAppliedRowsEventHint) {
this.migrationContext.Log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
return nil
}

dml := ToEventDML(ev.Header.EventType.String())
if dml == NotDML {
return fmt.Errorf("Unknown DML type: %s", ev.Header.EventType.String())
}
for i, row := range rowsEvent.Rows {
if dml == UpdateDML && i%2 == 1 {
// An update has two rows (WHERE+SET)
// We do both at the same time
continue
}
binlogEntry := NewBinlogEntryAt(this.currentCoordinates)
binlogEntry.DmlEvent = NewBinlogDMLEvent(
string(rowsEvent.Table.Schema),
string(rowsEvent.Table.Table),
dml,
)
switch dml {
case InsertDML:
{
binlogEntry.DmlEvent.NewColumnValues = sql.ToColumnValues(row)
}
case UpdateDML:
{
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
binlogEntry.DmlEvent.NewColumnValues = sql.ToColumnValues(rowsEvent.Rows[i+1])
}
case DeleteDML:
{
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
}
}
// The channel will do the throttling. Whoever is reading from the channel
// decides whether action is taken synchronously (meaning we wait before
// next iteration) or asynchronously (we keep pushing more events)
// In reality, reads will be synchronous
entriesChannel <- binlogEntry
}
this.LastAppliedRowsEventHint = this.currentCoordinates
return nil
}

// RowsEventToBinlogEntry processes MySQL RowsEvent into our BinlogEntry for later application.
// copied from handleRowsEvent
func RowsEventToBinlogEntry(eventType replication.EventType, rowsEvent *replication.RowsEvent, binlogCoords mysql.BinlogCoordinates) (*BinlogEntry, error) {
dml := ToEventDML(eventType.String())
if dml == NotDML {
return nil, fmt.Errorf("Unknown DML type: %s", eventType.String())
}
binlogEntry := NewBinlogEntryAt(binlogCoords)
for i, row := range rowsEvent.Rows {
if dml == UpdateDML && i%2 == 1 {
// An update has two rows (WHERE+SET)
// We do both at the same time
continue
}
binlogEntry.DmlEvent = NewBinlogDMLEvent(
string(rowsEvent.Table.Schema),
string(rowsEvent.Table.Table),
dml,
)
switch dml {
case InsertDML:
{
binlogEntry.DmlEvent.NewColumnValues = sql.ToColumnValues(row)
}
case UpdateDML:
{
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
binlogEntry.DmlEvent.NewColumnValues = sql.ToColumnValues(rowsEvent.Rows[i+1])
}
case DeleteDML:
{
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
}
}
}
return binlogEntry, nil
}

type Transaction struct {
SequenceNumber int64
LastCommitted int64
Changes chan *BinlogEntry
}

func (this *GoMySQLReader) StreamTransactions(ctx context.Context, transactionsChannel chan<- *Transaction) error {
if err := ctx.Err(); err != nil {
return err
}

previousSequenceNumber := int64(0)

groups:
// StreamEvents reads binlog events and sends them to the given channel.
// It is blocking and should be executed in a goroutine.
func (this *GoMySQLReader) StreamEvents(ctx context.Context, eventChannel chan<- *replication.BinlogEvent) error {
for {
if err := ctx.Err(); err != nil {
return err
}

ev, err := this.binlogStreamer.GetEvent(ctx)
if err != nil {
return err
@@ -197,160 +91,8 @@ groups:
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
this.currentCoordinates.EventSize = int64(ev.Header.EventSize)
}()

fmt.Printf("Event: %s\n", ev.Header.EventType)

// Read each event and do something with it
//
// First, ignore all events until we find the next GTID event so that we can start
// at a transaction boundary.
//
// Once we find a GTID event, we can start an event group,
// and then process all events in that group.
// An event group is defined as all events that are part of the same transaction,
// which is defined as all events between the GTID event, a `QueryEvent` containing a `BEGIN` query and ends with
// either a XIDEvent or a `QueryEvent` containing a `COMMIT` or `ROLLBACK` query.
//
// Each group is a struct containing the SequenceNumber, LastCommitted, and a channel of events.
//
// Once the group has ended, we can start looking for the next GTID event.

var group *Transaction
switch binlogEvent := ev.Event.(type) {
case *replication.GTIDEvent:
this.migrationContext.Log.Infof("GTIDEvent: %+v", binlogEvent)

// Bail out if we find a gap in the sequence numbers
if previousSequenceNumber != 0 && binlogEvent.SequenceNumber != previousSequenceNumber+1 {
return fmt.Errorf("unexpected sequence number: %d, expected %d", binlogEvent.SequenceNumber, previousSequenceNumber+1)
}

group = &Transaction{
SequenceNumber: binlogEvent.SequenceNumber,
LastCommitted: binlogEvent.LastCommitted,
Changes: make(chan *BinlogEntry, 1000),
}

previousSequenceNumber = binlogEvent.SequenceNumber

// We are good to send the transaction, the transaction events arrive async
this.migrationContext.Log.Infof("sending transaction: %d %d", group.SequenceNumber, group.LastCommitted)
transactionsChannel <- group
default:
this.migrationContext.Log.Infof("Ignoring Event: %+v", ev.Event)
continue
}

// Next event should be a query event

ev, err = this.binlogStreamer.GetEvent(ctx)
if err != nil {
close(group.Changes)
return err
}
this.migrationContext.Log.Infof("1 - Event: %s", ev.Header.EventType)

switch binlogEvent := ev.Event.(type) {
case *replication.QueryEvent:
if bytes.Equal([]byte("BEGIN"), binlogEvent.Query) {
this.migrationContext.Log.Infof("BEGIN for transaction in schema %s", binlogEvent.Schema)
} else {
this.migrationContext.Log.Infof("QueryEvent: %+v", binlogEvent)
this.migrationContext.Log.Infof("Query: %s", binlogEvent.Query)

close(group.Changes)

// wait for the next event group
continue groups
}
default:
this.migrationContext.Log.Infof("unexpected Event: %+v", ev.Event)
close(group.Changes)

// TODO: handle the group - we want to make sure we process the group's LastCommitted and SequenceNumber

// wait for the next event group
continue groups
}

// Next event should be a table map event

events:
// Now we can start processing the group
for {
ev, err = this.binlogStreamer.GetEvent(ctx)
if err != nil {
close(group.Changes)
return err
}
this.migrationContext.Log.Infof("3 - Event: %s", ev.Header.EventType)

switch binlogEvent := ev.Event.(type) {
case *replication.TableMapEvent:
this.migrationContext.Log.Infof("TableMapEvent for %s.%s: %+v", binlogEvent.Schema, binlogEvent.Table, binlogEvent)
case *replication.RowsEvent:
binlogEntry, err := RowsEventToBinlogEntry(ev.Header.EventType, binlogEvent, this.currentCoordinates)
if err != nil {
close(group.Changes)
return err
}
this.migrationContext.Log.Infof("RowsEvent: %v", binlogEvent)
group.Changes <- binlogEntry
this.migrationContext.Log.Infof("Length of group.Changes: %d", len(group.Changes))
case *replication.XIDEvent:
this.migrationContext.Log.Infof("XIDEvent: %+v", binlogEvent)
this.migrationContext.Log.Infof("COMMIT for transaction")
break events
default:
close(group.Changes)
this.migrationContext.Log.Infof("unexpected Event: %+v", ev.Event)
return fmt.Errorf("unexpected Event: %+v", ev.Event)
}
}

close(group.Changes)

this.migrationContext.Log.Infof("done processing group - %d events", len(group.Changes))
}
}

// StreamEvents
func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesChannel chan<- *BinlogEntry) error {
if canStopStreaming() {
return nil
}
for {
if canStopStreaming() {
break
}
ev, err := this.binlogStreamer.GetEvent(context.Background())
if err != nil {
return err
}
func() {
this.currentCoordinatesMutex.Lock()
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
this.currentCoordinates.EventSize = int64(ev.Header.EventSize)
}()

switch binlogEvent := ev.Event.(type) {
case *replication.RotateEvent:
func() {
this.currentCoordinatesMutex.Lock()
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
}()
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
case *replication.RowsEvent:
if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
return err
}
}
eventChannel <- ev
}
this.migrationContext.Log.Debugf("done streaming events")

return nil
}

func (this *GoMySQLReader) Close() error {
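Below is a minimal consumer sketch (not part of this commit) showing how the retained StreamEvents API might be used, per its doc comment that it blocks and should run in a goroutine. The interface literal mirrors the new StreamEvents signature from this file; the go-mysql import path, the handleEvent callback, and the channel buffer size are assumptions for illustration.

package binlogconsumer

import (
	"context"

	"github.com/go-mysql-org/go-mysql/replication"
)

// binlogStreamer mirrors the StreamEvents method kept by this commit.
type binlogStreamer interface {
	StreamEvents(ctx context.Context, eventChannel chan<- *replication.BinlogEvent) error
}

// consumeBinlog runs StreamEvents in its own goroutine (it blocks until the
// context is canceled or an error occurs) and drains events from the channel.
func consumeBinlog(ctx context.Context, reader binlogStreamer, handleEvent func(*replication.BinlogEvent)) error {
	eventChannel := make(chan *replication.BinlogEvent, 1000) // buffer size is an assumption
	errChan := make(chan error, 1)

	go func() {
		errChan <- reader.StreamEvents(ctx, eventChannel)
		close(eventChannel)
	}()

	for ev := range eventChannel {
		handleEvent(ev) // e.g. hand off to an applier or coordinator
	}
	return <-errChan
}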
