A lot of fun :)
cube2222 committed Mar 6, 2019
1 parent 1f5a3e3 commit ec7d612
Showing 10 changed files with 244 additions and 50 deletions.
29 changes: 26 additions & 3 deletions cmd/octosql/main.go
@@ -1,13 +1,15 @@
package main

import (
"context"
"log"

"github.com/xwb1989/sqlparser"
"github.com/cube2222/octosql"
"github.com/cube2222/octosql/storage/json"
)

func main() {
stmt, err := sqlparser.Parse("SELECT prefix(name, 3), age FROM (SELECT * FROM users) g")
/*stmt, err := sqlparser.Parse("SELECT prefix(name, 3), age FROM (SELECT * FROM users) g")
if err != nil {
log.Println(err)
}
@@ -17,7 +19,7 @@ func main() {
if typed, ok := stmt.(*sqlparser.Select); ok {
log.Println(typed)
log.Println(typed)
}
}*/

/*client := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
@@ -44,4 +46,25 @@ func main() {
}
log.Printf("%+v", result)*/

desc := json.NewJSONDataSourceDescription("people.json")
ds, err := desc.Initialize(context.Background(), nil)
if err != nil {
log.Fatal(err)
}
records, err := ds.Get(nil)
if err != nil {
log.Fatal(err)
}
var record *octosql.Record
for record, err = records.Next(); err == nil; record, err = records.Next() {
log.Printf("%+v", record.Fields())
log.Printf("%+v", record.Value("city"))
poch := record.Value("pochodzenie")
if poch != nil {
log.Printf("%+v", poch.([]interface{})[0])
}
}
if err != nil && err != octosql.ErrEndOfStream {
log.Fatal(err)
}
}
5 changes: 4 additions & 1 deletion go.mod
@@ -1,3 +1,6 @@
module github.com/cube2222/octosql

require github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2
require (
github.com/pkg/errors v0.8.1
github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2
)
2 changes: 2 additions & 0 deletions go.sum
@@ -1,2 +1,4 @@
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ=
github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY=
73 changes: 73 additions & 0 deletions logic.go
@@ -0,0 +1,73 @@
package octosql

type RelationType string

const (
Equal RelationType = "equal"
NotEqual RelationType = "not_equal"
MoreThan RelationType = "more_than"
LessThan RelationType = "less_than"
Like RelationType = "like"
In RelationType = "in"
)

type Formula interface {
Evaluate(record Record, primitives map[string]interface{}) bool
Fields() map[string]struct{}
Primitives() map[string]struct{}
}

// TODO: Implement:

type And struct {
Left, Right Formula
}

type Or struct {
Left, Right Formula
}

type Not struct {
Child Formula
}

type Predicate struct {
Left Expression
Filter RelationType
Right Expression
}

func (p *Predicate) Evaluate(record Record, primitives map[string]interface{}) bool {
panic("implement me")
return true
}

type Expression interface {
ExpressionValue(record Record, primitives map[string]interface{}) interface{}
}

type Primitive struct {
id string
}

func (p *Primitive) ExpressionValue(record Record, primitives map[string]interface{}) interface{} {
return primitives[p.id]
}

type FieldReference struct {
id FieldIdentifier
}

func (f *FieldReference) ExpressionValue(record Record, primitives map[string]interface{}) interface{} {
return record.Value(f.id)
}

var expression = Predicate{
Left: &FieldReference{
id: "City",
},
Filter: Equal,
Right: &Primitive{
id: "city_x",
},
}
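
The combinators above are still marked "TODO: Implement" in this commit. Purely as an illustration (not part of the diff), And could satisfy the Formula interface along these lines; mergeSets is a hypothetical helper, and Or/Not would follow the same pattern with || and !:

// Illustrative sketch, not part of this commit.
func (f *And) Evaluate(record Record, primitives map[string]interface{}) bool {
	return f.Left.Evaluate(record, primitives) && f.Right.Evaluate(record, primitives)
}

func (f *And) Fields() map[string]struct{} {
	return mergeSets(f.Left.Fields(), f.Right.Fields())
}

func (f *And) Primitives() map[string]struct{} {
	return mergeSets(f.Left.Primitives(), f.Right.Primitives())
}

// mergeSets is a hypothetical helper that unions two string sets.
func mergeSets(a, b map[string]struct{}) map[string]struct{} {
	out := make(map[string]struct{}, len(a)+len(b))
	for k := range a {
		out[k] = struct{}{}
	}
	for k := range b {
		out[k] = struct{}{}
	}
	return out
}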
4 changes: 4 additions & 0 deletions people.json
@@ -0,0 +1,4 @@
{"name": "jan", "surname": "chomiak", "age": 3, "city": "warsaw"}
{"name": "wojtek", "surname": "kuzminski", "age": 4, "city": "warsaw"}
{"name": "adam", "surname": "cz", "age": 5, "city": "ciechanowo"}
{"name": "Kuba", "surname": "M", "age": 2, "city": "ciechanowo", "pochodzenie": ["polska", "niemcy"]}
9 changes: 5 additions & 4 deletions physical.go
@@ -33,14 +33,15 @@ type UnionAll struct {
}

func (union *UnionAll) Get(predicateValues []interface{}) (RecordStream, error) {
stream, err := union.left.Get(predicateValues)
panic("not implemented yet")

/*stream, err := union.left.Get(predicateValues)
if err != nil {
return nil, err
}
streamSecond, err := union.right.Get(predicateValues)
if err != nil {
return nil, err
}

return ConcatStreams(stream, streamSecond), nil
}*/
//return ConcatStreams(stream, streamSecond), nil
}
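
The commented-out body above relies on a ConcatStreams helper that does not exist in this commit. A minimal sketch of what it might look like, assuming the RecordStream and ErrEndOfStream definitions from uniquery.go:

// Sketch only, not part of this commit: concatenate streams by draining them in order.
func ConcatStreams(streams ...RecordStream) RecordStream {
	return &concatStream{streams: streams}
}

type concatStream struct {
	streams []RecordStream
}

func (cs *concatStream) Next() (*Record, error) {
	for len(cs.streams) > 0 {
		record, err := cs.streams[0].Next()
		if err == ErrEndOfStream {
			// The current stream is exhausted; move on to the next one.
			cs.streams = cs.streams[1:]
			continue
		}
		if err != nil {
			return nil, err
		}
		return record, nil
	}
	return nil, ErrEndOfStream
}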
37 changes: 4 additions & 33 deletions storage.go
@@ -2,19 +2,6 @@ package octosql

import "context"

/* TODO: This is actually unnecessary too, since it's just filtering without predicates.
type GetAll interface {
GetAll(ctx context.Context) (RecordStream, error)
}*/

/*
TODO: Maybe yes, maybe no?
We'll see whether it's worth having a single-record Get like this. But that's probably over-engineering,
if we end up having to operate on streams everywhere.
type GetByPrimaryKey interface {
GetByPrimaryKey(ctx context.Context, key interface{}) (*Record, error)
}*/

type FieldType string

// TODO: composite primary key?
@@ -23,32 +10,16 @@ const (
Secondary FieldType = "secondary"
)

type FilterType string

const (
Equal FilterType = "equal"
NotEqual FilterType = "not_equal"
MoreThan FilterType = "more_than"
LessThan FilterType = "less_than"
Like FilterType = "like"
In FilterType = "in"
)

// TODO: What if there is a dependency between columns?
type Predicate struct {
Field string
Filter FilterType
}

// For databases this should return a static list of available filters based on the database type
// and probably initialize some templated query for the given predicates when asked to initialize.
// For operators (like where or join) this should return filters based on the filters in the data sources under this.
// For a file data source this could create an index for the predicated fields to achieve faster access.
type DataSourceDescription interface {
Initialize(ctx context.Context, predicates []Predicate) (DataSource, error)
AvailableFilters() map[FieldType]map[FilterType]struct{}
Initialize(ctx context.Context, predicates Formula) (DataSource, error)
PrimaryKeys() map[string]struct{}
AvailableFilters() map[FieldType]map[RelationType]struct{}
}

type DataSource interface {
Get(predicateValues []interface{}) (RecordStream, error)
Get(primitiveValues map[string]interface{}) (RecordStream, error)
}
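
To make the comment above concrete (hypothetical, not part of this commit): a database-backed description could advertise a static filter set per field type, whereas the JSON description below reports no filters at all. PostgresDataSourceDescription is an invented name used only for illustration:

// Hypothetical example: a SQL-backed description exposing key lookups on primary
// keys and a broader filter set on secondary fields.
func (dsd *PostgresDataSourceDescription) AvailableFilters() map[FieldType]map[RelationType]struct{} {
	return map[FieldType]map[RelationType]struct{}{
		Primary: {
			Equal: {},
			In:    {},
		},
		Secondary: {
			Equal:    {},
			NotEqual: {},
			MoreThan: {},
			LessThan: {},
			Like:     {},
			In:       {},
		},
	}
}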
53 changes: 53 additions & 0 deletions storage/json/datasource.go
@@ -0,0 +1,53 @@
package json

import (
"bufio"
"encoding/json"
"os"

"github.com/cube2222/octosql"
"github.com/pkg/errors"
)

type JSONDataSource struct {
path string
}

func (ds *JSONDataSource) Get(primitiveValues map[string]interface{}) (octosql.RecordStream, error) {
file, err := os.Open(ds.path)
if err != nil {
return nil, errors.Wrap(err, "couldn't open file")
}
sc := bufio.NewScanner(file)
return &JSONRecordStream{
file: file,
sc: sc,
isDone: false,
}, nil
}

type JSONRecordStream struct {
file *os.File
sc *bufio.Scanner
isDone bool
}

func (rs *JSONRecordStream) Next() (*octosql.Record, error) {
if rs.isDone {
return nil, octosql.ErrEndOfStream
}

if !rs.sc.Scan() {
rs.isDone = true
rs.file.Close()
return nil, octosql.ErrEndOfStream
}

var record map[string]interface{}
err := json.Unmarshal(rs.sc.Bytes(), &record)
if err != nil {
return nil, errors.Wrap(err, "couldn't unmarshal json record")
}

return octosql.NewRecord(record), nil
}
41 changes: 41 additions & 0 deletions storage/json/description.go
@@ -0,0 +1,41 @@
package json

import (
"context"
"os"

"github.com/cube2222/octosql"
"github.com/pkg/errors"
)

type JSONDataSourceDescription struct {
path string
}

func NewJSONDataSourceDescription(path string) *JSONDataSourceDescription {
return &JSONDataSourceDescription{
path: path,
}
}

func (dsd *JSONDataSourceDescription) Initialize(ctx context.Context, predicates octosql.Formula) (octosql.DataSource, error) {
_, err := os.Stat(dsd.path)
if err != nil {
return nil, errors.Wrap(err, "couldn't stat file")
}

return &JSONDataSource{
path: dsd.path,
}, nil
}

func (dsd *JSONDataSourceDescription) PrimaryKeys() map[string]struct{} {
return make(map[string]struct{})
}

func (dsd *JSONDataSourceDescription) AvailableFilters() map[octosql.FieldType]map[octosql.RelationType]struct{} {
return map[octosql.FieldType]map[octosql.RelationType]struct{}{
octosql.Primary: make(map[octosql.RelationType]struct{}),
octosql.Secondary: make(map[octosql.RelationType]struct{}),
}
}
41 changes: 32 additions & 9 deletions uniquery.go
@@ -1,12 +1,14 @@
package octosql

import "errors"
import (
"errors"
)

type Datatype string

const (
ColumnString Datatype = "string"
ColumnInt Datatype = "int"
DatatypeString Datatype = "string"
DatatypeInt Datatype = "int"
)

type FieldIdentifier string
@@ -16,17 +18,38 @@ type Field struct {
Type Datatype
}

type Value struct {
Value interface{}
type Record struct {
data map[string]interface{}
}

func NewRecord(data map[string]interface{}) *Record {
return &Record{
data: data,
}
}

func (r *Record) Value(field FieldIdentifier) interface{} {
return r.data[string(field)]
}

func (r *Record) Fields() []Field {
fields := make([]Field, 0)
for k := range r.data {
fields = append(fields, Field{
Name: FieldIdentifier(k),
Type: getType(r.data[k]),
})
}

return fields
}

type Record interface {
Value(field FieldIdentifier) Value
Fields() []Field
func getType(i interface{}) Datatype {
return DatatypeString
}

type RecordStream interface {
Next() (Record, error)
Next() (*Record, error)
}

var ErrEndOfStream = errors.New("end of stream")
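
getType above is a stub that always reports a string. As a sketch of where it could go (not in this commit), a fuller version might type-switch on the value; note that records produced via encoding/json carry numbers as float64:

// Sketch only: map JSON-decoded values to the two datatypes defined above.
func getType(i interface{}) Datatype {
	switch i.(type) {
	case int, float64:
		// json.Unmarshal into interface{} yields float64 for numbers.
		return DatatypeInt
	case string:
		return DatatypeString
	default:
		return DatatypeString
	}
}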
