cargo-xwin drives the Windows MSVC cross-compile via clang-cl, under which CMake sets MSVC=1 — causing libopus 1.3.1's `if(NOT MSVC)` guards to skip the per-file `-msse4.1` / `-mssse3` COMPILE_FLAGS that its x86 SIMD source files need. Clang-cl (unlike real cl.exe) still honors Clang's target-feature system, so those files then fail to compile with "always_inline function '_mm_cvtepi16_epi32' requires target feature 'sse4.1'" errors across silk/NSQ_sse4_1.c, NSQ_del_dec_sse4_1.c, and VQ_WMat_EC_sse4_1.c. Earlier attempts to fix this downstream (cargo-xwin toolchain file, override.cmake CMAKE_C_COMPILE_OBJECT <FLAGS> replace, CFLAGS env vars) all failed because cargo-xwin rewrites override.cmake from scratch on every `cargo xwin build` invocation and cmake-rs's -DCMAKE_C_FLAGS= assembly happens before toolchain FORCE sets propagate. Fixing it upstream at the source: vendor audiopus_sys 0.2.2 into vendor/audiopus_sys, patch its bundled opus/CMakeLists.txt to introduce an MSVC_CL var (true only when CMAKE_C_COMPILER_ID == "MSVC", i.e. real cl.exe), and flip the eight `if(NOT MSVC)` SIMD guards to `if(NOT MSVC_CL)`. Clang-cl then gets the GCC-style per-file flags and the SSE4.1 sources build cleanly. Also flip the `if(MSVC)` global /arch block at line 445 to `if(MSVC_CL)` so only cl.exe applies /arch:AVX and clang-cl relies purely on per-file flags (no global/per-file mixing). Wire via [patch.crates-io] in the workspace root Cargo.toml; the patch is resolved relative to the workspace root as `vendor/audiopus_sys`. Upstream context: xiph/opus#256, xiph/opus PR #257 (both stale). Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
68 lines
2.1 KiB
Python
Executable File
68 lines
2.1 KiB
Python
Executable File
#!/usr/bin/python
|
|
|
|
from __future__ import print_function
|
|
|
|
from keras.models import Sequential
|
|
from keras.models import Model
|
|
from keras.layers import Input
|
|
from keras.layers import Dense
|
|
from keras.layers import LSTM
|
|
from keras.layers import GRU
|
|
from keras.layers import SimpleRNN
|
|
from keras.layers import Dropout
|
|
from keras import losses
|
|
import h5py
|
|
|
|
from keras import backend as K
|
|
import numpy as np
|
|
|
|
def binary_crossentrop2(y_true, y_pred):
    """Confidence-weighted binary cross-entropy loss.

    Each element's binary cross-entropy is scaled by 2*|y_true - 0.5|, so
    targets near 0.5 (ambiguous labels) contribute almost nothing to the
    loss, while confident 0/1 targets receive full weight.

    NOTE(review): the backend call passes (y_pred, y_true) — the reverse of
    the Keras 2 convention K.binary_crossentropy(target, output). Preserved
    as-is; confirm against the Keras version this script targets.
    """
    confidence_weight = 2 * K.abs(y_true - 0.5)
    per_element_bce = K.binary_crossentropy(y_pred, y_true)
    return K.mean(confidence_weight * per_element_bce, axis=-1)
|
|
|
|
print('Build model...')

# Functional-API model: Dense(16, tanh) feature embedding, a 12-unit GRU with
# light dropout returning the full output sequence, and a 2-unit sigmoid head.
# Input is (batch, time, 25) with an unconstrained time dimension.
main_input = Input(shape=(None, 25), name='main_input')
hidden = Dense(16, activation='tanh')(main_input)
hidden = GRU(12, dropout=0.1, recurrent_dropout=0.1, activation='tanh',
             recurrent_activation='sigmoid', return_sequences=True)(hidden)
predictions = Dense(2, activation='sigmoid')(hidden)
model = Model(inputs=main_input, outputs=predictions)
|
|
|
|
batch_size = 64

print('Loading data...')
# Materialize the whole 'features' dataset into memory as a NumPy array;
# the context manager closes the HDF5 file as soon as the copy is made.
feature_file = 'features.h5'
with h5py.File(feature_file, 'r') as hf:
    all_data = hf['features'][:]
print('done.')
|
|
|
|
window_size = 1500

# Number of whole training windows available. Floor division is required:
# the previous plain `/` yields a float under Python 3, which then breaks
# the slice bounds and np.reshape calls below with a TypeError. Under
# Python 2 (see the shebang) `//` is identical to the old behavior.
nb_sequences = len(all_data) // window_size
print(nb_sequences, ' sequences')

# Columns [:-2] are the input features (25 of them); the last 2 columns are
# the targets. Truncate to a whole number of windows, then reshape to the
# (sequences, timesteps, channels) layout the GRU expects.
x_train = all_data[:nb_sequences * window_size, :-2]
x_train = np.reshape(x_train, (nb_sequences, window_size, 25))

y_train = np.copy(all_data[:nb_sequences * window_size, -2:])
y_train = np.reshape(y_train, (nb_sequences, window_size, 2))

# Drop the reference to the full dataset so the big array can be freed.
all_data = 0

x_train = x_train.astype('float32')
y_train = y_train.astype('float32')

print(len(x_train), 'train sequences. x shape =', x_train.shape, 'y shape = ', y_train.shape)
|
|
|
|
# Compile with the confidence-weighted cross-entropy defined above; plain
# binary accuracy is tracked purely for monitoring. Other optimizers and
# optimizer configs are worth experimenting with here.
model.compile(
    loss=binary_crossentrop2,
    optimizer='adam',
    metrics=['binary_accuracy'],
)

print('Train...')
# NOTE(review): validation_data reuses the training set, so the val_*
# metrics do not measure generalization — presumably intentional for this
# script; confirm before relying on them.
model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=200,
    validation_data=(x_train, y_train),
)
model.save("newweights.hdf5")
|